query            string   lengths 9 to 9.05k
document         string   lengths 10 to 222k
metadata         dict
negatives        list     lengths 30 to 30
negative_scores  list     lengths 30 to 30
document_score   string   lengths 4 to 10
document_rank    string   2 classes
Get the folder above a Rez package, assuming the path is to a built Rez package. The "packages path" of a Rez package is basically "The path that would be needed in order to make this package discoverable by the rezenv command". For example, a released package like "~/.rez/packages/int/foo/1.0.0" has a packages path like "~/.rez/packages/int" but if the package is not built and is just a source package like "~/repositories/my_packages/foo" then the packages path is "~/repositories/my_packages".
def get_packages_path_from_package(package):
    root = finder.get_package_root(package)

    if is_built_package(package):
        package_name_folder = os.path.dirname(root)
        return os.path.dirname(package_name_folder)

    return os.path.dirname(root)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_package_path(path: Path) -> Optional[Path]:\n for package_path in path.iterdir():\n if is_package_dir(package_path):\n return package_path", "def pypkgpath(self):\n pkgpath = None\n for parent in self.parts(reverse=True):\n if parent.isdir():\n if not parent.join(\"__init__.py\").exists():\n break\n if not isimportable(parent.basename):\n break\n pkgpath = parent\n return pkgpath", "def get_package_dir():\n return Path(__file__).parent", "def get_package_path():\n package_name = get_package_name()\n return package_name.replace('.', '/')", "def _pkg_path(self, pkg):\n r = rospkg.RosPack()\n pkg_path = r.get_path(pkg) \n return pkg_path", "def _pkg_path(self, pkg):\n r = rospkg.RosPack()\n pkg_path = r.get_path(pkg) \n return pkg_path", "def package_path(pkg):\n fname = pkgutil.get_loader(pkg).get_filename()\n dirname = op.dirname(fname)\n dirname = op.abspath(op.join(dirname, '..'))\n return dirname", "def get_package_root(package):\n path_to_package = package.resource.location\n\n if not os.path.isdir(path_to_package) and hasattr(package, \"filepath\"):\n return os.path.dirname(package.filepath)\n\n path = os.path.join(path_to_package, package.name, str(package.version))\n\n if not os.path.isdir(path):\n raise EnvironmentError(\n 'Package \"{package}\" has an invalid path \"{path}\".'\n \"\".format(package=package, path=path)\n )\n\n return path", "def get_pack_path():\r\n return get_package_path().replace(\"\\\\\", \"/\").replace(\"src\", \"\")", "def _parent_path(pkg, pkg_path):\n parent = pkg_path[: -len(pkg)] if pkg_path.endswith(pkg) else pkg_path\n return parent.rstrip(\"/\" + os.sep)", "def path(self):\n installed_packages_folder_path = site.getsitepackages()[0]\n return f'{installed_packages_folder_path}/{SITE_PACKAGES_FOLDER_NAME}'", "def package_folder(self):\n return self._base_package", "def _package_name(root_path, path):\n if not _under(path, root_path):\n raise ValueError('\"%s\" is not a subpath of \"%s\"' % (path, root_path))\n return path[len(root_path) + 1:].replace(os.sep, '.')", "def get_package_folders(which_folder: str = \"root\") -> Path:\n logg = logging.getLogger(f\"c.{__name__}.get_package_folders\")\n # logg.setLevel(\"INFO\")\n logg.debug(\"Start get_package_folders\")\n\n this_file_folder = Path(__file__).absolute().parent\n logg.debug(f\"{this_file_folder=}\")\n\n package_root_folder = this_file_folder.parent.parent.parent\n\n if which_folder == \"root\":\n return package_root_folder\n\n elif which_folder == \"tests\":\n tests_folder = package_root_folder / \"tests\"\n return tests_folder\n\n elif which_folder == \"template_epub\":\n template_epub_folder = package_root_folder / \"assets\" / \"template_epub\"\n return template_epub_folder\n\n elif which_folder == \"word_pairs\":\n word_pairs_folder = package_root_folder / \"assets\" / \"word_pairs\"\n return word_pairs_folder\n\n elif which_folder == \"common_words\":\n common_words_folder = package_root_folder / \"assets\" / \"common_words\"\n return common_words_folder\n\n else:\n raise KeyError(f\"Not recognized {which_folder}\")", "def package_repo_resource(self, *path):\n return self._module.PACKAGE_REPO_ROOT.join(*path)", "def getPackagePath(self):\n return self._packagePath", "def get_package_path(ontology, parent, package):\n result = get_ontology_name(ontology)\n result += '.v'\n result += get_ontology_version(ontology)\n result += '.'\n result += get_package_name(parent)\n result += '.'\n result += get_package_name(package)\n return result", "def GetPackageDirectory():\n return 
os.path.dirname(__file__)", "def _get_package_path(name): # 获取 模块包 路径, Flask() 中 引用\n try:\n return os.path.abspath(os.path.dirname(sys.modules[name].__file__))\n except (KeyError, AttributeError):\n return os.getcwd()", "def package_to_path(package):\n return package.replace('.','/')", "def findPkgPath(self, pkg):\r\n try:\r\n return self._rp.get_path(pkg)\r\n except rospkg.ResourceNotFound:\r\n raise ResourceNotFound('Can not find ROS package '\r\n '\"{0}\".'.format(pkg))", "def get_target_folder() -> str:\n return os.path.abspath(os.path.join(dirname(__file__), os.pardir, os.pardir, \"provider_packages\"))", "def repo_root() -> str:\n path = os.path.realpath(os.curdir)\n\n while True:\n if os.path.exists(os.path.join(path, \"setup.py\")):\n return path\n path = os.path.realpath(os.path.join(path, \"..\"))", "def package_path(self, package_name):\n if not self.is_adb_available():\n return None\n\n _path = self._do_adb_command('shell pm path ' + package_name)\n if _path:\n try:\n _path = _path.join(_path.split()) # remove \\r\\n\n _path = _path.split(':')\n if len(_path) > 1 and _path[0] == 'package':\n ret = _path[1]\n if ret.endswith('apkpackage'):\n # handle new android packages\n ret = '/'.join(ret.split('/')[:-1])\n return ret\n except ValueError:\n pass\n\n return None", "def _get_package_directory(self, package_name: str) -> str:\n return os.path.abspath(\n os.path.join(\n os.path.dirname(__file__),\n '..', '..', 'assets', package_name, package_name,\n )\n )", "def get_path_from_package(package):\n if isinstance(package, str):\n pkg = package\n elif isinstance(package, _ModuleType):\n pkg = package.__package__\n else:\n raise ValueError('Invalid package type, must be str or module')\n dist = _pkg_resources.get_distribution(pkg)\n return dist.location, dist.version", "def app_package_path(self) -> str:\n return self._app_package_path", "def get_package_python_paths(package, paths):\n # Note: Here we're trying to get `package`'s specific changes to PYTHONPATH (if any)\n #\n # Unfortunately, the Rez API doesn't really support this yet.\n # There's 2 GitHub links that may one-day implement it though:\n # - https://github.com/nerdvegas/rez/issues/737\n # - https://github.com/nerdvegas/rez/pull/739\n #\n # Reference: https://rez-talk.slack.com/archives/CHELFCTFB/p1578604659006100\n #\n # Once that work is merged, replace `get_package_python_paths` with it.\n #\n root = finder.get_package_root(package)\n\n if is_built_package(package):\n return {path for path in paths if filer.in_directory(path, root, follow=False)}\n\n output = set()\n\n for path in paths:\n # If the Rez package is a source Rez package + has variants\n # we need to strip the \"variants\" out of `path`, before\n # returning it.\n #\n try:\n variant_less_path = next(\n _iter_variant_extracted_paths(root, path, package.variants or [])\n )\n except StopIteration:\n pass\n else:\n output.add(variant_less_path)\n\n continue\n\n if filer.in_directory(path, root, follow=False) or filer.in_directory(\n path, root, follow=True\n ):\n output.add(path)\n\n continue\n\n return output", "def check_package_path(pkg):\n src_dir_root = ''\n print(\"[root-get] DEBUG: Checking package path\")\n check_package_name = os.system('find %s -maxdepth 1 -type d -name \"%s\" ! -path \"*tutorials*\" ! 
-path \"*dictpch*\"' % (ROOT_SOURCES, pkg))\n if check_package_name != 0:\n print(\"Not a ROOT package (we are working only with ROOT packages for now.)\")\n return False\n else:\n # if have such directory in root then we can try to get it's real path\n path = PathChecker()\n src_dir_root = path.path4pkg(pkg, ROOT_SOURCES)\n print(\"[root-get] We would use a package from {0:s}\".format(src_dir_root))\n return src_dir_root", "def package_metadata_path(self, package=None):\n profile_path, _ = metadata.get_paths(self.workspace, self.profile)\n if package is None:\n return os.path.join(profile_path, 'packages')\n return os.path.join(profile_path, 'packages', package.name)" ]
[ "0.71920997", "0.6838155", "0.6807825", "0.68067145", "0.6790578", "0.6790578", "0.67490464", "0.67232215", "0.65178114", "0.651199", "0.6466275", "0.63531727", "0.6334713", "0.6315311", "0.62793267", "0.6258786", "0.6246984", "0.6219219", "0.6211382", "0.61962104", "0.6135594", "0.61189187", "0.6105407", "0.6063939", "0.60606277", "0.59747034", "0.5968869", "0.59605736", "0.59309125", "0.59155387" ]
0.7395061
0
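The per-row objective metadata above marks each row for triplet training over (query, document, negatives). As a rough illustration only, the sketch below shows how one such row could be expanded into (anchor, positive, negative) triplets; the `row` dict and the `row_to_triplets` helper are assumptions for illustration, not part of the dataset itself.

# Illustrative only: the field names come from the schema above; everything else is assumed.
def row_to_triplets(row):
    anchor = row["query"]        # the natural-language description
    positive = row["document"]   # the matching code snippet
    negatives = row["negatives"] # the 30 non-matching code snippets
    return [(anchor, positive, negative) for negative in negatives]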
Takes a statement string and a list of statement strings. Returns the closest matching statement from the list.
def get(self, input_statement):
    statement_list = self.chatbot.storage.get_response_statements(input_statement.text)
    print("from adapter: " + str(len(statement_list)))

    if not statement_list:
        if self.chatbot.storage.count():
            # Use a randomly picked statement
            self.logger.info(
                'No statements have known responses. ' +
                'Choosing a default response to return.'
            )
            input_statement.confidence = 0
            return input_statement
        else:
            raise self.EmptyDatasetException()

    closest_match = input_statement
    closest_match.confidence = 0

    # Find the closest matching known statement
    for statement in statement_list:
        confidence = self.compare_statements(input_statement, statement)

        if confidence > closest_match.confidence:
            statement.confidence = confidence
            closest_match = statement

    return closest_match
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def closest(text, database):\n from fuzzywuzzy import process\n\n # Check if an exact match exists\n if database.find(text):\n return text\n\n # Get the closest matching statement from the database\n return process.extract(text, database.keys(), limit=1)[0][0]", "def closest_match(\r\n reference_sequence: str, query_sequences: List[str], matrix_str: str = BLOSUM62\r\n) -> Union[str, List, None]:\r\n scores = [\r\n (reference_sequence, query_sequence, matrix_score(reference_sequence, query_sequence, matrix_str))\r\n for query_sequence in query_sequences\r\n ]\r\n if len(scores) == 0:\r\n return None\r\n max_score = max(scores, key=itemgetter(2))\r\n max_sequences = [query_sequence for _, query_sequence, score in scores if score == max_score[2]]\r\n if len(max_sequences) == 1:\r\n return max_sequences[0]\r\n return max_sequences", "def closest_phrase(phrases, possibility):\n\n return sorted(phrases, key=lambda w: abs(possibility-w.possibility))[0]", "def find_nearest_synsets(target_synsets, subst_synsets, pos: Optional[str] = None):\n # TODO: Parallelize processing\n dists = [\n (tgt_syn, sbt_syn, dist)\n for tgt_syn in target_synsets\n for sbt_syn in subst_synsets\n for dist in [tgt_syn.shortest_path_distance(sbt_syn)]\n if dist is not None\n ]\n\n if len(dists) == 0:\n return None, None\n\n tgt_sense, sbt_sense, _ = min(dists, key=lambda x: x[2])\n\n return tgt_sense, sbt_sense", "def _nearest_datetime(self, datetime_list, target_datetime):\n if not datetime_list:\n raise errors.ParserError(\n \"Input parameter datetime_list length is zero. Required\"\n \" parameters: [datetime.datetime], datetime.datetime\")\n work_list = [entry for entry in datetime_list if entry < target_datetime]\n if not work_list:\n raise errors.ParserError(\n \"work_list length is zero. Entries in datetime_list\"\n \" {} are not < target_datetime {}\".format(datetime_list,\n target_datetime))\n return min(\n work_list,\n key=lambda datetime_entry: abs(datetime_entry - target_datetime))", "def get(self, input_statement):\n statement_list = self.context.storage.get_response_statements(input_statement.text)\n if not statement_list:\n if self.has_storage_context:\n # Use a randomly picked statement\n self.logger.info(\n u'No statements have known responses. 
' +\n u'Choosing a random response to return.'\n )\n return 0, self.context.storage.get_random()\n else:\n raise self.EmptyDatasetException()\n\n closest_match = input_statement\n closest_similarity = -1\n total_similarity = 0\n\n # For each option in the list of options\n for statement in statement_list:\n similarity = self.compare_statements(input_statement, statement)\n\n total_similarity += similarity\n\n if similarity > closest_similarity:\n closest_similarity = similarity\n closest_match = statement\n\n try:\n confidence = closest_similarity / total_similarity\n except:\n confidence = 0\n\n return confidence, closest_match", "def word_nearest(word_list, target, condition = None, consider_phase = True):\n \n if not condition:\n condition = lambda t: True\n \n min_distance = 100\n min_word = None\n \n def word_distance(word1, word2):\n position1 = word1.position\n position2 = word2.position\n\n distance = [a-b for a, b in zip(position1, position2)]\n\n return np.sum(np.abs(distance))\n \n if isinstance(word_list, Word):\n word_list = [word_list]\n elif isinstance(word_list, list):\n #word_list = word_list\n pass\n else:\n print (word_list)\n raise TypeError()\n \n for word in word_list:\n phase = word.phase\n for word_compare in target:\n if not condition(word_compare):\n continue\n elif consider_phase and phase - word_compare.phase:\n continue\n\n distance = word_distance(word, word_compare)\n #print (word_compare, distance)\n if min_distance > distance:\n min_distance = distance\n min_word = word_compare\n elif min_distance == distance:\n pass\n # should be revised\n\n \n return min_word", "def get(self, input_statement):\n statement_list = self.context.storage.get_response_statements(input_statement.text)\n if not statement_list:\n if self.has_storage_context:\n # Use a randomly picked statement\n self.logger.info(\n u'No statements have known responses. 
' +\n u'Choosing a random response to return.'\n )\n return 0, self.context.storage.get_random()\n else:\n raise self.EmptyDatasetException()\n\n closest_match = input_statement\n closest_similarity = -1\n\n # Find the closest matching known statement\n for statement in statement_list:\n text = statement['text']\n tagDiff = statement['tagDiff']\n pDff = statement['pDff']\n similarity = self.compare_statements(input_statement, text)\n # 根据标签匹配度,处理相似度\n if 'strict' in statement:\n # similarity = similarity\n if pDff < 0.3:\n similarity = similarity + 8\n elif (pDff >= 0.3) and (pDff < 0.5):\n similarity = similarity + 3\n elif pDff >= 0.5:\n similarity = similarity - 2\n else:\n if tagDiff == 0:\n similarity = similarity + 22\n elif pDff <= 0.25:\n similarity = similarity + 15\n elif pDff < 0.4 and pDff > 0.25:\n similarity = similarity + 8\n elif (pDff >= 0.3) and (pDff < 0.4):\n similarity = similarity + 2\n\n if similarity > closest_similarity:\n closest_similarity = similarity\n closest_match = text\n # print(tagDiff, pDff, similarity, input_statement,text)\n\n # Convert the confidence integer to a percent\n \n confidence = closest_similarity / 100.0\n return confidence, closest_match", "def closest_match(num,num_list):\n\tdiffs = np.abs(np.subtract(num,num_list))\n\treturn num_list[np.argmin(diffs)]", "def get_next_eligible_statement(self, iter_statements, target_comment, target_statement):\n last_statement_found = target_statement\n try:\n target_statement = next(iter_statements)\n while (target_comment.line > target_statement.startline):\n last_statement_found = target_statement\n target_statement = next(iter_statements)\n return target_statement, last_statement_found\n except StopIteration:\n return None, last_statement_found", "def closest_word_to(word, some_words):\n closest = ''\n distance = len(word)\n for target in some_words:\n this_distance = len(set(target) - set(word))\n if this_distance < distance:\n distance = this_distance\n closest = target\n return closest", "def findClosestMatchingGesture(strokes, gestureList, maxDifference=None):\n if len(gestureList) == 0:\n return None\n\n #gestureList = [list(frozenset(tuple(gesture))) for gesture in gestureList] # make a unique list\n gestureList = frozenset([tuple(gesture) for gesture in gestureList])\n distances = {}\n for g in gestureList:\n levDist = levenshteinDistance(strokes, g)\n if maxDifference is None or levDist <= maxDifference:\n distances.setdefault(levDist, [])\n distances[levDist].append(g)\n\n if not distances:\n return None # No matching gestures are within the tolerance of maxDifference.\n\n return tuple(distances[min(distances.keys())])", "def get_closest_levenshtein(word, possible_words, threshold):\n result = None\n min_distance = 10\n for possible_word in possible_words:\n word_distance = distance(word, possible_word)\n if word_distance < min_distance:\n result = possible_word\n min_distance = word_distance\n result = result if min_distance < threshold else None\n return result, min_distance", "def findNearestCoin(mazeMap, playerLocation, coinsList):\n \n routingTable = sp.dijkstra(mazeMap, playerLocation)\n \n nearest = -1\n distance = float('inf')\n for coin in coinsList:\n if routingTable[coin][1] < distance :\n distance = routingTable[coin][1] < distance\n nearest = coin\n \n return sp.orderPath(routingTable, playerLocation, nearest, [])", "def _get_closest_station_by_zcta_ranked(zcta):\n\n zcta = zcta.zfill(5) # Ensure that we have 5 characters, and if not left-pad it with zeroes.\n lat, lon = 
zcta_to_lat_long(zcta)\n finding_station = True\n rank = 0\n while finding_station:\n rank = rank + 1\n station_ranking = _rank_stations_by_distance_and_quality(lat, lon)\n station, warnings = select_station(station_ranking, rank=rank)\n\n # Ignore stations that begin with A\n if str(station)[0] != 'A':\n finding_station = False\n\n return station, warnings, lat, lon", "def fuzzy_match(pattern, instring, adj_bonus=5, sep_bonus=10, camel_bonus=10,\n lead_penalty=-3, max_lead_penalty=-9, unmatched_penalty=-1):\n score, p_idx, s_idx, p_len, s_len = 0, 0, 0, len(pattern), len(instring)\n prev_match, prev_lower = False, False\n prev_sep = True # so that matching first letter gets sep_bonus\n best_letter, best_lower, best_letter_idx = None, None, None\n best_letter_score = 0\n matched_indices = []\n\n while s_idx != s_len:\n p_char = pattern[p_idx] if (p_idx != p_len) else None\n s_char = instring[s_idx]\n p_lower = p_char.lower() if p_char else None\n s_lower, s_upper = s_char.lower(), s_char.upper()\n\n next_match = p_char and p_lower == s_lower\n rematch = best_letter and best_lower == s_lower\n\n advanced = next_match and best_letter\n p_repeat = best_letter and p_char and best_lower == p_lower\n\n if advanced or p_repeat:\n score += best_letter_score\n matched_indices.append(best_letter_idx)\n best_letter, best_lower, best_letter_idx = None, None, None\n best_letter_score = 0\n\n if next_match or rematch:\n new_score = 0\n\n # apply penalty for each letter before the first match\n # using max because penalties are negative (so max = smallest)\n if p_idx == 0:\n score += max(s_idx * lead_penalty, max_lead_penalty)\n\n # apply bonus for consecutive matches\n if prev_match:\n new_score += adj_bonus\n\n # apply bonus for matches after a separator\n if prev_sep:\n new_score += sep_bonus\n\n # apply bonus across camelCase boundaries\n if prev_lower and s_char == s_upper and s_lower != s_upper:\n new_score += camel_bonus\n\n # update pattern index iff the next pattern letter was matched\n if next_match:\n p_idx += 1\n\n # update best letter match (may be next or rematch)\n if new_score >= best_letter_score:\n # apply penalty for now-skipped letter\n if best_letter is not None:\n score += unmatched_penalty\n best_letter = s_char\n best_lower = best_letter.lower()\n best_letter_idx = s_idx\n best_letter_score = new_score\n\n prev_match = True\n\n else:\n score += unmatched_penalty\n prev_match = False\n\n prev_lower = s_char == s_lower and s_lower != s_upper\n prev_sep = s_char in '_ '\n\n s_idx += 1\n\n if best_letter:\n score += best_letter_score\n matched_indices.append(best_letter_idx)\n\n return p_idx == p_len, score", "def find_matching_snd(string, pos, fst, snd):\n assert 0 <= pos <= len(string)\n assert string[pos] == fst\n\n level = 1\n i = pos\n\n while i < len(string):\n if string[i] == snd:\n if level > 0:\n level -= 1\n else:\n return i\n elif string[i] == fst:\n level += 1\n i += 1", "def get_closest(n):\n while n:\n try:\n return DNSZone.objects.get(name=n)\n except DNSZone.DoesNotExist:\n pass\n n = \".\".join(n.split(\".\")[1:])\n return None", "def call(targetlist, querylist, match=1, mismatch=2, gapopen=5, gapextend=0,\n ksize=31):\n for query in sorted(querylist, reverse=True, key=len):\n bestcigar = None\n bestscore = None\n besttarget = None\n bestorientation = None\n for target in sorted(targetlist, key=lambda record: record.name):\n cigar, score, strand = align_both_strands(\n target.sequence, query.sequence, match, mismatch, gapopen,\n gapextend\n )\n if bestscore is None or 
score > bestscore:\n bestscore = score\n bestcigar = cigar\n besttarget = target\n bestorientation = strand\n\n if bestorientation == -1:\n query.sequence = kevlar.revcom(query.sequence)\n for varcall in make_call(besttarget, query, bestcigar, ksize):\n yield varcall", "def closest_match(desired_language: {str, Language}, supported_languages: list,\n max_distance: int=25) -> (str, int):\n # Quickly return if the desired language is directly supported\n if desired_language in supported_languages:\n return desired_language, 0\n\n # Reduce the desired language to a standard form that could also match\n desired_language = standardize_tag(desired_language)\n if desired_language in supported_languages:\n return desired_language, 0\n\n match_distances = [\n (supported, tag_distance(desired_language, supported))\n for supported in supported_languages\n ]\n match_distances = [\n (supported, distance) for (supported, distance) in match_distances\n if distance <= max_distance\n ] + [('und', 1000)]\n\n match_distances.sort(key=itemgetter(1))\n return match_distances[0]", "def get_closest_point(source, targets, furthest=False):\n distance = float(\"inf\") if not furthest else 0\n position = cmds.xform(\n source, query=True, translation=True, worldSpace=True\n )\n closest_node = None\n for node in targets:\n node_pos = cmds.xform(\n node, query=True, translation=True, worldSpace=True\n )\n node_distance = (MVector(node_pos) - MVector(position)).length()\n is_different = (\n node_distance < distance\n if not furthest\n else node_distance > distance\n )\n if is_different:\n closest_node = node\n distance = node_distance\n\n return closest_node", "def match(self, word_list, expecting):\n\n if word_list:\n\n word = word_list.pop(0)\n\n if word[0] == expecting:\n return word\n else:\n return None\n else:\n return None", "def __find_closest_symbol(symbol):\n closest_symbol = []\n min_dist = float(\"inf\")\n\n for symbol_2 in symbols_info:\n if symbol != symbol_2:\n dist = math.sqrt(\n math.pow(symbol[4][0] - symbol_2[4][0], 2) +\n math.pow(symbol[4][1] - symbol_2[4][1], 2)\n )\n if dist < min_dist:\n min_dist = dist\n closest_symbol = symbol_2\n\n return closest_symbol", "def _find_closest_in_range(ranges: Iterable[CT], what_to_find: CT) -> Optional[CT]:\n\n ranges = sorted(ranges)\n\n while ranges:\n\n middle_item_index = len(ranges) // 2\n middle_item = ranges[middle_item_index]\n\n if what_to_find == middle_item:\n return what_to_find\n\n elif what_to_find > middle_item:\n\n if len(ranges) == 1:\n return middle_item\n\n ranges = ranges[middle_item_index:]\n\n elif what_to_find < middle_item:\n\n if ranges[middle_item_index - 1] < what_to_find:\n return ranges[middle_item_index - 1]\n\n ranges = ranges[:middle_item_index]", "def get_closest_commands(\n project_dictionary: Dictionaries, command_name: str, num: int = 3\n ) -> List[str]:\n known_commands = project_dictionary.command_name.keys()\n closest_matches = difflib.get_close_matches(command_name, known_commands, n=num)\n return closest_matches", "def findSmallest(distancesWithNames):\n smallest = distancesWithNames[0][2]\n smallestIndex = -1\n for i in range(len(distancesWithNames)):\n if smallest >= distancesWithNames[i][2]:\n smallest = distancesWithNames[i][2]\n smallestIndex = i\n return smallestIndex", "def _nearest(arrlist_1, arrlist_2):\n tree = KDTree(arrlist_1);\n pts = tree.query(arrlist_2)\n\n return tree.data[pts[1][pts[0].argmin()]]", "def _find_closest_shape_in_list(self, shapes, datum):\n closest_dist_yet = np.inf\n closest_shape = None\n 
for s in shapes:\n closest_point_data = s.find_closest_point_data(datum)\n if closest_point_data.distance < closest_dist_yet:\n closest_shape = s\n closest_dist_yet = closest_point_data.distance\n return closest_shape", "def closest_fruit(maze, currX, currY, fruit_list):\n curr_min = sys.maxsize\n for position in fruit_list:\n distance = Astar(maze, currX, currY, position[0], position[1])\n if distance < curr_min:\n curr_min = distance\n return curr_min", "def closest_other_location(state):\n locations = others_locations(state)\n target = closest_other(state)\n return locations[target]" ]
[ "0.6810408", "0.59246445", "0.5841984", "0.535423", "0.5303888", "0.5230024", "0.5214862", "0.5204742", "0.51656586", "0.51357555", "0.513496", "0.5132792", "0.50543165", "0.5052505", "0.50511724", "0.5032725", "0.50302774", "0.50203186", "0.4952436", "0.49399436", "0.4928132", "0.49229452", "0.49205458", "0.49170104", "0.49007362", "0.48959884", "0.48958474", "0.48920688", "0.48820207", "0.48542544" ]
0.6239972
1
Check that the chatbot's storage adapter is available to the logic adapter and that there is at least one statement in the database.
def can_process(self, statement):
    return self.chatbot.storage.count()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check(self):\n # validate contents still to do - for now just check if it exists\n return os.path.exists(self.getDefaultDatabaseConnectionParameter()['path'])", "def check(self, connection):\n return True", "def check_connection(self):\n pass", "def is_available(**kwargs: Any) -> bool:\n try:\n _check_available()\n except Unavailable as e:\n logger.info('Database not available: %s', e)\n return False\n return True", "def _check_available() -> None:\n current_session().query(\"1\").from_statement(text(\"SELECT 1\")).all()", "def check_extensions(self):\n extensions = self.cloud.get_network_extensions()\n for network_extension in self.neutron_extensions:\n if network_extension not in extensions:\n LOGGER.warning(\n \"Cannot find Neutron extension: %s\", network_extension)\n self.is_skipped = True\n break", "def check_connection(self):\n return False", "def _check_connection(self):\n if \"_connection\" not in self.__dict__:\n message = \"use connect method before doing operation on this database\"\n raise Exception(message)", "def _do_check(self):\n try:\n #breakpoint()\n ApplicationsItem.objects.exists()\n #print (\"Checking\")\n return True\n\n except Exception:\n client.captureException()\n return False", "def check_availability(self):\n pass", "def allowed(self, request):\n try:\n storage_backend = stx_api.sysinv.get_storage_backend(request)\n if stx_api.sysinv.STORAGE_BACKEND_CEPH in storage_backend:\n return True\n except Exception:\n pass\n return False", "def check_for_data():\n if not (os.path.exists(ep.get_test_data_path()) or os.path.exists(ep.get_dbn_weight_path())):\n return False\n return True", "def validate_oee_error_8(self):\n sql = \"\"\"\n SELECT * FROM bdeview WHERE f6 NOT IN (SELECT f6 FROM bdeview LIMIT 1)\n \"\"\"\n lines = self.c.execute(sql).fetchall()\n return lines==[], lines", "def _check_queryable(self):\n if not self._bucket:\n raise Exception('Bucket has not been selected')", "def check_database(self):\r\n self.logger.log(logger.LogLevel.INFO, 'Checking database tables')\r\n table_stats = self.check_table(query.TABLE_STATS, query.QUERY_CREATE_TABLE_STATS())\r\n if table_stats is False:\r\n return False\r\n table_tweets = self.check_table(query.TABLE_TWEETS, query.QUERY_CREATE_TABLE_TWEETS())\r\n if table_tweets is False:\r\n return False\r\n table_posts = self.check_table(query.TABLE_POSTS, query.QUERY_CREATE_TABLE_POSTS())\r\n if table_posts is False:\r\n return False\r\n table_follows = self.check_table(query.TABLE_FOLLOWS, query.QUERY_CREATE_TABLE_FOLLOWS())\r\n if table_follows is False:\r\n return False\r\n return True", "def is_not_used(self):\n pass", "def _CheckAdb(cls):\n if cls._adb_command:\n return\n cls._adb_command = utils.FindExecutable(constants.ADB_BIN)\n if not cls._adb_command:\n raise errors.NoExecuteCmd(\"Can't find the adb command.\")", "def check_empty_db(loaded_db):\n _Helpers.check_db(\n loaded_db, nb_exps=0, nb_algos=0, nb_trials=0, nb_benchmarks=0\n )", "def is_version_data_existed(self):\n # if exists, skip\n # return \n\n return True", "def check_hawq(self):\n import params\n Logger.info(\"--- Check if HAWQ can write and query from a table ---\")\n table = params.table_definition['HAWQ']\n try:\n self.drop_table(table)\n self.create_table(table)\n self.insert_data(table)\n self.query_data(table)\n self.validate_data(table)\n except:\n Logger.error(\"SERVICE CHECK FAILED: HAWQ was not able to write and query from a table\")\n self.checks_failed += 1\n finally:\n self.drop_table(table)", "def checkPersistence(self, _, __): # 
pylint: disable=C0103\n return False", "def test_db_missing_read(env_setup, env_table, db_insert_test_data, db_read_test_data, response_test_data):\n test_string = DbManager(SqLiteHelper, {\"db_path\": env_setup, \"master_table\": env_table}) \\\n .processor(db_read_test_data.get(\"invalid2\")) # testing\n assert test_string == response_test_data.get(\"invalid_read2\")", "def is_bot(self) -> bool:", "def test_not_present_in_any_db(self):\n price = find_cheapest_price(\"Only Star Wars\")\n self.assertTrue(price == None)", "def available(self):\n\t\t\treturn False", "def available(self):\n\t\t\treturn False", "def available(self):\n\t\t\treturn False", "def needs_commons_db(self):\n return False", "def test_neg_exists_with_no_paramters(self):\n with pytest.raises(TypeError) as typeError:\n self.as_connection.exists()\n\n assert \"argument 'key' (pos 1)\" in str(typeError.value)", "def test_create_experiment_bad_storage(self):\n name = \"oopsie_bad_storage\"\n # Make sure there is no existing storage singleton\n\n with pytest.raises(NotImplementedError) as exc:\n create_experiment(\n name=name,\n storage={\"type\": \"legacy\", \"database\": {\"type\": \"idontexist\"}},\n )\n\n assert \"Could not find implementation of Database, type = 'idontexist'\" in str(\n exc.value\n )" ]
[ "0.5795223", "0.5549578", "0.5548149", "0.55225635", "0.545574", "0.54191005", "0.53849185", "0.53732127", "0.53596324", "0.5316263", "0.53120834", "0.53007185", "0.52840436", "0.52822834", "0.5219052", "0.52115256", "0.51778877", "0.5175398", "0.51726717", "0.5141932", "0.5140442", "0.51343304", "0.51211387", "0.51158756", "0.51153183", "0.51153183", "0.51153183", "0.51076066", "0.5103703", "0.51032436" ]
0.55863464
1
This function adds a task to the file todo.txt
def add():
    try:
        task = sys.argv[2]
        file = open("todo.txt", "a")
        file.write(task + "\n")
        print('Added todo: "{}"'.format(task))
    except IndexError:
        print("Error: Missing todo string. Nothing added!")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_task():\n\n yourTask = []\n line = input(\"Add your task: \")\n yourTask.append(line)\n taskfile = open('tasks.txt', 'a')\n for line in yourTask:\n taskfile.write(\"%s\\n\" % line)\n taskfile.close()\n\n import menu", "def add_todo(taskname, deadline, priority, reminder, deleteflag):\n autodel()\n task = {\n 'name': taskname,\n 'deadline': str(deadline),\n 'priority': priority,\n 'reminder': reminder,\n 'no_del': deleteflag\n }\n\n if not exists(task['name']):\n with open(todofile, 'a') as todo:\n try:\n jdump = json.dumps(task) + '\\n'\n todo.write(jdump)\n return 0\n except json.decoder.JSONDecodeError:\n return 1", "def write_todo(self, todo):\n if todo != None:\n print 'added \"%s\"' % todo.text\n self.new_items.append(todo)", "def save_todo_file(self):\n\n if os.path.exists('TODO.txt'):\n os.remove('TODO.txt')\n todo_fp = open('TODO.txt', 'w')\n todo_items = self.todo_scroll_cell.get_item_list()\n in_progress_items = self.in_progress_scroll_cell.get_item_list()\n done_items = self.done_scroll_cell.get_item_list()\n for item in todo_items:\n todo_fp.write(item + '\\n')\n todo_fp.write('__IN_PROGRESS__' + '\\n')\n for item in in_progress_items:\n todo_fp.write(item + '\\n')\n todo_fp.write('__DONE__' + '\\n')\n for item in done_items:\n todo_fp.write(item + '\\n')\n todo_fp.close()\n self.master.show_message_popup('Saved', 'Your TODO list has been saved!')", "def write_task(task):\n \n logfile = open(TASKS_ORG_FILE, 'a')\n\n str = \"* TODO %s\\n:PROPERTIES:\\n:guid: %s\\n:END:\\n%s\\n\" % (task.title, task.guid, task.description)\n logfile.write(str)\n\n logfile.close()", "def add(self, task):\n self._count += 1\n path = os.path.join(self._root, \"%d_%s\" % (self._count, task.guid))\n j.sal.fs.writeFile(path, self._serialize_task(task))", "def insert_todo_task(self, task):\n next_id = self.get_largest_id() + 1\n self.conn.execute(\"\"\"\n INSERT INTO todo (id, desc, due_date, due_time)\n VALUES (?, ?, ?, ?);\n \"\"\", (next_id, task.desc, task.due_date, task.due_time))\n self.conn.commit()", "def add_task(self):\n task_title = self.display.ask_user_title()\n task_description = self.display.ask_user_description()\n task_due = self.display.ask_user_due()\n\n # Call the db function to add data\n self.db_link.add_task(task_title, task_description, task_due)\n self.display.print_success('\\nTask successfully added.\\n')", "def add(self, task):\n pass", "def todo_added(name, description):", "def add_todo():\n task = flask.request.form[\"task\"]\n todos.append(ToDo(task))\n return \"success\"", "def add_task():\n # get values from user\n responses = accept_inputs([\"Task label\", \"Short task description\", \"Parent task label\"])\n # insert into db\n query_no_results(\"insert into task values(?, ?, ?)\",\n [responses[\"Task label\"], responses[\"Short task description\"], responses[\"Parent task label\"]])\n print(\"New task created\")", "async def add(ctx, *, new_task: commands.clean_content):\n tasks[ctx.guild].append(new_task)\n await ctx.send(\"Added task \" + new_task)\n if len(ctx.message.mentions) > 0:\n await ctx.send(\n \" \".join(user.mention for user in ctx.message.mentions)\n + \" You have a new task!\"\n )", "def edit_todo(taskname, deadline, reminder, priority, deleteflag):\n edit_task = ''\n editables = ['deadline', 'reminder', 'priority', 'no_del']\n edited = [deadline, reminder, priority, deleteflag]\n\n if not exists(taskname):\n return False\n\n with open(todofile, 'r') as todo:\n tasks = todo.readlines()\n for task in tasks:\n try:\n task = json.loads(task)\n if 
taskname == task['name']:\n edit_task = task\n break\n except json.decoder.JSONDecodeError:\n return None\n \n remove_todo(taskname)\n\n #editing the tasks here\n for i, editable in enumerate(editables):\n if edited[i] is not None:\n if edited[i] == \"remove\":\n edit_task[editable] = None\n else:\n edit_task[editable] = edited[i]\n\n #making the task deadline json-serializable\n edit_task['deadline'] = str(edit_task['deadline'])\n\n with open(todofile, 'a') as todo:\n try:\n jdump = json.dumps(edit_task) + '\\n'\n todo.write(jdump)\n except json.decoder.JSONDecodeError:\n return None\n return True", "def add_task(self, task):\n\n # The pyrtm module is dynamic.\n # pylint: disable=no-member\n\n added = self.rtm.tasks.add(timeline=self.timeline,\n name=task['name'], list_id=self.list_id, parse=0)\n\n # TODO: record undoable transactions and undo them upon kb interrupt\n #if added.transaction.undoable == \"1\":\n #self.transactions.append(added.transaction.id)\n\n args = dict(\n timeline = self.timeline,\n list_id = self.list_id,\n taskseries_id = added.list.taskseries.id,\n task_id = added.list.taskseries.task.id,\n )\n\n if task.get('tags', None):\n # Should this be setTags?\n self.rtm.tasks.addTags(tags=','.join(task['tags']), **args)\n\n if task.get('due_date', None):\n self.rtm.tasks.setDueDate(due=task['due_date'],\n # TODO: Can we determine has_due_time?\n has_due_time=1,\n # We're using iso8601 so we don't need them to be specially parsed.\n parse=0,\n **args)\n\n if task.get('estimated', None):\n self.rtm.tasks.setEstimate(estimate=task['estimated'], **args)\n\n if task.get('priority', None):\n self.rtm.tasks.setPriority(priority=task['priority'], **args)\n\n if task.get('repeat', None):\n self.rtm.tasks.setRecurrence(repeat=task['repeat'], **args)\n\n if task.get('notes', None):\n if isinstance(task['notes'], list):\n notes = task['notes']\n else:\n notes = [ task['notes'] ]\n for note in notes:\n self.rtm.tasks.notes.add(note_title=note, note_text=note, **args)\n\n if task.get('url', None):\n self.rtm.tasks.setURL(url=task['url'], **args)\n\n # do the status changes last\n if task.get('completed', None):\n self.rtm.tasks.complete(**args)\n\n if task.get('deleted', None):\n self.rtm.tasks.delete(**args)\n\n return added", "def add_task(self, task):\n res = self.conn.cursor().execute(\"\"\"SELECT count(*) as \"order\" FROM tasks WHERE project_id=?\"\"\",\n (task['project_id'],))\n res = res.fetchone()\n order = int(res['order']) + 1\n cursor = self.conn.cursor().execute(\"INSERT INTO tasks VALUES (null, ?, ?, ?, ?, ?,?)\",\n (task['project_id'], order, task['description'], 0, datetime.now(), datetime.now(),))\n self.conn.commit()\n self.conn.cursor().execute(\"UPDATE projects SET last_update=? 
WHERE id=?\",\n (datetime.now(), task['project_id'],))\n self.conn.commit()\n return self.get_task(cursor.lastrowid)", "def add_task(self):\n print('Enter task')\n text_task = input()\n print('Enter deadline')\n new_task = self.Table(task=text_task, deadline=datetime.strptime(input(), '%Y-%m-%d'))\n self.session.add(new_task)\n self.session.commit()\n print('The task has been added!')\n print()", "def add_task(self, task):\n self.tasks.append(task)", "def add_task(self, task):\n self.tasks.append(task)", "def add_task(self, task):\n self.tasks.append(task)", "def add_task(self, task):\n self.tasks.append(task)", "def getTasks():\n\ttasks = open(\"todo.txt\").readlines()\n\tif len(tasks):\n\t for num in range(len(tasks) - 1, -1, -1):\n\t print(\"[%d] %s\" % (num + 1, tasks[num]), end=\"\")\n\telse:\n\t print(\"There are no pending todos!\")", "def save_tasks(self, task_file):\n\n\t\tutil.save(self.tasklist.tasks, task_file)", "def new_task(self):\n print \"Create a new task.\"\n\n # Collect new task info from user\n description = raw_input(\"Enter task (140 characters max) > \")\n due_date = raw_input(\"Enter due date as 'year-mm-dd' (optional). > \")\n tags = raw_input(\n \"Enter tags for the task (comma separated) (optional). > \")\n tag_list = [tag.strip() for tag in tags.split(',')]\n try:\n new_task = doto.Task(self.user, description, due_date, tag_list)\n except (NameError, ValueError) as e:\n # On error, print and return.\n print \"Task not created. Error: \", e\n raw_input(\"Press Enter to continue.\")\n return\n self.current_collection.add(new_task)\n return", "def create_task(text):\n new_task = Tasks(task_text=text) \n new_task.save()", "def add_task(self, task):\n raise NotImplementedError()", "def todo():\n print(\"OK\")", "def add(self, task):\n raise NotImplementedError()", "def assign(self, task=None):\n if task is None:\n print(\"\\n*** Add Task ***\")\n name = input(\"Name of the task?: \")\n try:\n priority = int(input(\"Priority of the task (1-->5): \"))\n except ValueError:\n priority = 1\n steps = []\n while 1:\n step = input(\"Add step #\" + str(len(steps) + 1) + \" (Enter empty to finish): \")\n if step:\n steps.append(step)\n else:\n break\n self.tasks.append(Task(name, priority, steps))\n self.save()\n self.sort()\n print(\"*\"*16)\n else:\n self.tasks.append(task)\n self.save()\n self.sort()", "def addTask(self, task):\n self.tasklist.append(task)" ]
[ "0.7847216", "0.7563006", "0.74652207", "0.7382538", "0.7079825", "0.6889404", "0.6856803", "0.68142754", "0.6805257", "0.67913866", "0.67630345", "0.6759012", "0.6645988", "0.6609657", "0.66038746", "0.6500612", "0.64990604", "0.6450098", "0.6450098", "0.6450098", "0.6450098", "0.6431113", "0.63932496", "0.63803786", "0.63691", "0.6365954", "0.6330748", "0.63159305", "0.6296533", "0.62770754" ]
0.85057044
0
This function reads the tasks from todo.txt and prints them to the screen
def getTasks():
    tasks = open("todo.txt").readlines()
    if len(tasks):
        for num in range(len(tasks) - 1, -1, -1):
            print("[%d] %s" % (num + 1, tasks[num]), end="")
    else:
        print("There are no pending todos!")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_todo_file(self):\n\n todo = []\n in_progress = []\n done = []\n if os.path.exists('TODO.txt'):\n todo_fp = open('TODO.txt', 'r')\n state = 0\n line = todo_fp.readline()\n while line:\n line = line.strip()\n if state == 0:\n if line == '__IN_PROGRESS__':\n state = 1\n elif len(line) > 1:\n todo.append(line)\n elif state == 1:\n if line == '__DONE__':\n state = 2\n elif len(line) > 1:\n in_progress.append(line)\n elif state == 2:\n if len(line) > 1:\n done.append(line)\n line = todo_fp.readline()\n todo_fp.close()\n self.todo_scroll_cell.add_item_list(todo)\n self.in_progress_scroll_cell.add_item_list(in_progress)\n self.done_scroll_cell.add_item_list(done)", "def interactive(todofile):\n tmpfile = tempfile.NamedTemporaryFile(suffix='.txt', prefix='todo-',\n delete=False)\n print >> tmpfile\n print >> tmpfile , '# Todo items should be formed as <date> -- <todo>'\n print >> tmpfile , '# The date field is optional.'\n print >> tmpfile , '# Lines starting with # are ignored.'\n tmpfile.close()\n subprocess.call(['sensible-editor', tmpfile.name])\n with open(tmpfile.name) as writtenfile:\n add_items(todofile, writtenfile.readlines())\n os.remove(tmpfile.name)", "def save_todo_file(self):\n\n if os.path.exists('TODO.txt'):\n os.remove('TODO.txt')\n todo_fp = open('TODO.txt', 'w')\n todo_items = self.todo_scroll_cell.get_item_list()\n in_progress_items = self.in_progress_scroll_cell.get_item_list()\n done_items = self.done_scroll_cell.get_item_list()\n for item in todo_items:\n todo_fp.write(item + '\\n')\n todo_fp.write('__IN_PROGRESS__' + '\\n')\n for item in in_progress_items:\n todo_fp.write(item + '\\n')\n todo_fp.write('__DONE__' + '\\n')\n for item in done_items:\n todo_fp.write(item + '\\n')\n todo_fp.close()\n self.master.show_message_popup('Saved', 'Your TODO list has been saved!')", "def print_instructions():\n file = open('instructions.txt', 'r')\n content = file.read()\n print(colors.Color.GREEN + content + colors.Color.END)\n file.close()", "def read_file(filename) -> List[Todo]:\n with pathlib.Path(filename).expanduser().open('r') as fp:\n return [Todo(_id, line) for _id, line in enumerate(fp)]", "def main():\r\n save_file_location = \"Item_List.txt\"\r\n data_file_a = open(save_file_location, \"a\") # Opens ItemList.txt which\r\n # is accessible in the file variable, in append mode (using this so that\r\n # if the file exists, nothing happens, but if it does not exist, it gets\r\n # created from w3schools.com\r\n data_file_a.close() # Close the file, I now know it exists\r\n loaded_list = load_from_file(save_file_location)\r\n print(\"Welcome to the To-Do List - Version: 0.1.2\")\r\n divider(42) # Length of welcome statement above\r\n menu_loop(loaded_list, save_file_location)", "def read_file(filename=\"\"):\n with open(filename, 'r') as f:\n f_contents = f.read()\n print(f_contents, end='')", "def todo():\n print(\"OK\")", "def read_todo(taskname):\n autodel()\n with open(todofile, 'r') as todo:\n for task in todo:\n task = json.loads(task)\n if taskname in task['name']:\n return [task['name'], \n task['deadline'], \n task['priority'],\n task['reminder'],\n task['no_del']]\n return None", "def add():\n\ttry:\n\t task = sys.argv[2]\n\t file = open(\"todo.txt\", \"a\")\n\t file.write(task + \"\\n\")\n\t print('Added todo: \"{}\"'.format(task))\n\texcept IndexError:\n\t print(\"Error: Missing todo string. 
Nothing added!\")", "def getReport():\n\tpending = len(open(\"todo.txt\").readlines())\n\tdone = len(open(\"done.txt\").readlines())\n\tprint(\n\t \"{} Pending : {} Completed : {}\"\n\t .format(datetime.today().strftime(\"%Y-%m-%d\"),\n\t pending,\n\t done\n\t ))", "def printall():\n all_tasks = {\n 'Name': [],\n 'Deadline':[],\n 'Priority':[],\n 'Autodelete':[]\n }\n with open(todofile, 'r') as todo:\n try: #list compre for loading dict objs in to list, sorting by deadline\n tasks = sorted([json.loads(task) for task in todo.readlines()], \n key= lambda task: task['deadline'])\n except json.decoder.JSONDecodeError:\n return 1\n if not tasks:\n return None\n for task in tasks:\n all_tasks['Name'].append(task['name'])\n all_tasks['Deadline'].append(task['deadline'])\n all_tasks['Priority'].append(task['priority'])\n all_tasks['Autodelete'].append(\n 'No' if task['no_del'] else 'Yes')\n return all_tasks", "async def info(self, ctx):\r\n openfile = open(\"info.txt\", \"r\")\r\n embed = discord.Embed(title='Aristobot', description='This is a bot made by Aristoza that uses the TrueSkill '\r\n 'python package (http://trueskill.org/) which is based on '\r\n 'the '\r\n 'TrueSkill rating system developed by Microsoft.',\r\n color=33023)\r\n embed.add_field(name='How it works', value=openfile.read(), inline=False)\r\n await ctx.send(embed=embed)", "def read_file(file):\n f = open(file, 'r')\n print(f.read())", "def Run():\n file_name = AskForFileName()\n file_content = ReadFileContents(file_name)\n head_list = BuildHeadList(file_content)\n atom_list = BuildAtomList(file_content)\n tail_list = BuildTailList(file_content)\n WriteNewFile(head_list, atom_list, tail_list)", "def exercise_retrieve(file_name):\n with open(file_name, \"r\") as f:\n content = f.read()\n return print(content)", "def search_todo(filtered_files):\n\n global F_COUNTER\n global SEARCHED\n todo = re.compile('\\\\bTODO\\\\b.*')\n fixme = re.compile('\\\\bFIXME\\\\b.*')\n\n for files in filtered_files:\n f = open(os.path.abspath(files), 'r')\n printed = False\n SEARCHED += 1\n for n, row in enumerate(f.readlines()):\n\n found_todo = todo.search(row)\n found_fixme = fixme.search(row)\n if found_todo or found_fixme:\n if not printed:\n print('')\n click.secho(files, fg='blue', bold=True)\n printed = True\n F_COUNTER += 1\n if found_todo:\n pretty_print(str(n+1), found_todo.group())\n else:\n pretty_print(str(n+1), found_fixme.group())\n\n f.close()", "def view(self):\n le = os.path.getsize(\"/home/theodis/PycharmProjects/Assignment/Dept.txt\")\n if le == 0:\n tkinter.messagebox.showerror(\"error\", \"File is empty\")\n else:\n f = open(\"Dept.txt\", \"rb\")\n d = pickle.load(f)\n data = \"\"\n for i, j in d.items():\n da = \"\"\n for k, l in j.items():\n da = da + l + \"\\t\" + \"\\t\"\n data = data + da + \"\\n\"\n self.display.insert(END, data)\n self.display.config(state=DISABLED)", "def exercise2(self):\n my_file = open('my_file.txt', 'w+')\n my_file.write(\"Look Ma! 
I'm a file\")\n my_file.close()\n content = my_file.read()\n print content", "def add_task():\n\n yourTask = []\n line = input(\"Add your task: \")\n yourTask.append(line)\n taskfile = open('tasks.txt', 'a')\n for line in yourTask:\n taskfile.write(\"%s\\n\" % line)\n taskfile.close()\n\n import menu", "def load_from_file(save_location): # This is a function for readability -\r\n # opens txt file in read mode and loads it\r\n # into an array (list) of ListItem variables\r\n data_file_r = open(save_location, \"r\") # Open txt file in read mode\r\n list_item = [\"Text\", -1, 2, True] # Item, Item Priority, group, is visible\r\n todo = [] # make a list of lists\r\n temp = 1 # Temporary counter variable to reconstruct lists from .txt file\r\n line_counter = 1\r\n try:\r\n for item in data_file_r: # loop through each line in the file, one at\r\n # a time - from w3schools.com\r\n if (line_counter - 1) % 5 != 0 and line_counter > 0:\r\n cleaned_item = \"\"\r\n for character_index in range(len(\r\n item)): # Loop through each character in the extracted\r\n # string\r\n if character_index != len(\r\n item) - 1: # if it is not the last character, add\r\n # it to the cleaned string\r\n cleaned_item += item[character_index]\r\n # Add every character to a\r\n # but \\n\r\n if temp == 1: # Item Text\r\n list_item[0] = cleaned_item\r\n temp = 2\r\n elif temp == 2: # Item Priority\r\n list_item[1] = int(cleaned_item)\r\n temp = 3\r\n elif temp == 3: # Item Group\r\n list_item[2] = int(cleaned_item)\r\n temp = 4\r\n elif temp == 4: # Is Visible\r\n if cleaned_item == \"False\":\r\n list_item[3] = False\r\n else: # Assume the item is visible if the text is not\r\n # False\r\n list_item[3] = True\r\n todo.insert(0, ListItem(list_item[0], list_item[1],\r\n list_item[2], list_item[3]))\r\n temp = 1\r\n else: # If some error occurred and a condition outside of the\r\n # possible four is met, restart\r\n temp = 1\r\n line_counter += 1\r\n except ValueError:\r\n print(\"An error has occurred trying to load the file\")\r\n result = int(clean_input(\r\n \"Please enter a 2 to overwrite the current save file and start \"\r\n \"over or any other number to exit the program\"))\r\n if result == 2:\r\n key = random.randint(2, 9) # Generate a random integer between 2\r\n # and 9 to be used as a second dynamic check\r\n if key == 2:\r\n key = 1 # If the random number is 2, set it to one so that\r\n # the same number (2) cannot be used as the verification number\r\n result2 = int(clean_input(\"Are you sure you want to delete all \"\r\n \"of your saved data\\nEnter {0} to \"\r\n \"proceed, or anything else to \"\r\n \"cancel\".format(str(key))))\r\n if result2 == key:\r\n data_file_w = open(\"C:Item_List.txt\", \"w\")\r\n data_file_w.close()\r\n todo = []\r\n print(\"Save Data Erased\")\r\n return todo # Return an empty list if file load failed\r\n else:\r\n print(\"Program Exiting\")\r\n quit(1)\r\n else:\r\n print(\"Program Exiting\")\r\n quit(1) # Exit the program with the exit code of 1\r\n data_file_r.close()\r\n # All the list functions above referenced from w3schools.com What is\r\n # happening above: Opening the file, initializing a list to hold all\r\n # four pieces of data, then after pulling the data from the file and\r\n # storing in the list, it is copied (not referenced) into my main list\r\n # of ListItem objects\r\n return todo", "def read_file():\n # Create a file object called login_details, and give option to read file\n login_details = open(\"login_details.txt\",\"r\")\n # Create a list containing each 
line of login_details. List is called contents\n contents = login_details.readlines()\n login_details.close()\n return contents", "def read_file(filename=\"\"):\n with open(filename, encoding=\"UTF-8\") as f:\n for line in f:\n print(line, end='')", "def do_list(self, arg):\n try:\n cprint (\"Here are your todo lists: \\n\", 'blue')\n app.ToDoApp.to_view_todo()\n\n except ValueError as e:\n cprint(e, 'red')", "def diet_retrieve(file_name):\n with open(file_name, \"r\") as f:\n content = f.read()\n return print(content)", "def read_file(filename=\"\"):\n with open(filename, 'r', encoding='utf-8') as fl:\n print(fl.read(), end='')", "def print_access_token_file():\n if AccessData.access_token_file_exists():\n print 'CONTENTS OF %s:' % (AccessData.ACCESS_TOKEN_FILE,)\n with open(AccessData.ACCESS_TOKEN_FILE) as f:\n for line in f.readlines(): print line.strip('\\n')\n else:\n print 'token file %s does not exist' % (AccessData.ACCESS_TOKEN_FILE,)", "def readme(fname='README'):\n with open(os.path.join(os.path.dirname(__file__), fname)) as file:\n return file.read()", "def open_file():\n filepath = askopenfilename(\n filetypes=[(\"Text Files\", \"*.txt\"), (\"All Files\", \"*.*\")]\n )\n if not filepath:\n return\n txt_edit.delete(1.0, tk.END)\n with open(filepath, \"r\") as input_file:\n text = input_file.read()\n txt_edit.insert(tk.END, text)\n window.title(f\"Simple Text Editor - {filepath}\")", "def read_filename(self, filename):\r\n self.text_lines = task3.read_text_file(filename)" ]
[ "0.729495", "0.6513712", "0.6332467", "0.6318393", "0.6303612", "0.62567395", "0.6108343", "0.6103777", "0.60373193", "0.59762686", "0.59441334", "0.58947873", "0.58890957", "0.58715385", "0.5841368", "0.5791715", "0.57458115", "0.57139945", "0.57078445", "0.57034403", "0.5692058", "0.5691547", "0.5627209", "0.5601015", "0.5581231", "0.5569642", "0.55673957", "0.5557207", "0.55564547", "0.5553337" ]
0.70918095
1
This function strikes off a task that is done, deleting it from todo.txt and adding it to done.txt.
def markOff(isdelete=0):
    try:
        taskId = sys.argv[2]
        tasks = open("todo.txt").readlines()
        file = open("todo.txt", "w")
        doneTasks = open("done.txt", "a")
        flag = True
        for task in range(len(tasks)):
            if task + 1 == int(taskId):
                flag = False
                if isdelete == 1:
                    continue
                elif isdelete == 0:
                    data = "x {} {}".format(datetime.today().strftime("%d/%m/%Y"), tasks[task])
                    doneTasks.write(data)
            else:
                file.write(tasks[task])
        if not isdelete:
            if flag:
                print("Error: todo #%s does not exist." % (taskId))
            else:
                print("Marked todo #%s as done." % (taskId))
        if isdelete:
            if flag:
                print("Error: todo #%s does not exist. Nothing deleted." % (taskId))
            else:
                print("Deleted todo #%s" % (taskId))
    except IndexError:
        if isdelete:
            print("Error: Missing NUMBER for deleting todo.")
        else:
            print("Error: Missing NUMBER for marking todo as done.")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def finish_todo(self, todo):\n self.updated_items.append(todo._replace(done=True))\n print 'completed \"%s\"' % todo.text", "def save_todo_file(self):\n\n if os.path.exists('TODO.txt'):\n os.remove('TODO.txt')\n todo_fp = open('TODO.txt', 'w')\n todo_items = self.todo_scroll_cell.get_item_list()\n in_progress_items = self.in_progress_scroll_cell.get_item_list()\n done_items = self.done_scroll_cell.get_item_list()\n for item in todo_items:\n todo_fp.write(item + '\\n')\n todo_fp.write('__IN_PROGRESS__' + '\\n')\n for item in in_progress_items:\n todo_fp.write(item + '\\n')\n todo_fp.write('__DONE__' + '\\n')\n for item in done_items:\n todo_fp.write(item + '\\n')\n todo_fp.close()\n self.master.show_message_popup('Saved', 'Your TODO list has been saved!')", "def autodel(): #i hate this code so much\n today, tasks = datetime.today(), []\n to_remove_indexes = []\n deleted_tasks = 0\n\n with open(todofile, 'r') as todo:\n tasks = todo.readlines()\n for i, task in enumerate(tasks):\n try:\n task = json.loads(task)\n except json.decoder.JSONDecodeError:\n return False, False\n if task['deadline'] == \"None\": #because i converted to string in adding\n continue\n dline = datetime.strptime(task['deadline'], \"%Y-%m-%d %H:%M:%S\")\n if dline < today and not task['no_del']:\n to_remove_indexes.append(i)\n deleted_tasks += 1\n\n for index in to_remove_indexes[::-1]:\n del tasks[index]\n \n with open(todofile, 'w') as todo:\n for task in tasks:\n todo.write(task)\n \n return deleted_tasks, True", "def mark_as_done(self, task):\n raise NotImplementedError('')", "def add():\n\ttry:\n\t task = sys.argv[2]\n\t file = open(\"todo.txt\", \"a\")\n\t file.write(task + \"\\n\")\n\t print('Added todo: \"{}\"'.format(task))\n\texcept IndexError:\n\t print(\"Error: Missing todo string. Nothing added!\")", "def remove_todo(taskname):\n tasks = []\n found = False #track if todo item was found\n with open(todofile, 'r') as todo:\n tasks = todo.readlines()\n for i, task in enumerate(tasks):\n task = json.loads(task)\n if task['name'] == taskname:\n del tasks[i]\n found = True\n break\n\n with open(todofile, 'w') as todo:\n for task in tasks:\n todo.write(task)\n \n return found", "def change_task(self):\n sel_task = self.find_task()\n if sel_task is False:\n return\n\n # We have a valid task, let's change it.\n self.clear_screen()\n self.display_task(sel_task)\n print \"\\n'd': Mark this task done\"\n print \"'t': Change tags of this task\"\n print \"'x': Remove this task permanently (cannot be undone)\"\n print \"'c': Cancel and return to main menu.\"\n selection = None\n\n # Continue until user cancels\n while selection != 'c':\n selection = raw_input(\n \"Enter command for selected task > \").strip().lower()\n\n if selection == 'd':\n sel_task.mark_done(self.user)\n self.current_collection.archive()\n break\n\n if selection == 't':\n user_input = raw_input(\n \"Overwrite existing tags? y/n > \"\n ).strip().lower()\n if user_input in ('y', 'yes'):\n del sel_task.tags\n user_tags = raw_input(\n \"Enter new tags (comma separated) (optional). > \")\n sel_task.tags = [\n tag.strip() for tag in user_tags.split(',')]\n break\n\n if selection == 'x':\n if raw_input(\"Delete this task? y/n > \") in ('y', 'Y'):\n delete = self.current_collection.delete(sel_task)\n if delete:\n raw_input(\"Task deleted. Press Enter\")\n break\n else:\n raw_input(\"Task not deleted. 
Try again.\")\n continue\n else:\n print \"Please enter valid command.\"\n return", "def deleteTask():\n\tmarkOff(isdelete = 1)", "def mark_tasks(data):\n try:\n i =0\n while i<30:\n new_date = (date.today() + timedelta(days=i)).strftime(\"%d-%m-%Y\")\n if data.get(new_date)==None:\n i+=1\n continue\n \n os.system(\"clear\")\n message = \"\"\" \\t\\t===============Mark tasks ============== \"\"\"\n print(message)\n print(\"\\nAt date: \", new_date)\n new_lst = [[x[0],idx] for idx,x in enumerate(data[new_date]) if x[1]<0]\n\n for j in range(len(new_lst)):\n print(\"\\t-{} {}\".format(j+1, new_lst[j][0]))\n \n if len(new_lst)==0:\n i+=1\n print(\"\\tNo tasks remaining here.\") \n continue\n\n c1 = int(\"0\"+input(\"\\nChoose the task between [{}-{}] to mark as complete. \\nChoose {} or more to goto next date. \\nChoose 0 to terminate: \".format(min(1,len(new_lst)),len(new_lst), len(new_lst)+1)))\n \n if c1==0:\n break\n elif c1>len(new_lst):\n i+=1\n continue\n \n # c2 = int(input(\"\\nChoose the task between [1-{}] to mark as complete: \".format(len(new_lst))))\n \n data[new_date][new_lst[c1-1][1]][1]*=-1\n print(\"\\n[$$$]Marked as complete!!!\\n\")\n c2 = int(\"0\"+input(\"Mark more here? y(0), n(anything else): \"))\n if c2!=0:\n i+=1\n # json.dump(data,open(TODO_FILE, \"w+\"))\n\n except KeyboardInterrupt:\n print(\"KeyboardInterrupt\")\n except :\n print(\"Some error occurred\")\n finally:\n write_file(data)", "def complete_todo(self, todo: Todo):\n todo.completed = True\n self.todo_client.put_todo(todo)", "def done(self, request, task_id):\n # TODO add unittests for two cases:\n # * when task_id not found\n # * when there are more than one task which hash started from task_id\n identity = self.bot.get_plugin('identity').get_identity_by_request(request)\n tasks = self._get_tasks(identity)\n\n if tasks:\n hashes, min_len = _gen_hashes(tasks)\n filtered_tasks = []\n for h, (dt, about, identity_id) in zip(hashes, tasks):\n if not h.startswith(task_id):\n filtered_tasks.append((dt, about, identity_id))\n\n self._set_tasks(identity, filtered_tasks)\n request.respond('done')", "def remove_todo(self, todo):\n self.deleted_items.append(todo)\n print 'removed \"%s\"' % todo.text", "def task_done(self, done, **kwargs):\n\n # If unknown task, kill execution\n if done not in self.doing:\n return\n\n # Set the task as completed\n self.done.append(self.doing.pop(self.doing.index(done)))\n\n # Call original method\n super(Queue, self).task_done(**kwargs)\n\n # Stop\n return", "def remove(self, task):\n pass", "def set_task_done(self):\n\n tasks = self._get_all_tasks()\n\n task_id = tasks[self.tasks_view.currentRow()].Id\n\n self.tasks_flow.set_status(task_id, 0)\n\n # Refresh the table\n self.write_tasks_table()", "def task_done(self):\n self.__data[\"status\"] = TASK.DONE # Set status done for task\n self.__data[\"eor\"] = time.time() # Update last end of run\n self.task_changed([\"status\", \"eor\"]) # Send changed event", "def write_todo(self, todo):\n if todo != None:\n print 'added \"%s\"' % todo.text\n self.new_items.append(todo)", "def delete_todo_task(self, id):\n if self.is_todo_table_empty() == False:\n if id > self.get_largest_id() or id < 0:\n print(\"\\ntask with id %s does not exist\\n\" % id)\n else:\n self.conn.execute(\"\"\"DELETE FROM todo WHERE id=?;\"\"\", str(id))\n self.decrement_todo_task_ids(id)\n self.conn.commit()\n else:\n print(\"\\nno tasks to delete!\\n\")", "def getTasks():\n\ttasks = open(\"todo.txt\").readlines()\n\tif len(tasks):\n\t for num in range(len(tasks) - 1, 
-1, -1):\n\t print(\"[%d] %s\" % (num + 1, tasks[num]), end=\"\")\n\telse:\n\t print(\"There are no pending todos!\")", "def save_tasks(self, task_file):\n\n\t\tutil.save(self.tasklist.tasks, task_file)", "def edit_todo(taskname, deadline, reminder, priority, deleteflag):\n edit_task = ''\n editables = ['deadline', 'reminder', 'priority', 'no_del']\n edited = [deadline, reminder, priority, deleteflag]\n\n if not exists(taskname):\n return False\n\n with open(todofile, 'r') as todo:\n tasks = todo.readlines()\n for task in tasks:\n try:\n task = json.loads(task)\n if taskname == task['name']:\n edit_task = task\n break\n except json.decoder.JSONDecodeError:\n return None\n \n remove_todo(taskname)\n\n #editing the tasks here\n for i, editable in enumerate(editables):\n if edited[i] is not None:\n if edited[i] == \"remove\":\n edit_task[editable] = None\n else:\n edit_task[editable] = edited[i]\n\n #making the task deadline json-serializable\n edit_task['deadline'] = str(edit_task['deadline'])\n\n with open(todofile, 'a') as todo:\n try:\n jdump = json.dumps(edit_task) + '\\n'\n todo.write(jdump)\n except json.decoder.JSONDecodeError:\n return None\n return True", "def task_finished(self, task_id):\n if task_id in self.tasks:\n del self.tasks[task_id]", "def add_task():\n\n yourTask = []\n line = input(\"Add your task: \")\n yourTask.append(line)\n taskfile = open('tasks.txt', 'a')\n for line in yourTask:\n taskfile.write(\"%s\\n\" % line)\n taskfile.close()\n\n import menu", "def end_task(self, caller, id, out):\n\n\t\tself.logger.complete_log(id, out) # complete the logger row of the task\n\t\t#del self.tasks[id] # delete the task from the running task's dict\n\n\t\tself._sync()", "def save_done_file(outdir, filename=None):\n time_tag = current_time_str()\n if filename == None:\n filename = f\"DONE_{time_tag}.txt\"\n outfn = os.path.join(outdir, filename)\n with open(outfn, \"w\") as outf:\n outf.write(f\"DONE at {time_tag}\\n\")", "def do_done(self, arg):\n task = self.db.get_active_task()\n if not task:\n print('There is not an active task.')\n return\n finished = self.db.finish_track(task['track_id'], task['started'])\n rounding = ''\n if config.BT_TIMESHEET_ROUNDING and config.BT_ROUNDING_INCREMENT:\n rounding = \" and rounded to the next %s minute(s)\" % \\\n config.BT_ROUNDING_INCREMENT\n print(u\"The task '{task}#{project}' has been done. 
{activity} was spent\"\n \"{rounding}.\".format(\n task=task['tname'], project=task['pname'],\n activity=helpers.seconds_to_human(\n (finished - task['started']).total_seconds()),\n rounding=rounding\n )\n )\n self.set_prompt(self.bloody_prompt)", "def add_todo(taskname, deadline, priority, reminder, deleteflag):\n autodel()\n task = {\n 'name': taskname,\n 'deadline': str(deadline),\n 'priority': priority,\n 'reminder': reminder,\n 'no_del': deleteflag\n }\n\n if not exists(task['name']):\n with open(todofile, 'a') as todo:\n try:\n jdump = json.dumps(task) + '\\n'\n todo.write(jdump)\n return 0\n except json.decoder.JSONDecodeError:\n return 1", "def todo():\n print(\"OK\")", "def save_to_file(todoList, doneList):\n\n filePath = os.path.expanduser('~/todoSave')\n f = open(filePath, 'w+')\n # wipe the old save file\n f.truncate(0)\n\n\n obj = {\"todo\": todoList, \"done\": doneList}\n f.write(json.dumps(obj, indent=4))", "def tasks_done(self,tasks):\n self.done_counter += len(tasks)\n for t in tasks:\n if t.status == t.IGNORED:\n self.ignored_counter += 1\n logger.info(\"*** ignored task %d\", t.tid)\n else:\n self.completed_counter += 1\n # use the task output produced by the worker\n self.total_execution_time += t.task_output.execution_time\n logger.info(\"*** completed task %d, total execution time: %f\", t.tid,self.total_execution_time)" ]
[ "0.7079181", "0.69074166", "0.66847503", "0.65578055", "0.6467333", "0.6457318", "0.6359015", "0.6349304", "0.6338304", "0.63093024", "0.6263692", "0.6259077", "0.6224326", "0.6205667", "0.6193628", "0.6182642", "0.6131768", "0.6124231", "0.6107879", "0.60152334", "0.5980346", "0.59750444", "0.5970462", "0.59619623", "0.5911756", "0.5910003", "0.5899132", "0.5894349", "0.5882621", "0.5852351" ]
0.75782037
0
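The markOff document above reads the todo number from sys.argv[2] and rewrites todo.txt in place, so it is clearly meant to be driven from a command line. The sketch below shows one way the surrounding script might dispatch to it; the command names, the __main__ block and the imports are assumptions rather than part of the retrieved code, and markOff is taken to be defined in the same module as above.

import sys
from datetime import datetime  # markOff calls datetime.today() when archiving a task

# Hypothetical dispatch for the todo script; assumed usage:
#   python todo.py done 2   -> archive todo #2 ("x <dd/mm/YYYY> <task>" is appended to done.txt)
#   python todo.py del 2    -> drop todo #2 from todo.txt without archiving it
if __name__ == "__main__":
    command = sys.argv[1] if len(sys.argv) > 1 else ""
    if command == "done":
        markOff()             # isdelete defaults to 0: the matched line is moved to done.txt
    elif command == "del":
        markOff(isdelete=1)   # the matched line is skipped, i.e. deleted outright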
This function's behavior is similar to the markOff function, so it simply calls markOff, passing isdelete = 1 as the argument.
def deleteTask():
    markOff(isdelete = 1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def markOff(isdelete = 0):\n\ttry:\n\t taskId = sys.argv[2]\n\t tasks = open(\"todo.txt\").readlines()\n\t file = open(\"todo.txt\", \"w\")\n\t doneTasks = open(\"done.txt\", \"a\")\n\t flag = True\n\t for task in range(len(tasks)):\n\t if task + 1 == int(taskId):\n\t \tflag = False\n\t \tif isdelete == 1:\n\t \t\tcontinue\n\t \telif isdelete == 0:\n\t \t\tdata = \"x {} {}\".format(datetime.today().strftime(\"%d/%m/%Y\"), tasks[task])\n\t \t\tdoneTasks.write(data)\n\t else:\n\t \tfile.write(tasks[task])\n\n\t if not isdelete:\n\t \tif flag:print(\"Error: todo #%s does not exist.\" % (taskId))\n\t \telse:print(\"Marked todo #%s as done.\" % (taskId))\n\t \n\t if isdelete:\n\t \tif flag:print(\"Error: todo #%s does not exist. Nothing deleted.\" %(taskId))\n\t \telse:print(\"Deleted todo #%s\" % (taskId))\n\n\texcept IndexError:\n\t\tif isdelete:print(\"Error: Missing NUMBER for deleting todo.\")\n\t\telse:print(\"Error: Missing NUMBER for marking todo as done.\")", "def beforeDelete(self):", "def delete():", "def deletes(f):\n f.deletes = True\n return f", "def before_delete(self, obj, st):\n pass", "def do_delete(self, arg):\n \treturn False", "def mark_for_delete(self, mark=True):\n updateshopobj = self.sc.get_updateshop_obj(\n {\n 'Alias': self.Alias,\n 'MarkedForDelete': mark,\n }\n )\n self.sc.update(updateshopobj)\n self.refresh()", "def _handleMarkernoChangedDelete(self):\n \n # Get previous markerno\n # update markerno's >prev_markerno to markerno + 1\n # update of_places set markerno = markerno + 1 where territoryno = '4-1-2' and markerno is not null\n x=0\n pass", "def off(self):", "def unmark():\n with CONNECTION:\n CURSOR.execute('DELETE FROM marks')\n return '1'", "def execute(self):\r\n self.changeAttr(\"changeType\", \"delete\")\r\n self.changeAttr(\"changeMark\", \"1\")", "def delete(self, *args, **kwargs):\n return 0", "def off(self) -> None:", "def after_delete(self, obj, st):\n pass", "def delete(self):\n ...", "def delete_order():", "def _Delete(self):\n pass", "def no_op(_, __):\n logger.info(\"Got Delete\")", "def remove():", "def can_fast_delete(self, *args, **kwargs):\n return False", "def undo():", "def run(self, is_delete):\n # only run if input is true and debounced\n if is_delete:\n if not self._active_loop:\n # action command\n self._tub.delete_last_n_records(self._num_records)\n # increase the loop counter\n self._active_loop = True\n else:\n # trigger released, reset active loop\n self._active_loop = False", "def off(self) -> None:\n ...", "def delete(self):\n subprocess.run([\"axicli\", \"--mode\", \"manual\", \"-M\", \"enable_xy\"])\n subprocess.run([\"axicli\", \"--mode\", \"manual\", \"-M\", \"raise_pen\"])\n subprocess.run([\"axicli\", \"--mode\", \"manual\", \"-M\", \"disable_xy\"])\n\n return self.get()", "def remove(func):", "def remove():\n pass", "def delete(self, obj):", "def test_marking_removal(self):\n container = stixmarx.new()\n package = container.package\n red_marking = generate_marking_spec(generate_red_marking_struct())\n \n indicator = Indicator(title=\"Test\")\n package.add_indicator(indicator)\n \n container.add_marking(indicator, red_marking)\n self.assertTrue(container.is_marked(indicator, red_marking))\n\n container.remove_marking(indicator, red_marking)\n self.assertFalse(container.is_marked(indicator, red_marking))", "def delete_callback(self, chain, value):", "def undelete(self,\r\n undeletelist=None,\r\n update_table=True):\r\n\r\n m_temp = iter([a_temp for a_temp\r\n in range(1,len(self.indexes()*2))\r\n if str(a_temp)\r\n not 
in self.indexes()])\r\n\r\n\r\n # iter function is used to find free spots for the notes to be undeleted\r\n\r\n if undeletelist is None:\r\n undeletelist = [Index(x_temp)\r\n for x_temp in\r\n self.find_within(indexfrom=None,\r\n indexto=0)]\r\n\r\n for u in undeletelist:\r\n print(PERIOD,end=EMPTYCHAR)\r\n\r\n self.move(u,\r\n Index(next(m_temp)),\r\n withchildren=True,\r\n update_table=update_table)\r\n print()" ]
[ "0.6381452", "0.63253415", "0.6070844", "0.6049239", "0.60067105", "0.59252304", "0.5890134", "0.58599454", "0.58161026", "0.5758552", "0.5754594", "0.5730884", "0.5698445", "0.56965846", "0.5640858", "0.5552903", "0.5552888", "0.5528425", "0.5517398", "0.54960984", "0.54510856", "0.54231256", "0.5398041", "0.5383845", "0.5369938", "0.53613645", "0.53457105", "0.5320019", "0.5312657", "0.52780336" ]
0.6508327
0
This is used by the main method to change the smoothing parameter before training. Do not modify this method.
def setSmoothing(self, k):
    self.k = k
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setSmoothing(self, k):\n\tself.k = k", "def setSmoothing(self, k):\n self.k = k", "def __init__(self, smoothing=0.1):\n super(LabelSmoothing, self).__init__()\n self.confidence = 1.0 - smoothing\n self.smoothing = smoothing", "def __init__(self, smoothing=0.0):\n super(LabelSmoothing, self).__init__()\n self.confidence = 1.0 - smoothing\n self.smoothing = smoothing", "def __init__(self, smoothing=0.0):\n super(LabelSmoothing, self).__init__()\n self.confidence = 1.0 - smoothing\n self.smoothing = smoothing", "def setOptimizerParams(self,lr,momentum,decay):\n self.optimizer = SGD(lr=lr,momentum=momentum,decay=decay)", "def smoother(self):\n ok ,tchi2= True,0.\n if (self.status != 'filter'):\n warning('kfilter no smoothing as it is not filter!')\n debug(\"kfilter.smoother ok,chi2 \",(False,tchi2))\n return False,tchi2\n fstate = self.nodes[-1].getstate('filter')\n self.nodes[-1].setstate('smooth',fstate.copy())\n self.nodes[-1].setchi2('smooth',self.nodes[-1].getchi2('filter'))\n ks = range(0,len(self.nodes)-1)\n ks.reverse()\n for k in ks:\n node = self.nodes[k]\n node1 = self.nodes[k+1]\n sstate,schi2 = node.smooth(node1)\n node.setstate('smooth',sstate) \n node.setchi2('smooth',schi2) \n self.model.user_smooth(node)\n tchi2+=schi2\n self.status='smooth'\n debug(\"kfilter.smooth ok,chi2 \",(ok,tchi2))\n return ok,tchi2", "def on_epoch_start(self, state: _State):\n optimizer = self._optimizer\n\n if self.decouple_weight_decay:\n self._optimizer_wd = [\n group.get(\"weight_decay\", 0.0)\n for group in optimizer.param_groups\n ]\n for i in range(len(optimizer.param_groups)):\n optimizer.param_groups[i][\"weight_decay\"] = 0.0\n else:\n self._optimizer_wd = [0.0] * len(optimizer.param_groups)", "def adjust_parameters(self, mini_batch_size):\n\n pass", "def smooth_opt(array, smoothing_kernel=None, smoothing_type=\"gaussian\"):\n if smoothing_kernel is not None:\n if smoothing_type == \"gaussian\":\n array = filters.gaussian_filter(array, smoothing_kernel)\n else:\n raise ValueError(\n f\"Smoothing type: '{smoothing_type}' is not \" f\"supported\"\n )\n return array", "def update_params(self):\n if self.clip > 0:\n torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.clip)\n self.optimizer.step()", "def clicked_checkbox_model_smoothing(self):\n if self.checkbox_model_smoothing.isChecked():\n self._get_selected_model().metadata[\"smoothing_kernel\"] = True\n self.edit_manual_smoothing.setEnabled(False)\n else:\n self._get_selected_model().metadata[\"smoothing_kernel\"] = False\n self.edit_manual_smoothing.setEnabled(True)\n return None", "def update_parameters(parameters, grads, learning_rate):\n pass", "def toggle_excess_smoothing(self) -> None:\n if self.msg.sender != self.owner:\n revert(\"This method can only be invoked by the score owner. 
You are trying for unauthorized access\")\n self._excess_smoothing_live.set(not self._excess_smoothing_live.get())", "def set_params(self, **params):\n if not params:\n # Simple optimization to gain speed (inspect is slow)\n return self\n else:\n self.kwargs.update(params[\"kwargs\"])\n self.save_smooth = params.get(\"save_smooth\", self.save_smooth)\n\n return self", "def _adjust(self):\n self.updateView()\n\n self.setDescription(\n f\"Smoothing Factor: x{self.smoothingFactor()}\\n\"\n f\"Method: {self.method()}\\n\"\n f\"Smoother type: {self.smoother_type_to_name[self.smootherType()]}\"\n )", "def set_optimizer(self, probe):\n if 'weight_decay' in self.args['probe_training']:\n weight_decay = self.args['probe_training']['weight_decay']\n else:\n weight_decay = 0\n if 'scheduler_patience' in self.args['probe_training']:\n scheduler_patience = self.args['probe_training']['scheduler_patience']\n else:\n scheduler_patience = 0\n \n learning_rate = 0.001 if not 'learning_rate' in self.args['probe_training'] else\\\n self.args['probe_training']['learning_rate']\n \n scheduler_factor = 0.5 if not 'scheduler_factor' in self.args['probe_training'] else\\\n self.args['probe_training']['scheduler_factor']\n\n self.optimizer = optim.Adam(probe.parameters(), lr=learning_rate, weight_decay=weight_decay)\n self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.optimizer,\n mode='min',\n factor=scheduler_factor,\n patience=scheduler_patience)", "def set_optimizer(self, probe):\n if 'weight_decay' in self.args['probe_training']:\n weight_decay = self.args['probe_training']['weight_decay']\n else:\n weight_decay = 0\n if 'scheduler_patience' in self.args['probe_training']:\n scheduler_patience = self.args['probe_training']['scheduler_patience']\n else:\n scheduler_patience = 0\n \n learning_rate = 0.001 if not 'learning_rate' in self.args['probe_training'] else\\\n self.args['probe_training']['learning_rate']\n \n scheduler_factor = 0.5 if not 'scheduler_factor' in self.args['probe_training'] else\\\n self.args['probe_training']['scheduler_factor']\n\n self.optimizer = optim.Adam(probe.parameters(), lr=learning_rate, weight_decay=weight_decay)\n self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.optimizer,\n mode='min',\n factor=scheduler_factor,\n patience=scheduler_patience)", "def office_add_smoothed_kernels(parser, args, params):\n parser.add_argument('--max_perturbation', type=str,\n help='Amount by which to scale the velocity updates',\n metavar='', required=True)\n local_args = parser.parse_known_args(args)\n max_perturbation = local_args[0].max_perturbation\n\n control.add_smoothed_kernels(params, max_perturbation)", "def __init__(self, l_smooth = LS):\n self.l_smooth = l_smooth", "def _set_train_params(self,\n lr: float = 1e-3,\n l2norm: float = 1e-2,\n ):\n self.lr = lr\n self.l2norm = l2norm\n self.optimizer = torch.optim.Adam(\n self.model.parameters(), lr=lr, weight_decay=l2norm)", "def tune_params(self, X_train, Y_train):\n return self.model # No hyper-parameter tuning", "def smoothing_mode(self, smoothing_mode):\n if smoothing_mode is None:\n raise ValueError(\"Invalid value for `smoothing_mode`, must not be `None`\")\n allowed_values = [\"Default\", \"HighSpeed\", \"HighQuality\", \"None\", \"AntiAlias\", \"Invalid\"]\n if not smoothing_mode.isdigit():\n if smoothing_mode not in allowed_values:\n raise ValueError(\n \"Invalid value for `smoothing_mode` ({0}), must be one of {1}\"\n .format(smoothing_mode, allowed_values))\n self._smoothing_mode = smoothing_mode\n else:\n 
self._smoothing_mode = allowed_values[int(smoothing_mode) if six.PY3 else long(smoothing_mode)]", "def set_parameters(self, **kwargs):\n self.__select_k_best.set_params(**kwargs)", "def __init__(self, smoothing=0.1):\n super(LabelSmoothingCrossEntropy, self).__init__()\n assert smoothing < 1.0\n self.smoothing = smoothing\n self.confidence = 1. - smoothing", "def smooth(*args, numiter=1) -> core.Smooth:\n X, Y, kws = util.parseargs(*args)\n return core.Smooth(X, Y, numiter=numiter)", "def optimize_parameters(self):\n pass", "def optimize_parameters(self):\n pass", "def optimize_parameters(self):\n pass", "def _reset_params(self):\n for p in self.parameters():\n if p.dim() > 1:\n torch.nn.init.xavier_normal_(p)" ]
[ "0.7056522", "0.66494966", "0.6458589", "0.6430003", "0.6430003", "0.63708806", "0.63073367", "0.614149", "0.61360747", "0.6081798", "0.605832", "0.595308", "0.58814126", "0.5877647", "0.5857185", "0.58429444", "0.5836974", "0.5836974", "0.5832468", "0.5815381", "0.5812071", "0.57756305", "0.5761257", "0.57384884", "0.57211906", "0.56940174", "0.5692968", "0.5692968", "0.5692968", "0.5687977" ]
0.7078726
0
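The k stored by setSmoothing is a Laplace (add-k) smoothing strength; the retrieved snippet does not show where it is consumed, so the following is only a sketch of the usual way such a k enters the smoothed conditional-probability estimates (the kind of condprobs table that calculateLogJointProbabilities in the next record reads from). The helper name and the counts/totals layout are assumptions.

def estimate_cond_probs(counts, totals, k, legalLabels, features):
    """Hypothetical helper: add-k (Laplace) smoothing for binary features.

    counts[(feat, label)] -- training examples with feat == 1 under label
    totals[label]         -- training examples carrying that label
    """
    condprobs = {}
    for label in legalLabels:
        for feat in features:
            on = counts.get((feat, label), 0)
            total = totals.get(label, 0)
            p_on = (on + k) / (total + 2 * k)  # k is added to both outcomes (on / off)
            condprobs[feat, label] = {0: 1.0 - p_on, 1: p_on}
    return condprobs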
Returns the log-joint distribution over legal labels and the datum. Each log-probability should be stored in the log-joint counter, e.g. logJoint[3] = log(P(Label = 3, datum)). To get the list of all possible features or labels, use self.features and self.legalLabels.
def calculateLogJointProbabilities(self, datum):
    logJoint = util.Counter()

    "*** YOUR CODE HERE ***"
    #Adds log(P(y)) to calculate P(y|f1,f2...)
    for label in self.legalLabels:
        logJoint[label] += math.log(self.prior[label])

    #Adds log(P(f1|y)), log(P(f2|y))... to calculate P(y|f1, f2...)
    for key in datum:
        #if key == (7, 3):
        #print self.condprobs[key, 0]
        for label in self.legalLabels:
            #print str(key) + str(datum[key])
            logJoint[label] += math.log(self.condprobs[key, label][datum[key]])

    return logJoint
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculateLogJointProbabilities(self, datum):\n\tlogJoint = util.Counter()\n\t#want to calculate log(P(y)) + log(sum(P(fi|y)))\n\t#where y is a label\n\tfor label in self.legalLabels:\n\t\tlogJoint[label] = math.log(self.prior_distribution_prob[label])\n\t\tfor feature, value in datum.items():\n\t\t\tcp = self.conditional_prob[label][feature][value]\n\t\t\tif cp > 0: #condition check for values < 0 because log(0) is undefined and math domain error occurs\n\t\t\t\tlogJoint[label] += math.log(cp) #summing up\n\t\t\t\t\n\treturn logJoint", "def calculateLogJointProbabilities(self, datum):\n logJoint = util.Counter()\n for cls in self.classes:\n class_probability = self.prior_prob[cls]\n for key, value in datum.items():\n relative_feature_values = self.likelihoods[cls][key]\n class_probability += math.log(relative_feature_values.get(datum[key], 0.01))\n\n logJoint[cls] = class_probability\n\n return logJoint", "def log_joint(self):\n return sum([\n self.log_marg_like(self.gamma, self.gamma0, self.lamb, self.nu),\n self._gamma0_distribution.logpdf(self.gamma0),\n self._nu_distribution.logpdf(self.nu),\n self._lambda_distribution.logpdf(self.lamb),\n self.probit_distribution(self.xi).logpdf(self.gamma),\n self._xi_distribution.logpdf(self.xi) if self.sample_xi else 0.0\n ])", "def calculateLogJointProbabilities(self, datum):\n logJoint = util.Counter()\n \n \"*** YOUR CODE HERE ***\"\n \n # -- OUR CODE HERE\n \n \n import math\n for label in self.legalLabels:\n sumThing = 0.0\n for pixel in self.conditionalProb[label]:\n if datum[pixel] is 1:\n #assert self.conditionalProb[label][pixel] < 1.0 # -- sanity check that the probability is valid\n sumThing += math.log((self.conditionalProb[label][pixel]*1.0))\n else:\n sumThing+=math.log(1-self.conditionalProb[label][pixel]*1.0)\n logJoint[label] = math.log(self.prior[label]*1.0) + sumThing*1.0\n \n\n \n \n import time\n #print \"logJoint is :: \", logJoint\n #time.sleep(2)\n \n \n # -- uses the conditional probability tables computed in the current iteration\n # -- in train and tune\n \n return logJoint", "def joint_logpdf(self, x1, x2 = None):\n dists = self.conditionalMVNs\n joint_pdfs = np.array([d.joint_pdf(x1, x2) for d in dists])\n return np.log(np.sum(self.weights * joint_pdfs))", "def get_log_prob(self, latent, obs):\n return self.get_log_prob_from_latent_dist(self.get_latent_dist(obs), latent)", "def get_log_probss(self, latent, obs, obs_id):\n\n if self.use_alphabet:\n obs, alphabet = obs\n else:\n alphabet = None\n\n num_particles, batch_size, num_arcs, _ = latent.shape\n _, num_rows, num_cols = obs.shape\n latent_log_prob = self.get_latent_dist(alphabet).log_prob(latent)\n obs_dist = self.get_obs_dist(latent.view(num_particles * batch_size, num_arcs, 2))\n if hasattr(obs_dist, \"log_prob_with_id\"):\n obs_log_prob = obs_dist.log_prob_with_id(\n obs[None]\n .expand(num_particles, batch_size, num_rows, num_cols)\n .reshape(num_particles * batch_size, num_rows, num_cols),\n obs_id[None].expand(num_particles, batch_size).reshape(num_particles * batch_size),\n ).view(num_particles, batch_size)\n else:\n obs_log_prob = obs_dist.log_prob(\n obs[None]\n .expand(num_particles, batch_size, num_rows, num_cols)\n .reshape(num_particles * batch_size, num_rows, num_cols)\n ).view(num_particles, batch_size)\n\n if hasattr(self, \"likelihood_weight\"):\n obs_log_prob = obs_log_prob * self.likelihood_weight\n\n return latent_log_prob, obs_log_prob", "def log_joint(self, sample_dim=None, batch_dim=None, nodes=None):\n if nodes is None:\n nodes = 
self._nodes\n log_prob = 0.0\n for n in nodes:\n if n in self._nodes:\n node = self._nodes[n]\n log_p = batch_sum(node.log_prob,\n sample_dim,\n batch_dim)\n if batch_dim is not None and node.mask is not None:\n log_p = log_p * node.mask\n log_prob = log_prob + log_p\n return log_prob", "def logq_joint(self, x, h, return_mu=False):\n logph = distributions.Normal(0, 1).log_prob(h).sum(1)\n gmu = self.g(h)\n px_given_h = distributions.Normal(gmu, self.logsigma.exp())\n logpx_given_h = px_given_h.log_prob(x).flatten(start_dim=1).sum(1)\n if return_mu:\n return logpx_given_h + logph, gmu\n else:\n return logpx_given_h + logph", "def log_likelihood(self, x):\n # set nuisance parameters to their central values!\n predictions = self.get_predictions(self.shortarray_to_array(x), nuisance=False)\n m_obj = flavio.Measurement['Pseudo-measurement for FastFit instance: ' + self.name]\n m_obs = m_obj.all_parameters\n prob_dict = m_obj.get_logprobability_all(predictions)\n ll = sum(prob_dict.values())\n return ll", "def log_likelihood(self):\n\n if self._log_likelihood is None:\n self._log_likelihood = logpdf(x=self.y, cov=self.S)\n return self._log_likelihood", "def get_log_prob(self, latent, obs, obs_id, get_accuracy=False):\n if self.use_alphabet:\n obs, alphabet = obs\n else:\n alphabet = None\n num_particles, batch_size, num_arcs, _ = latent.shape\n _, num_rows, num_cols = obs.shape\n latent_log_prob = self.get_latent_dist(alphabet).log_prob(latent)\n\n obs_dist = self.get_obs_dist(latent.view(num_particles * batch_size, num_arcs, 2))\n if hasattr(obs_dist, \"log_prob_with_id\"):\n obs_log_prob, accuracy = obs_dist.log_prob_with_id(\n obs[None]\n .expand(num_particles, batch_size, num_rows, num_cols)\n .reshape(num_particles * batch_size, num_rows, num_cols),\n obs_id[None].expand(num_particles, batch_size).reshape(num_particles * batch_size),\n get_accuracy=True,\n )\n obs_log_prob = obs_log_prob.view(num_particles, batch_size)\n else:\n obs_log_prob = obs_dist.log_prob(\n obs[None]\n .expand(num_particles, batch_size, num_rows, num_cols)\n .reshape(num_particles * batch_size, num_rows, num_cols)\n ).view(num_particles, batch_size)\n accuracy = None\n\n if hasattr(self, \"likelihood_weight\"):\n obs_log_prob = obs_log_prob * self.likelihood_weight\n\n if get_accuracy:\n return latent_log_prob + obs_log_prob, accuracy\n else:\n return latent_log_prob + obs_log_prob", "def joint_pdf(self, x1, x2 = None):\n return np.exp(self.joint_logpdf(x1, x2))", "def log_data_prob(self, x):\n _dist = norm(self.data, self.err)\n lp = _dist.logpdf(x)\n for i in range(6):\n lp[np.isnan(lp[:,i]),i] = self.default_priors[np.isnan(lp[:,i]),i]\n\n return lp.sum(axis=1)", "def target(w, z):\n return log_joint(data_dim=data_dim,\n latent_dim=latent_dim,\n num_datapoints=num_datapoints,\n stddv_datapoints=stddv_datapoints,\n w=w, z=z, x=x_train)", "def loglikelihood(self):\n raise NotImplementedError(\"To be implemented\")", "def get_log_prob(self, X, target=None):\n\n # We don't support the target argument for now.\n assert target is None\n\n batch_size, seq_len, dim = X.size()\n X = X.contiguous().view(-1, dim)\n\n head_y = self.head(X)\n # log_probs = head_y.new_zeros(X.size(0), self.vocab_size)\n\n head_size = self.cutoff[0] + len(self.tail)\n head_log_probs = self.lsm(head_y)\n log_probs_list = [head_log_probs[:, :self.cutoff[0]]]\n\n if len(self.tail) > 0:\n tail_priors = head_log_probs[:, self.cutoff[0]:head_size]\n\n for i in range(len(self.tail)):\n tail_i = self.lsm(self.tail[i](X))\n tail_i = tail_i + 
tail_priors[:, i, None]\n log_probs_list.append(tail_i)\n\n log_probs = torch.cat(log_probs_list, dim=1)\n log_probs = log_probs.view(batch_size, seq_len, self.vocab_size)\n return log_probs", "def likelihood(self):\n\n # assert the Gaussian process is up to date\n self._gp_up_to_date()\n\n noise_penalization_term = -1 / 2 * np.log(\n np.linalg.det(self.cov_matrix))\n\n y = np.linalg.solve(self.cov_matrix, self.list_y)\n y = np.array(self.list_y) @ y\n data_fidelity_term = -1 / 2 * y\n\n nbr_obs_term = - self.n_observation * np.log(2 * np.pi)\n likelihood = (\n noise_penalization_term + data_fidelity_term + nbr_obs_term\n )\n return likelihood", "def log_likelihood(self, points):\n\t\tpoint_set = list(points)\n\t\tlog_probabilities = [np.log(self.density(point)) for point in point_set]\n\t\treturn sum(log_probabilities)", "def log_likelihood(self):\r\n return (-0.5 * self.num_data * self.output_dim * np.log(2.*np.pi) -\r\n 0.5 * self.output_dim * self.K_logdet + self._model_fit_term() + self.likelihood.Z)", "def get_log_prob_from_latent_dist(self, latent_dist, latent):\n return latent_dist.log_prob(latent)", "def log_likelihood(self):\n return -0.5*self.D*self.K_logdet + self._model_fit_term() + self.likelihood.Z", "def log_likelihoods(self):\n return self.__data_frame.loc[:, \"ll\":\"ll\"].values[:-1]", "def _compute_log_likelihood(self, parameters):\n raise NotImplementedError('')", "def log_likelihood(parameters):\n if len(copula.bounds_param) == 1:\n params = [parameters]\n else:\n param1, param2 = parameters\n params = [param1, param2]\n logl = -np.sum(np.log(copula.get_pdf(psd_obs[0], psd_obs[1], params)))\n return logl", "def log_likelihood(self) -> tf.Tensor:\n # K⁻¹ + GᵀΣ⁻¹G = LLᵀ.\n l_post = self._k_inv_post.cholesky\n num_data = self.observations_index.shape[0]\n\n # Hμ [..., num_transitions + 1, output_dim]\n marginal = self.emission.project_state_to_f(self.prior_ssm.marginal_means)\n marginal = self._drop_batch_shape(marginal)\n\n # y = obs - Hμ [..., num_transitions + 1, output_dim]\n disp = self.observations - marginal\n disp_data = self.sparse_observations - self.dense_to_sparse(marginal)\n\n # cst is the constant term for a gaussian log likelihood\n cst = (\n -0.5 * np.log(2 * np.pi) * tf.cast(self.emission.output_dim * num_data, default_float())\n )\n\n term1 = -0.5 * tf.reduce_sum(\n input_tensor=tf.einsum(\"...op,...p,...o->...o\", self._r_inv_data, disp_data, disp_data), axis=[-1, -2]\n )\n\n # term 2 is: ½|L⁻¹(GᵀΣ⁻¹)y|²\n # (GᵀΣ⁻¹)y [..., num_transitions + 1, state_dim]\n obs_proj = self._back_project_y_to_state(disp)\n\n # ½|L⁻¹(GᵀΣ⁻¹)y|² [...]\n term2 = 0.5 * tf.reduce_sum(\n input_tensor=tf.square(l_post.solve(obs_proj, transpose_left=False)), axis=[-1, -2]\n )\n\n ## term 3 is: ½log |K⁻¹| - log |L| + ½ log |Σ⁻¹|\n # where log |Σ⁻¹| = num_data * log|R⁻¹|\n term3 = (\n 0.5 * self.prior_ssm.log_det_precision()\n - l_post.abs_log_det()\n + 0.5 * self._log_det_observation_precision\n )\n\n return tf.reduce_sum(cst + term1 + term2 + term3)", "def logpdf(self, X, pool=None):\n logpdfs = []\n for logweight, space, kde in zip(self._logweights,\n self._spaces,\n self._kdes):\n # Calculate the probability for each parameter space individually\n if np.all(space == ~X.mask) and np.isfinite(logweight):\n logpdfs.append(logweight + kde(X[space], pool=pool))\n\n return logsumexp(logpdfs, axis=0)", "def log_likelihood(self, theta):\n raise NotImplementedError()", "def log_marginal_likelihood(self) -> tf.Tensor:\n X, Y = self.data\n Y = Y[..., :-1]\n K = self.kernel(X)\n ks = 
self._add_noise_cov(K)\n L = tf.linalg.cholesky(ks)\n m = self.mean_function(X)\n\n # [R,] log-likelihoods for each independent dimension of Y\n log_prob = gpflow.logdensities.multivariate_normal(Y, m, L)\n return tf.reduce_sum(log_prob)", "def logpdf(self, X) -> np.ndarray:\n return self.dist.logpdf(self.inv_trans(X))" ]
[ "0.7670301", "0.7324698", "0.72858876", "0.70940316", "0.66565716", "0.63166475", "0.62618005", "0.61345506", "0.60189897", "0.59206444", "0.58990693", "0.5837745", "0.5819782", "0.58036715", "0.5777778", "0.5769963", "0.573551", "0.5727573", "0.5705319", "0.5704481", "0.5689909", "0.56791085", "0.56598693", "0.5628257", "0.5622865", "0.56117934", "0.5599948", "0.5594762", "0.55837524", "0.55781704" ]
0.743221
1
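Put differently, the document above computes logJoint[y] = log P(y) + Σ_f log P(f = datum[f] | y) for every legal label y, and a classifier then only needs the argmax of that counter. A minimal caller sketch, assuming a classifier object that exposes legalLabels and the method above (the function name here is an assumption):

def classify_datum(classifier, datum):
    # Pick the label whose log-joint score is highest (ties broken arbitrarily).
    logJoint = classifier.calculateLogJointProbabilities(datum)
    return max(classifier.legalLabels, key=lambda label: logJoint[label])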
Extract phrases from the CSV file and tokenize them. Add each duplicate phrase only once.
def extract_phrases(phrase_dict, csv_reader, word_list):
    # Relies on the module-level all_phrases, words and tokenizer defined elsewhere in the script;
    # the word_list parameter is not referenced inside the loop.
    count_row = 0
    for row in csv_reader:
        phrase = row[3]
        count_row += 1
        if phrase not in all_phrases:
            tokens = tokenizer(phrase)
            tokens = list(tokens)
            phrase_dict[phrase] = tokens
            for tok in tokens:
                if tok not in words:
                    words.append(tok)
    #print(count_row)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def post_process(keyphrases):\n processed_keyphrases = []\n\n # Remove duplicates from the single phrases which are occurring in multi-keyphrases\n multi_phrases = [phrases for phrases in keyphrases if len(phrases[0].split()) > 1]\n single_phrase = [phrases for phrases in keyphrases if len(phrases[0].split()) == 1]\n for tup in single_phrase:\n kw = tup[0]\n for tup_m in multi_phrases:\n kw_m = tup_m[0]\n r = kw_m.find(kw)\n if r > -1:\n try:\n single_phrase.remove(tup)\n except:\n continue\n\n # Remove same word occurrences in a multi-keyphrase\n for multi_key, multi_score in multi_phrases:\n kw_m = multi_key.split()\n unique_kp_list = list(dict.fromkeys(kw_m))\n multi_keyphrase = ' '.join(unique_kp_list)\n processed_keyphrases.append((multi_keyphrase, multi_score))\n\n processed_keyphrases.extend(single_phrase)\n\n return processed_keyphrases", "def process_raw_phrases(file_path):", "def preprocess(self):\n\n self._build_labels_dict(['one', 'two', 'three', 'four', 'five'])\n\n with open(self.data_path + self.file_name, 'rb') as csvfile:\n\n reader = csv.reader(csvfile, delimiter=\",\")\n for row in reader:\n self.texts.append(row[1])\n self.labels.append(self.labels_index[row[0]])\n\n print('Found %s texts.' % len(self.texts))", "def get_tweets_ids_from_csv(self):\n for fname in self.filelist:\n with open(fname) as f:\n fakenews = csv.reader(f)\n next(fakenews) # Discard top CSV row\n for fake in fakenews:\n yield fake[3].split(\"\t\")", "def train(self, filename):\n with open(filename, 'r') as f:\n phrases_and_words = []\n\n for index, line in enumerate(f):\n # decoding, since input is not unicode\n cleaned_line = self.get_cleaned_line(line.decode('utf-8', 'ignore'))\n\n if cleaned_line:\n phrases_and_words.extend(self.get_phrase_and_words_from_line(cleaned_line))\n\n if index % 10000 == 0:\n self.db_storage.store_phrases_and_words(phrases_and_words)\n phrases_and_words = []\n\n self.db_storage.store_phrases_and_words(phrases_and_words)", "def extractWords(self, inputDataset):\n reviewFile = open(inputDataset, \"r\", encoding=\"utf-8-sig\")\n for record in reviewFile:\n record = record.strip().split(\"\\t\") # tab-delimited .txt file\n self.addUnigrams(int(record[0]), record[1])\n reviewFile.close()", "def train(filename, supervised=False):\n p = re.compile(r'[a-zA-Z0-9_:;,.\"?\\' ]+')\n data = []\n all_text = {}\n infile = csv.DictReader(open(filename), delimiter=',', quotechar='\"')\n for row in infile:\n text_id = row['id']\n text = row['text']\n author = row['author'] if supervised else None\n\n # remove special characters\n new_text = ''\n for word in text:\n for letter in word:\n reg = p.match(letter)\n if reg is not None:\n new_text += reg.group()\n\n data.append((text_id, new_text, author))\n if supervised:\n if author not in all_text.keys():\n all_text[author] = ''\n else:\n sentences = sentence_tokenizer.tokenize(new_text)\n all_text[author] += ' '.join(sentences) + ' '\n # print(\"{} {} {}\".format(text_id, text, author))\n if supervised:\n return data, all_text\n else:\n return data", "def tokenize(self, path, build_dict=False, thd=0):\n\n assert os.path.exists(path)\n\n if build_dict:\n # Add words to the dictionary\n with open(path, 'r') as f:\n for line in f:\n words = line.split()\n for word in words:\n self.dictionary.add_word(word)\n if thd > 1:\n self.dictionary.rebuild_by_freq(thd)\n\n # Tokenize file content\n ids_list = []\n with open(path, 'r') as f:\n for line in f:\n words = line.split()\n ids = []\n for word in words:\n ids.append(self.dictionary[word])\n 
ids_list.append(ids)\n\n return ids_list", "def purify_comments(csv_file, keep_stops=False, POS=False, lemmatize=False, popular=0):\r\n\r\n df = pd.read_csv(csv_file)\r\n df = df.loc[df[\"author\"] != \"[deleted]\"] # trim out comments whose authors have deleted their accounts\r\n df = df.loc[df[\"score\"] != \"score\"] # this is an error in the code when building new csv_files from dask\r\n\r\n # extracts only the popular comments\r\n if popular > 0:\r\n df = df.loc[pd.to_numeric(df[\"score\"]) > popular]\r\n\r\n comments = df[\"body\"]\r\n del df # no need for this anymore, and it'll merely eat up memory\r\n\r\n nlp = en_core_web_sm.load()\r\n\r\n revised_comments = []\r\n for comment in comments.astype('unicode').values:\r\n comment = comment[1:] # remove the initial 'b' bytes-representation character\r\n comment = comment.encode(\"utf-8-sig\").decode(\"utf-8-sig\") # get rid of BOM character\r\n comment = comment.lower().replace(r\"\\n\", r\"\").replace(r'\"', r'')\r\n\r\n tokens = nlp(comment)\r\n\r\n # actual specification section\r\n for sent in tokens.sents:\r\n\r\n if POS: # conversion of comments to tokens/lemmas-POS tags\r\n if lemmatize:\r\n if keep_stops:\r\n revised_tokens = [\"{}-{}\".format(token.lemma_, token.tag_) for token in sent\r\n if not token.is_punct]\r\n else:\r\n revised_tokens = [\"{}-{}\".format(token.lemma_, token.tag_) for token in sent\r\n if not token.is_stop and not token.is_punct\r\n and not token.orth_ == \"n't\" and not token.orth_ == \"'s\"]\r\n else:\r\n if keep_stops:\r\n revised_tokens = [\"{}-{}\".format(token.orth_, token.tag_) for token in sent\r\n if not token.is_punct]\r\n else:\r\n revised_tokens = [\"{}-{}\".format(token.orth_, token.tag_) for token in sent\r\n if not token.is_stop and not token.is_punct\r\n and not token.orth_ == \"n't\" and not token.orth_ == \"'s\"]\r\n\r\n elif lemmatize: # just lemmatization\r\n if keep_stops:\r\n revised_tokens = [token.lemma_ for token in sent\r\n if not token.is_punct]\r\n else:\r\n revised_tokens = [token.lemma_ for token in sent\r\n if not token.is_stop and not token.is_punct\r\n and not token.orth_ == \"n't\" and not token.orth_ == \"'s\"]\r\n\r\n else: # nothing but removal of stop words (or not)\r\n if keep_stops:\r\n revised_tokens = [token.orth_ for token in sent\r\n if not token.is_punct]\r\n else:\r\n revised_tokens = [token.orth_ for token in sent\r\n if not token.is_stop and not token.is_punct\r\n and not token.orth_ == \"n't\" and not token.orth_ == \"'s\"]\r\n\r\n revised_comments.append(\" \".join(revised_tokens))\r\n\r\n return pd.Series(revised_comments)", "def main(directory, csv_file, task_name):\n csv_data = pd.read_csv(csv_file)\n colnames = csv_data.columns.tolist()\n\n edat_files = glob.glob(directory + \"*.edat*\")\n text_files = glob.glob(directory + \"*-*.txt\")\n all_files = edat_files + text_files\n pairs = []\n paired_texts = []\n\n for text_file in text_files:\n [text_fname, _] = os.path.splitext(text_file)\n for edat_file in edat_files:\n [edat_fname, _] = os.path.splitext(edat_file)\n if text_fname == edat_fname:\n pairs.append([text_file, edat_file])\n\n for pair in pairs:\n paired_texts.append(pair[0])\n\n unpaired_texts = list(set(text_files) - set(paired_texts))\n three_files = []\n pop_idx = []\n\n # List of lists\n for i_file in range(len(unpaired_texts)):\n for j_pair in range(len(paired_texts)):\n if (unpaired_texts[i_file][:len(unpaired_texts[i_file])-6] in paired_texts[j_pair]):\n three_files.append([paired_texts[j_pair], pairs[j_pair][1],\n 
unpaired_texts[i_file]])\n pop_idx.append(i_file)\n\n for rm in reversed(pop_idx):\n unpaired_texts.pop(rm)\n\n # three_files is the text files and edats that form a triad (one edat, two\n # similarly named text files).\n for triad in three_files:\n for i_pair in reversed(range(len(pairs))):\n if triad[0:2] == pairs[i_pair]:\n pairs.pop(i_pair)\n\n two_texts = []\n all_two_texts = []\n two_text_pairs = []\n\n for i_file in range(len(unpaired_texts)):\n for j_file in range(i_file + 1, len(unpaired_texts)):\n if (unpaired_texts[i_file][:len(unpaired_texts[i_file])-6] in unpaired_texts[j_file]):\n all_two_texts.append(i_file)\n all_two_texts.append(j_file)\n two_text_pairs.append([i_file, j_file])\n\n all_two_texts = sorted(all_two_texts, reverse=True)\n\n # two_texts is the text files that pair with other text files.\n for i_pair in range(len(two_text_pairs)):\n two_texts.append([unpaired_texts[two_text_pairs[i_pair][0]],\n unpaired_texts[two_text_pairs[i_pair][1]]])\n\n for i_file in all_two_texts:\n unpaired_texts.pop(i_file)\n\n # one_text is the remaining un-paired text files.\n one_text = [[unpaired_texts[i_file]] for i_file in range(len(unpaired_texts))]\n\n # Determine subject IDs and timepoints for all files.\n # Assumes that files will be named according to convention\n # blahblahblah_[subj]-[tp].txt or blahblahblah-[subj]-[tp].txt.\n one_text_subjects = [get_subject(file_[0]) for file_ in one_text]\n one_text_timepoints = [get_timepoint(file_[0]) for file_ in one_text]\n two_text_subjects = [get_subject(pair[0]) for pair in two_texts]\n two_text_timepoints = [get_timepoint(pair[0]) for pair in two_texts]\n three_file_subjects = [get_subject(triad[0]) for triad in three_files]\n three_file_timepoints = [get_timepoint(triad[0]) for triad in three_files]\n pair_subjects = [get_subject(pair[0]) for pair in pairs]\n pair_timepoints = [get_timepoint(pair[0]) for pair in pairs]\n\n af_files = ([item for sublist in pairs for item in sublist] +\n [item for sublist in two_texts for item in sublist] +\n [item for sublist in three_files for item in sublist] +\n [item for sublist in one_text for item in sublist])\n\n one_edat = list(set(all_files) - set(af_files))\n one_edat = [[edat] for edat in one_edat]\n one_edat_subjects = [get_subject(file_[0]) for file_ in one_edat]\n one_edat_timepoints = [get_timepoint(file_[0]) for file_ in one_edat]\n\n all_subjects = (one_text_subjects + two_text_subjects + three_file_subjects +\n pair_subjects + one_edat_subjects)\n all_notetype = (([\"one_text\"] * len(one_text_subjects)) +\n ([\"two_texts\"] * len(two_text_subjects)) +\n ([\"three_files\"] * len(three_file_subjects)) +\n ([\"pair\"] * len(pair_subjects)) +\n ([\"one_edat\"] * len(one_edat_subjects)))\n all_timepoints = (one_text_timepoints + two_text_timepoints +\n three_file_timepoints + pair_timepoints +\n one_edat_timepoints)\n all_file_sets = one_text + two_texts + three_files + pairs + one_edat\n\n organized_dir = org_dir_dict.get(task_name)\n\n for i_subj in range(len(all_subjects)):\n month = timepoint_dict.get(task_name).get(all_timepoints[i_subj])\n files_note = note_dict.get(all_notetype[i_subj])\n if len(all_subjects) > 4:\n try:\n print(\"Successfully organized %s-%s\" % (all_subjects[i_subj], month))\n print(\"Moved:\")\n subject_id = all_subjects[i_subj]\n files = all_file_sets[i_subj]\n note = organize_files(subject_id, month, files, organized_dir)\n note.append(files_note)\n orged = 1\n orgedwhen = time.strftime(\"%Y/%m/%d\")\n orgedby = \"PY\"\n except IOError:\n print(\"%s-%s 
couldn't be organized.\" % (all_subjects[i_subj], all_timepoints[i_subj]))\n note = files_note\n orged = 0\n orgedwhen = \"\"\n orgedby = \"\"\n\n try:\n if all_notetype[i_subj] == \"pair\":\n print(\"Successfully converted %s-%s\" % (all_subjects[i_subj], all_timepoints[i_subj]))\n conved = 1\n convedwhen = time.strftime(\"%Y/%m/%d\")\n convedby = \"PY\"\n else:\n print(\"%s-%s couldn't be converted.\" % (all_subjects[i_subj], all_timepoints[i_subj]))\n conved = 0\n convedwhen = \"\"\n convedby = \"\"\n except IOError:\n print(\"%s-%s couldn't be converted.\" % (all_subjects[i_subj], all_timepoints[i_subj]))\n conved = 0\n convedwhen = \"\"\n convedby = \"\"\n else:\n print(\"%s-%s couldn't be organized.\" % (all_subjects[i_subj], all_timepoints[i_subj]))\n note = files_note\n orged = 0\n orgedwhen = \"\"\n orgedby = \"\"\n print(\"%s-%s couldn't be converted.\" % (all_subjects[i_subj], all_timepoints[i_subj]))\n conved = 0\n convedwhen = \"\"\n convedby = \"\"\n\n csv_data = add_subject(csv_data, all_subjects[i_subj],\n all_timepoints[i_subj], orged, orgedwhen, orgedby,\n conved, convedwhen, convedby, note)\n\n csv_data = csv_data[colnames]\n csv_data.to_csv(csv_file, index=False)", "def process_file(path_in, path_out, threshold):\n infile = open(path_in, \"r\", encoding=\"utf-8\")\n outfile = open(path_out, \"w\", encoding=\"utf-8\")\n csv_reader = csv.reader(infile)\n csv_writer = csv.writer(outfile)\n for i, line in enumerate(csv_reader):\n try:\n text_id, text, masked, label_binary, label_ternary, label_finegrained, source = line\n except ValueError:\n if line == ['Place for parser output']:\n pass\n else:\n import pdb; pdb.set_trace()\n if print_only:\n print(check_sentences(text, threshold, print_only))\n else:\n # return False\n swiss_text = check_sentences(text, threshold)\n if i % 10000 == 0:\n print(\"Processed line #{}\".format(i) + \" {}\".format(text))\n if swiss_text:\n csv_writer.writerow([text_id, text, masked, label_binary, label_ternary, label_finegrained, source])\n infile.close()\n outfile.close()", "def raw_text_to_tokenized_phrases(raw_phrases, language='english'):\n tokenized = nltk.sent_tokenize(raw_phrases, language)\n return [tokenize_phrase(Phrase(phrase)) for phrase in tokenized]", "def pre_process_multispace(filepath, delimiter=\" \"):\n newpath = filepath+\".rev.csv\"\n with open(filepath, \"r\") as src_csv_file:\n with open(newpath, \"w\") as dst_csv_file:\n for src_line in src_csv_file:\n dst_csv_file.write(delimiter.join(src_line.split())+\"\\n\")", "def annotate_phrases(batch_size=100):\n\n with open(EXCLUDED, \"r\", encoding=\"utf-8\") as tfile:\n excluded = set(tfile.read().splitlines())\n\n with open(INCLUDED, \"r\", encoding=\"utf-8\") as tfile:\n included = set(tfile.read().splitlines())\n\n with open(PHRASE_DUMP, \"r\", encoding=\"utf-8\") as tfile:\n pending = tfile.read().splitlines()\n pending = [x.split(\", \") for x in pending]\n\n pending = [phrase_line for phrase_line in pending if phrase_line[0] not in excluded and phrase_line[0] not in included][:batch_size]\n pending_choices = [phrase_line[0] for phrase_line in pending]\n marked_excluded = easygui.multchoicebox(msg=\"Mark Excluded\", choices=pending_choices)\n if not marked_excluded:\n marked_excluded = []\n for marked in marked_excluded:\n excluded.add(marked)\n\n with open(EXCLUDED, \"w+\", encoding=\"utf-8\") as tfile:\n for phrase in excluded:\n tfile.write(phrase)\n tfile.write(\"\\n\")\n\n not_marked = [phrase for phrase in pending_choices if phrase not in excluded]\n for phrase in 
not_marked:\n included.add(phrase)\n\n with open(INCLUDED, \"w+\", encoding=\"utf-8\") as tfile:\n for phrase in included:\n tfile.write(phrase)\n tfile.write(\"\\n\")", "def tokenizer(self):\n tokenizer = RegexpTokenizer(r'\\w+')\n \n self.tweet_tokenized_train = [tokenizer.tokenize(x.lower()) for x in self.tweet_prepro_train]\n self.tweet_tokenized_test = [tokenizer.tokenize(x.lower()) for x in self.tweet_prepro_test]", "def pd_read(filename = \"tweets.csv\", lower = True):\n tweets = pd.read_csv(filename)\n tweets.drop_duplicates(subset='text', inplace=True)\n if lower:\n tweets.text = tweets.text.str.lower()\n return tweets", "def load_cls_phrases(self, fh):\n for line in fh:\n data = line.strip().split(',')\n # guards against unparseable lines\n if len(data) < 2:\n continue\n code = data[0]\n phrase_array = [data[1].lower().strip()]\n if len(data) > 2:\n syn_array = data[2].lower().strip().split('|')\n for syn in [s for s in syn_array if s not in phrase_array]:\n phrase_array.append(syn) \n\n for phrase in phrase_array:\n for key in set(self.get_key_list(phrase)):\n if key not in self.cls_phrases:\n self.cls_phrases[key] = []\n self.cls_phrases[key].append(code)\n\n return len(self.cls_phrases)", "def read_csv(csv_folder, split, segment_limit, sentence_limit, word_limit):\n assert split in {'train', 'test'}\n\n docs = []\n labels = []\n word_counter = Counter()\n data = pd.read_csv(os.path.join(csv_folder, \"short_concat_\" + split + '.csv'), header=None)\n for i in tqdm(range(data.shape[0])):\n # 전체 문서\n row = list(data.loc[i, :])\n segments = list()\n text = row[0]\n\n # 각 문단을 문장 단위로 잘라 저장\n for paragraph in preprocess(text).splitlines():\n segments.append([s for s in sent_tokenizer.tokenize(paragraph)])\n\n # 단어 단위로 토크나이징\n sentences = list()\n\n for paragraph in segments[:segment_limit]:\n words = list()\n for s in paragraph[:sentence_limit]:\n w = word_tokenizer.tokenize(s)[:word_limit]\n # If sentence is empty (due to removing punctuation, digits, etc.)\n if len(w) == 0:\n continue\n words.append(w)\n word_counter.update(w)\n sentences.append(words)\n # If all sentences were empty\n if len(words) == 0:\n continue\n\n labels.append(int(row[1])) # since labels are 1-indexed in the CSV\n docs.append(sentences)\n\n return docs, labels, word_counter", "def translate_phrases(translator, phrases, language):\n for phrase in phrases:\n translator.type_phrase_to_translate(phrase)\n sleep(0.5)\n translated_phrase = translator.read_translated_phrase()\n add_translation_to_file(language, translated_phrase)", "def tokenize_transcript(tokenize_method,input_transcript):\n final_lst = []\n for i in (range(0,len(input_transcript))):\n #print(tokenize_method(input_transcript[i]))\n final_lst = final_lst + list(set(tokenize_method(input_transcript[i])))\n return final_lst", "def load_corpus_csv(corpus_name, path, delimiter, trans_delimiter='.',\n feature_system_path = ''):\n #begin = time.time()\n corpus = Corpus(corpus_name)\n if feature_system_path:\n feature_matrix = load_binary(feature_system_path)\n corpus.set_feature_matrix(feature_matrix)\n with open(path, encoding='utf-8') as f:\n headers = f.readline()\n headers = headers.split(delimiter)\n if len(headers)==1:\n e = DelimiterError(('Could not parse the corpus.\\n\\Check '\n 'that the delimiter you typed in matches '\n 'the one used in the file.'))\n raise(e)\n\n headers = [h.strip() for h in headers]\n headers[0] = headers[0].strip('\\ufeff')\n if 'feature_system' in headers[-1]:\n headers = headers[0:len(headers)-1]\n\n trans_check = 
False\n\n for line in f.readlines():\n line = line.strip()\n if not line: #blank or just a newline\n continue\n d = {attribute:value.strip() for attribute,value in zip(headers,line.split(delimiter))}\n for k,v in d.items():\n if k == 'transcription' or 'tier' in k:\n if trans_delimiter:\n trans = v.split(trans_delimiter)\n else:\n trans = [x for x in v]\n if not trans_check and len(trans) > 1:\n trans_check = True\n d[k] = trans\n word = Word(**d)\n if word.transcription:\n #transcriptions can have phonetic symbol delimiters which is a period\n if not word.spelling:\n word.spelling = ''.join(map(str,word.transcription))\n\n corpus.add_word(word)\n if corpus.has_transcription and not trans_check:\n e = DelimiterError(('Could not parse transcriptions with that delimiter. '\n '\\n\\Check that the transcription delimiter you typed '\n 'in matches the one used in the file.'))\n raise(e)\n\n transcription_errors = corpus.check_coverage()\n return corpus", "def twitter_data(filename, dictionary):\r\n new_data = []\r\n with codecs.open(filename, 'r', 'utf8') as f:\r\n for line in f:\r\n new_line = []\r\n stuff = [x for x in line.lower().split() if\r\n ((has_letter(x) or len(x) >= 1) and keep_word(x, num_words, count_dict))]\r\n for word in stuff:\r\n new_line.append(dictionary.get(word, 1))\r\n if len(new_line) > 0:\r\n new_data.append(new_line)\r\n return new_data", "def train(self, corpus):\n self.tokens = []\n self.tags = []\n sentences = corpus.split(NEW_LINE)\n for sentence in sentences:\n start = START_SIGHT + SLASH + START_SIGHT + SPACE + START_SIGHT + SLASH + START_SIGHT + SPACE\n end = SPACE + END + SLASH + END\n sentence = start + sentence + end \n tokens = sentence.split(SPACE)\n for t in tokens:\n token = t.rsplit(SLASH, 1)\n if (len(token) > 1):\n self.tokens.append(token) \n self.tags.append(token[TAG_INDEX])\n \n nonsense_cases = set([(END, START_SIGHT), (START_SIGHT, END),\n (START_SIGHT, START_SIGHT, END),\n (END, START_SIGHT, START_SIGHT)])\n self.bigram_tags = [b for b in zip(self.tags[:-1], self.tags[1:]) if b not in nonsense_cases]\n self.trigram_tags = [t for t in zip(self.tags[:-1], self.tags[1:], self.tags[2:])\\\n if not (t[WORD_INDEX], t[TAG_INDEX]) in nonsense_cases and\\\n not (t[WORD_INDEX], t[TAG_INDEX]) in nonsense_cases]", "def tokenize_text_file(path, sanitize=True, remove_duplicates=False, stopwords=None):\n with open(path, 'r') as f:\n text_string = f.read()\n tokens = tokenize_text_string(\n text_string, sanitize, remove_duplicates, stopwords=stopwords)\n return tokens", "def match_all_phrases(self, inphrases):\n# temporary - attempted matches\n attempted_matches = []\n phrase_attempts = {}\n phrase = \"\"\n step = \"A\"\n # ALL full phrases \n for phrase in inphrases:\n phrase_attempts[phrase] = 1\n attempted_matches.append(phrase + ':' + step)\n if phrase in self.cls_phrases:\n match_choices = self.cls_phrases[phrase]\n #return match_choices, attempted_matches, phrase\n return (self.get_list_counts(match_choices), attempted_matches, \n phrase, self.get_most_common(match_choices))\n\n # Normalised version of ALL all full phrases \n phrases = [self.get_normalised_phrase(p) for p in inphrases]\n\n # 3 all prefix trigrams \n step = \"3\"\n for ngram in [p.split()[0:3] for p in phrases if len(p.split()) > 2]:\n phrase = ' '.join(ngram)\n phrase_attempts[phrase] = 1\n attempted_matches.append(phrase + ':' + step)\n if phrase in self.cls_phrases:\n match_choices = self.cls_phrases[phrase]\n return (self.get_list_counts(match_choices), attempted_matches, \n phrase, 
self.get_most_common(match_choices))\n\n # 2 all prefix bigrams \n step = \"2\"\n for ngram in [p.split()[0:2] for p in phrases if len(p.split()) > 1]:\n phrase = ' '.join(ngram)\n phrase_attempts[phrase] = 1\n attempted_matches.append(phrase + ':' + step)\n if phrase in self.cls_phrases:\n match_choices = self.cls_phrases[phrase]\n return (self.get_list_counts(match_choices), attempted_matches, \n phrase, self.get_most_common(match_choices))\n\n # 1 all valid words \n step = \"1\"\n for phr_elem in phrases:\n #print phr_elem.split()\n for phrase in [w.strip() for w in phr_elem.split() \n if self.isExcluded(w.strip()) == False and w.strip() not in phrase_attempts]:\n #print \"***\", phrase\n phrase_attempts[phrase] = 1\n attempted_matches.append(phrase + ':' + step)\n if phrase in self.cls_phrases:\n match_choices = self.cls_phrases[phrase]\n return (self.get_list_counts(match_choices), attempted_matches, \n phrase, self.get_most_common(match_choices))\n\n return [], attempted_matches, phrase, None", "def extract_phrases(tdocs, docs, idf):\n # Gather existing keyphrases\n keyphrases = set()\n for doc in tdocs:\n for t in doc:\n if len(t.split(' ')) > 1:\n keyphrases.add(t)\n\n # Count document co-occurrences\n t_counts = defaultdict(int)\n pair_docs = defaultdict(list)\n for i, terms in enumerate(tdocs):\n # We dont convert the doc to a set b/c we want to preserve order\n # Iterate over terms as pairs\n for pair in zip(terms, terms[1:]):\n t_counts[pair] += 1\n pair_docs[pair].append(i)\n\n # There are a lot of co-occurrences, filter down to those which could\n # potentially be phrases.\n t_counts = {kw: count for kw, count in t_counts.items() if count >= 2}\n\n # Identify novel phrases by looking at\n # keywords which co-occur some percentage of the time.\n # This could probably be more efficient/cleaned up\n for (kw, kw_), count in t_counts.items():\n # Only consider terms above a certain avg global IDF (to reduce noise)\n if (idf[kw]+idf[kw_])/2 <= 0.4:\n continue\n\n # Look for phrases that are space-delimited or joined by 'and' or '-'\n ph_reg = re.compile('({0}|{1})( |-)(and )?({0}|{1})'.format(kw, kw_))\n\n # Extract candidate phrases and keep track of their counts\n phrases = defaultdict(int)\n phrase_docs = defaultdict(set)\n for i in pair_docs[(kw, kw_)]:\n for m in ph_reg.findall(docs[i].lower()):\n phrases[''.join(m)] += 1\n phrase_docs[''.join(m)].add(i)\n\n if not phrases:\n continue\n\n # Get the phrase encountered the most\n top_phrase = max(phrases.keys(), key=lambda k: phrases[k])\n top_count = phrases[top_phrase]\n\n # Only count phrases that appear in _every_ document\n if top_count/count == 1:\n # Check if this new phrase is contained by an existing keyphrase.\n if any(top_phrase in ph for ph in keyphrases):\n continue\n keyphrases.add(top_phrase)\n\n # Add the new phrase to each doc it's found in\n for i in phrase_docs[top_phrase]:\n tdocs[i].append(top_phrase)\n\n return tdocs, keyphrases", "def preprocess_docs():\n\n print(\"Getting started!\")\n stopwords.populate_stopwords(NLP, STOPWORD_URL)\n\n print(str.format(\"Using data dir:{}\", DATA_DIR))\n\n csv_file = open(os.path.join(DATA_DIR, 'PDFs.csv'))\n reader = csv.reader(csv_file, 'excel')\n rows = list(reader)\n\n filenames = [_get_filename(row) for row in rows]\n\n pool = Pool(multiprocessing.cpu_count())\n\n try:\n pool.map(_get_item, rows)\n pool.map(pdf.extract_text, filenames)\n docs = pool.map(_extract_questions, rows)\n docs = [d for d in docs if d is not None]\n\n _find_similar(docs, 
simdoc=compare.compare_doc_keywords)\n\n for doc in docs:\n if doc is None:\n continue\n doc.save_json()\n\n except KeyboardInterrupt:\n pool.terminate()\n print(\"You cancelled the program!\")\n sys.exit(1)\n\n print(\"Done\")", "def tokenize(\n tokenizer: Tokenizer,\n lines: typing.Iterable[str],\n language: typing.Optional[str] = None,\n is_csv: bool = False,\n csv_delimiter: str = \"|\",\n split_sentences: bool = False,\n inline_pronunciations: bool = False,\n) -> typing.Iterable[typing.Dict[str, typing.Any]]:\n # String used to join tokens.\n # See RegexTokenizer\n join_str: str = getattr(tokenizer, \"join_str\", \" \")\n\n if inline_pronunciations:\n assert language is not None\n lang_phonemes = gruut_ipa.Phonemes.from_language(language)\n assert lang_phonemes is not None, f\"Unsupported language {language}\"\n\n def process_lines(lines):\n for line in lines:\n yield encode_inline_pronunciations(line, lang_phonemes)\n\n lines = process_lines(lines)\n\n for line in lines:\n line = line.strip()\n if not line:\n continue\n\n utt_id = \"\"\n\n if is_csv:\n # Input format is id|text\n utt_id, line = line.split(csv_delimiter, maxsplit=1)\n\n sentences = list(tokenizer.tokenize(line))\n\n if split_sentences:\n # One output line per sentence\n for sentence_idx, sentence in enumerate(sentences):\n sentence_id = str(sentence_idx)\n if utt_id:\n sentence_id = f\"{utt_id}_{sentence_id}\"\n\n yield {\n \"id\": sentence_id,\n \"raw_text\": sentence.raw_text,\n \"raw_words\": sentence.raw_words,\n \"clean_words\": sentence.clean_words,\n \"tokens\": [dataclasses.asdict(t) for t in sentence.tokens],\n \"clean_text\": sentence.clean_text,\n \"sentences\": [],\n }\n else:\n # One output line per input line\n raw_words: typing.List[str] = []\n clean_words: typing.List[str] = []\n tokens: typing.List[Token] = []\n\n for sentence in sentences:\n raw_words.extend(sentence.raw_words)\n clean_words.extend(sentence.clean_words)\n tokens.extend(sentence.tokens)\n\n yield {\n \"id\": utt_id,\n \"raw_text\": line,\n \"raw_words\": raw_words,\n \"clean_words\": clean_words,\n \"tokens\": [dataclasses.asdict(t) for t in tokens],\n \"clean_text\": join_str.join(clean_words),\n \"sentences\": [dataclasses.asdict(s) for s in sentences],\n }", "def tokenize_corpus(corpus, tokens_fname):\n if os.path.isfile(tokens_fname) and os.path.exists(tokens_fname):\n print('Cached tokens found.')\n with open(tokens_fname, 'rb') as f:\n final_tokens = pickle.load(f)\n else:\n print('Tokenizing data...')\n tokens_corpus = []\n # TweetTokenizer doesn't split words with apostrophes\n # tokenizer = TweetTokenizer()\n tokenizer = RegexpTokenizer(r'\\w+')\n # Create English stop words list\n en_stop = get_stop_words('en') + STOP_WORDS\n # Create p_stemmer of class PorterStemmer\n p_stemmer = PorterStemmer()\n\n for doc in corpus:\n raw = doc.lower()\n tokens = tokenizer.tokenize(raw)\n tokens_corpus.append(tokens)\n\n print('Removing stop words from tokens...')\n stopped_tokens = Parallel(n_jobs=multiprocessing.cpu_count(), prefer='threads')(delayed(remove_stop_words)(en_stop, tokens) for tokens in tqdm(tokens_corpus))\n\n print('Stemming tokens...')\n stemmed_tokens = Parallel(n_jobs=multiprocessing.cpu_count(), prefer='threads')(delayed(stem_tokens)(p_stemmer, tokens) for tokens in tqdm(stopped_tokens))\n\n print('Removing low frequency tokens...')\n freq_list = defaultdict(int)\n for doc in stemmed_tokens:\n for token in doc:\n freq_list[token] += 1\n final_tokens = Parallel(n_jobs=multiprocessing.cpu_count(), 
prefer='threads')(delayed(remove_low_freq_tokens)(freq_list, tokens) for tokens in tqdm(stemmed_tokens))\n\n # Cache tokens\n with open(tokens_fname, 'wb') as f:\n pickle.dump(final_tokens, f)\n\n return final_tokens", "def tokenize_records(records):\r\n contents = map(lambda record: record[constants.TEXT], records)\r\n tokenized_records = [word_tokenize(record.lower()) for record in contents]\r\n lemmatized_records = lemmatize_words(tokenized_records)\r\n lemmatized_words = list()\r\n for lemmatized_record in lemmatized_records:\r\n lemmatized_words.extend(lemmatized_record)\r\n return lemmatized_words" ]
[ "0.6272862", "0.6200871", "0.5733688", "0.5680239", "0.5663839", "0.563407", "0.56105554", "0.5597843", "0.5545445", "0.55262184", "0.55032367", "0.5449279", "0.54271376", "0.53965634", "0.5390097", "0.5350434", "0.533897", "0.5293532", "0.5286479", "0.527831", "0.5262325", "0.5258798", "0.5255156", "0.5244601", "0.52323055", "0.52259624", "0.5223602", "0.52204406", "0.52117306", "0.5208858" ]
0.75166154
0
Moves up to the parent directory
def moveUp():
    os.chdir("..")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def move_to_parent(self, path):\n if path == self.dir_to_check:\n print (' Parent directory out of scope!')\n return path\n else:\n dir_name = os.path.dirname(path)\n return dir_name", "def _goUp(self) -> None:\n self._openPath(path=self._currPath.parent)", "def cd_up(self):\n parts = self.cwd.split(\"\\\\\")\n self.cwd = \"\"\n for i in parts[:-1]:\n self.cwd += i + \"\\\\\"\n self.cwd = self.cwd[:-1]", "def move_up(self, directory=\"..\", start=False):\n try:\n curdir = os.getcwd()\n os.chdir(directory)\n if not start:\n self.reload(os.getcwd(), curdir)\n except:\n self.vimiv.statusbar.err_message(\"Error: directory not accessible\")", "def movedir(self):\n pass", "def move_up(self):\n\n prev_sibling = self.get_previous_sibling()\n if prev_sibling!=None: \n self.move_to(prev_sibling,'left')\n self.save()", "def move_file_up_one_level(file: Path) -> bool:\n destination = file.parents[1] / file.name\n try:\n file.rename(destination)\n return True\n except:\n return False", "def setParentIndir(self,dir):\n\n self.parentindir = dir\n self.rundir = os.path.join(self.parentindir,self.runname)", "def moveDown(currentDir):\r\n\tnewDir = input(\"Enter the directory name: \")\r\n\tif os.path.exists(currentDir + os.sep + newDir) and os.path.isdir(newDir):\r\n\t\t\tos.chdir(newDir)\r\n\telse:\r\n\t\tprint(\"ERROR: no such name...\")", "def makedirs(self, parent):\n if not parent.exists():\n logging.msg('Creating directory structure for \"%s\"' % (\n parent.path,), verbosity=2)\n parent.makedirs()", "def mkParentDir(path):\n parentDir = os.path.dirname(path)\n if parentDir:\n return mkDir(parentDir)", "def do_stage(self, mirror_only=False):\n super().do_stage(mirror_only)\n stsrc = self.stage.source_path\n srcpath = os.path.join( stsrc, self.build_directory )\n ppath = ancestor (srcpath)\n shutil.move(stsrc, stsrc+\"_old\")\n mkdirp(ppath)\n shutil.move(stsrc+\"_old\",srcpath)", "def setParentOutdir(self,dir):\n\n self.parentoutdir = dir\n self.outdir = os.path.join(self.parentoutdir,self.runname)\n\n self.setOutputDir(self.outdir)", "def change_dir(path): \r\n os.chdir(path)", "def previous_directory(self):\r\n prev_dir = Path(self.path_viewer.text()).parent\r\n self.set_new_path(str(prev_dir))", "def parentOrThisDir(path):\n if not os.path.isdir(path):\n path = os.path.dirname(path)\n return path", "def move_up(self):\n self.move_step(-1)", "def move(self, target):\n if target.relto(self):\n raise error.EINVAL(target, \"cannot move path into a subdirectory of itself\")\n try:\n self.rename(target)\n except error.EXDEV: # invalid cross-device link\n self.copy(target)\n self.remove()", "def pushd(cls, new_dir):\n previous_dir = os.getcwd()\n try:\n new_ab_dir = None\n if os.path.isabs(new_dir):\n new_ab_dir = new_dir\n else:\n new_ab_dir = os.path.join(previous_dir, new_dir)\n # Use absolute path to show it on FileNotFoundError message.\n cls.cd(new_ab_dir)\n yield\n finally:\n cls.cd(previous_dir)", "def _parent_path(cls,path):\n # os.path.dirname(), but strip directories like files (like unix basename)\n # Treat directories like files...\n if path[-1]=='/':\n path=path[:-1]\n ret = os.path.dirname(path)\n return ret", "def percolate_up(self, index):\n # reached root\n if index == 0:\n return\n\n p_ind = (index-1)//2\n # swap if parent is greater than current and continue percolating\n if self._data[p_ind] > self._data[index]:\n self.swap(p_ind, index)\n self.percolate_up(p_ind)", "def makedirs(self):\n normpath = os.path.normpath(self.path)\n parentfolder = os.path.dirname(normpath)\n if 
parentfolder:\n try:\n os.makedirs(parentfolder)\n except OSError:\n pass", "def chdir(self, path):\n # temporarily join the specified directory to see if we have\n # permissions to do so\n basedir = os.getcwd()\n try:\n os.chdir(path)\n except os.error:\n raise\n else:\n os.chdir(basedir)\n self.cwd = self.fs2ftp(path)", "def move_from_temp_directory(self):", "def reparent(self, obj, parent):\n return self.update(obj, parent=parent)", "def parent_dir_path(path):\n return absolute_path(os.path.dirname(path))", "def percolate_up(self, position):\n parent = self.parent(position)\n if position > 0 and self.table[position] < self.table[parent]: # not root and child > parent\n self.swap(position, parent)\n self.percolate_up(parent) # recurse", "def popd():\n from twill import commands\n \n where = _dirstack.pop()\n os.chdir(where)\n print('popped back to directory \"%s\"' % (where,), file=commands.OUT)\n\n commands.setglobal('__dir__', where)", "def change_dir(filename):", "def chdir(path):\n\tos.chdir(path)\n\tsyntax = \"cd '%s'.\" % path\n\tif __debug__:\n\t\tprint syntax\n\tspss.Submit(syntax)" ]
[ "0.73682714", "0.72242", "0.68628967", "0.6374753", "0.62987864", "0.6281617", "0.6231118", "0.6118306", "0.60952276", "0.607766", "0.5977025", "0.59579486", "0.59060633", "0.5898593", "0.5897268", "0.58351576", "0.58234763", "0.58182573", "0.57882106", "0.5750799", "0.57429105", "0.57413423", "0.56916356", "0.5687931", "0.56806695", "0.5660418", "0.5658234", "0.565452", "0.5652943", "0.56319505" ]
0.82439893
0
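For illustration, a minimal usage sketch of the moveUp document above; the temporary directory layout and names are assumptions made only so the snippet runs on its own.

import os
import tempfile

def moveUp():
    os.chdir("..")

# Create a nested directory, step into it, then move back up one level.
base = tempfile.mkdtemp()           # hypothetical scratch location
child = os.path.join(base, "child")
os.makedirs(child)

os.chdir(child)
print(os.getcwd())  # .../child
moveUp()
print(os.getcwd())  # the parent of child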
Returns the number of files in the cwd and all its subdirectories
def countFiles(path):
    count = 0
    lyst = os.listdir(path)
    for element in lyst:
        if os.path.isfile(element):
            count += 1
        else:
            os.chdir(element)
            count += countFiles(os.getcwd())
            os.chdir("..")
    return count
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def n_dir(self):\n self.assert_is_dir_and_exists()\n n = 0\n for _ in self.select_dir(recursive=True):\n n += 1\n return n", "def n_subdir(self):\n self.assert_is_dir_and_exists()\n n = 0\n for _ in self.select_dir(recursive=False):\n n += 1\n return n", "def count_dirs_and_files(directory='.'):\n pass", "def count_files(path):\n count = 0\n for root, dirs, files in os.walk(path):\n for f in files:\n count += 1\n return count", "def n_file(self):\n self.assert_is_dir_and_exists()\n n = 0\n for _ in self.select_file(recursive=True):\n n += 1\n return n", "def count_files_in_one_directory(dir_path):\r\n files_list = files_in_dir(dir_path)\r\n return len(files_list)", "def get_subdir_filenum(super_path):\n \"\"\"获取所有子目录下的文件个数\"\"\"\n if not os.path.exists(super_path):\n return 0\n cnt = 0\n file_list =[]\n for r, dirs, files in os.walk(super_path):\n print(dirs)\n for dr in dirs:\n print(\"nothing\")\n cnt += len(glob.glob(os.path.join(r, dr + \"/*\")))\n return cnt", "def get_nb_files(directory):\n if not os.path.exists(directory):\n return 0\n cnt = 0\n for r, dirs, files in os.walk(directory):\n for dr in dirs:\n cnt += len(glob.glob(os.path.join(r, dr + \"/*\")))\n\n print(\"Number of files in\", directory, \" is \", cnt)\n return cnt", "def get_nb_files(directory):\r\n if not os.path.exists(directory):\r\n return 0\r\n cnt = 0\r\n for r, dirs, files in os.walk(directory):\r\n for dr in dirs:\r\n cnt += len(glob.glob(os.path.join(r, dr + \"/*\")))\r\n return cnt", "def count_files(directory):\n if not os.path.exists(directory):\n return 0\n cnt = 0\n for r, dirs, files in os.walk(directory):\n for dr in dirs:\n cnt += len(glob.glob(os.path.join(r, dr + \"/*\")))\n return cnt", "def get_nb_files(directory):\n if not os.path.exists(directory):\n return 0\n cnt = 0\n for r, dirs, files in os.walk(directory):\n for dr in dirs:\n cnt += len(glob.glob(os.path.join(r, dr + \"/*\")))\n return cnt", "def countBytes(path):\r\n\tcount = 0 \r\n\tlyst = os.listdir(path)\r\n\tfor element in lyst:\r\n\t\tif os.path.isfile(element):\r\n\t\t\tcount += os.path.getsize(element)\r\n\t\telse:\r\n\t\t\tos.chdir(element)\r\n\t\t\tcount += countBytes(os.getcwd())\r\n\t\t\tos.chdir(\"..\")\r\n\treturn count", "def get_nb_files(directory):\r\n if not os.path.exists(directory):\r\n return 0\r\n cnt = 0\r\n for r, dirs, files in os.walk(directory):\r\n for dr in dirs:\r\n cnt += len(glob.glob(os.path.join(r, dr + \"/*\"))) # glob模块是用来查找匹配文件的,后面接匹配规则。\r\n return cnt", "def n_subfile(self):\n self.assert_is_dir_and_exists()\n n = 0\n for _ in self.select_file(recursive=False):\n n += 1\n return n", "def count_files_in_subdirectories(path_dir_path):\r\n counter = []\r\n\r\n for subdirectory in os.listdir(path_dir_path):\r\n counter.append(count_files_in_one_directory(\r\n os.path.join(path_dir_path, subdirectory)))\r\n\r\n return counter", "def count_files_dir(self,full_path):\n try:\n num_files = len([name for name in os.listdir(full_path) if os.path.isfile(self.FILENAME)])\n print(f\"Number of files in {full_path} is {num_files}\")\n return num_files\n except Exception as e:\n raise SystemExit(f\"Could not complete operation: {e}\")", "def count(args):\n path = os.path.abspath(args.path)\n total = 0\n\n if args.recursive:\n if os.path.exists(args.path):\n for item in os.listdir(path):\n little_path = os.path.join(path, item)\n if os.path.isfile(little_path):\n total += parse_file_count(little_path, args)\n else:\n total += count(little_path)\n else:\n print(\"EROARE: <\" + args.path +\n \"> invalid, nu putem 
ajunge acolo\")\n else:\n if os.path.isfile(args.path):\n total += parse_file_count(args.path, args)\n else:\n print(\"EROARE: <\" + args.pattern +\n \"> invalid, nu este fisier\")\n return total", "def dirsize(self):\n total = 0\n for p in self.select_file(recursive=True):\n try:\n total += p.size\n except: # pragma: no cover\n print(\"Unable to get file size of: %s\" % p)\n return total", "def nr_files(dirpath: pathlib.Path) -> int:\n nr_files_indir = 0\n for path in dirpath.iterdir():\n if not path.is_file():\n continue\n nr_files_indir += 1\n return nr_files_indir", "def all_files_size():\n size = 0\n for dirpath, _dirnames, filenames in os.walk('images'):\n for f in filenames:\n fp = os.path.join(dirpath, f)\n size += os.path.getsize(fp)\n return size", "def get_number_of_files(directory: str):\n\n number_of_files = len([item for item in os.listdir(directory) if os.path.isfile(os.path.join(directory, item))])\n print(number_of_files)\n return number_of_files", "def get_tree_size(path):\n total = 0\n for entry in os.scandir(path):\n if entry.is_dir(follow_symlinks=False):\n total += get_tree_size(entry.path)\n else:\n total += entry.stat(follow_symlinks=False).st_size\n return total", "def fileCounter(directory):", "def get_tree_size(path):\r\n size = 0\r\n try:\r\n for entry in scandir.scandir(path):\r\n if entry.is_dir():\r\n size += get_tree_size(os.path.join(path, entry.name))\r\n else:\r\n size += entry.lstat().st_size\r\n except OSError:\r\n pass\r\n return size", "def count_files(self):\n self.file_count = 0\n self.count_files_loop(self.dirpath)\n return", "def get_tree_size(dir):\n\t\t\t\t\t\ttotal = 0\n\t\t\t\t\t\tfor entry in os.scandir(dir): # import os needed\n\t\t\t\t\t\t\tif entry.is_dir(follow_symlinks=False):\n\t\t\t\t\t\t\t\ttotal += get_tree_size(entry.path)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\ttotal += entry.stat(follow_symlinks=False).st_size\n\t\t\t\t\t\treturn total", "async def num_fomod_files_to_install(self):\n n = 0\n for f in self.fomod.files_to_install:\n if f.type == \"folder\":\n n += await self.count_folder_contents(f.source)\n else:\n n += 1\n\n return n", "def count(self):\n count = 0\n # get list of intermediate directories\n dirs = []\n self.__get_list_of_interm_dirs(dirs)\n # count elements in sub-directories\n for name in dirs:\n for element in os.listdir('%s/%s' % (self.path, name)):\n if _ELEMENT_REGEXP.match(element):\n count += 1\n return count", "def count(path):\n\ttry:\n\t\treturn len(os.listdir(path))\n\texcept Exception as e:\n\t\t# We most probably hit a permission denied here\n\t\treturn -1", "def getKeywordCount(cwd, keyword):\n lines_count = 0\n\n for dirpath, dirnames, filenames in os.walk(cwd):\n for file in filenames:\n file_path = os.path.join(dirpath, file)\n lines_count += countLines(file_path, keyword)\n return lines_count" ]
[ "0.7911948", "0.7740002", "0.7673589", "0.75395423", "0.75390685", "0.7531299", "0.7515497", "0.7512118", "0.7510764", "0.75033957", "0.7492738", "0.7461917", "0.7286391", "0.7270033", "0.72661525", "0.71882087", "0.71850646", "0.71713626", "0.7166277", "0.7139466", "0.69795644", "0.694521", "0.6915523", "0.6912954", "0.6882918", "0.6827035", "0.6790353", "0.67876154", "0.6772236", "0.66751915" ]
0.7854723
1
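A minimal usage sketch for the countFiles document above, assuming that function is already defined in the session; the sample tree and file names are assumptions. The function resolves entries relative to the process working directory and chdir()s into subdirectories as it recurses, so it is called here after changing into the directory being counted.

import os
import tempfile

# Build a small sample tree: two files at the top level, one inside a subdirectory.
root = tempfile.mkdtemp()
open(os.path.join(root, "a.txt"), "w").close()
open(os.path.join(root, "b.txt"), "w").close()
os.makedirs(os.path.join(root, "sub"))
open(os.path.join(root, "sub", "c.txt"), "w").close()

os.chdir(root)                  # countFiles expects to start from the cwd
print(countFiles(os.getcwd()))  # 3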
Returns the number of bytes in the cwd and all its subdirectories
def countBytes(path):
    count = 0
    lyst = os.listdir(path)
    for element in lyst:
        if os.path.isfile(element):
            count += os.path.getsize(element)
        else:
            os.chdir(element)
            count += countBytes(os.getcwd())
            os.chdir("..")
    return count
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dirsize(self):\n total = 0\n for p in self.select_file(recursive=True):\n try:\n total += p.size\n except: # pragma: no cover\n print(\"Unable to get file size of: %s\" % p)\n return total", "def n_dir(self):\n self.assert_is_dir_and_exists()\n n = 0\n for _ in self.select_dir(recursive=True):\n n += 1\n return n", "def get_tree_size(dir):\n\t\t\t\t\t\ttotal = 0\n\t\t\t\t\t\tfor entry in os.scandir(dir): # import os needed\n\t\t\t\t\t\t\tif entry.is_dir(follow_symlinks=False):\n\t\t\t\t\t\t\t\ttotal += get_tree_size(entry.path)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\ttotal += entry.stat(follow_symlinks=False).st_size\n\t\t\t\t\t\treturn total", "def get_tree_size(path):\n total = 0\n for entry in os.scandir(path):\n if entry.is_dir(follow_symlinks=False):\n total += get_tree_size(entry.path)\n else:\n total += entry.stat(follow_symlinks=False).st_size\n return total", "def get_tree_size(path):\r\n size = 0\r\n try:\r\n for entry in scandir.scandir(path):\r\n if entry.is_dir():\r\n size += get_tree_size(os.path.join(path, entry.name))\r\n else:\r\n size += entry.lstat().st_size\r\n except OSError:\r\n pass\r\n return size", "def size_nbytes(self) -> int:\n self.__verify_repo_initialized()\n return folder_size(self._repo_path, recurse=True)", "def n_subdir(self):\n self.assert_is_dir_and_exists()\n n = 0\n for _ in self.select_dir(recursive=False):\n n += 1\n return n", "def get_dirsize(start_path):\n total_size = 0\n for dirpath, dirnames, filenames in os.walk(start_path):\n for f in filenames:\n fp = os.path.join(dirpath, f)\n total_size += os.path.getsize(fp)\n return total_size", "def get_size(start_path='.'):\n total_size = 0\n for dirpath, dirnames, filenames in os.walk(start_path):\n for f in filenames:\n fp = os.path.join(dirpath, f)\n total_size += os.path.getsize(fp)\n return total_size", "def dir_size(dir_path):\n def fsize(path):\n target = path.resolve()\n if target.is_file():\n return target.stat().st_size\n else:\n return 0\n return sum(fsize(child) for child in dir_path.iterdir())", "def all_files_size():\n size = 0\n for dirpath, _dirnames, filenames in os.walk('images'):\n for f in filenames:\n fp = os.path.join(dirpath, f)\n size += os.path.getsize(fp)\n return size", "def get_temp_dir_size(self):\n return sizeof_fmt(get_dir_size(self.temp_dir))", "def get_temp_dir_size(self):\n return sizeof_fmt(get_dir_size(self.temp_dir))", "def dir_size(start_path):\n total_size = 0\n for dirpath, dirnames, filenames in os.walk(start_path):\n for f in filenames:\n fp = os.path.join(dirpath, f)\n if os.path.exists(fp):\n try:\n total_size += os.path.getsize(fp)\n except:\n continue\n # convert to MB\n return int(total_size * 1.0 / 10000000)", "def get_size(self):\n return get_dir_size(self.run_dir)", "def dir_size(dir):\n from os.path import getsize, islink, isdir, exists, join\n\n if exists(dir) and (not isdir(dir) and not islink(dir)):\n #so, this is not a directory but file..\n return getsize(dir)\n\n if islink(dir):\n return int(len(os.readlink(dir)))\n\n def sizes():\n for root, dirs, files in os.walk(dir):\n yield sum([getsize(join(root, name)) for name in files if not islink(join(root,name))])\n yield sum([int(len(os.readlink((join(root, name))))) for name in files if islink(join(root,name))])\n return sum( sizes() )", "def count_dirs_and_files(directory='.'):\n pass", "def get_subdir_filenum(super_path):\n \"\"\"获取所有子目录下的文件个数\"\"\"\n if not os.path.exists(super_path):\n return 0\n cnt = 0\n file_list =[]\n for r, dirs, files in os.walk(super_path):\n print(dirs)\n for dr in 
dirs:\n print(\"nothing\")\n cnt += len(glob.glob(os.path.join(r, dr + \"/*\")))\n return cnt", "def test_get_directory_size_bytes():\n with tempfile.TemporaryDirectory() as tmpdir:\n tmpdir2 = os.path.join(tmpdir, \"tmp\")\n os.makedirs(tmpdir2, exist_ok=True)\n files = [\n os.path.join(tmpdir, \"file1.bin\"),\n os.path.join(tmpdir, \"file2.bin\"),\n os.path.join(tmpdir, \"tmp\", \"file3.bin\"),\n os.path.join(tmpdir, \"tmp\", \"file4.bin\"),\n ]\n data = \"1234567890\"\n for filename in files:\n with open(filename, \"w\") as f_out:\n f_out.write(data)\n\n assert get_directory_size_bytes(tmpdir, recursive=True) == 40\n assert get_directory_size_bytes(tmpdir, recursive=False) == 20", "def get_amount_of_data(directory: str):\n size = sum([os.path.getsize(os.path.join(directory, item)) for item in os.listdir(directory) if os.path.isfile(os.path.join(directory, item))])\n print(size)\n return size", "def disk_usage(path):\n total = os.path.getsize(path) # Account for direct usage of directory\n if os.path.isdir(path): # if this is a dir\n for filename in os.listdir(path): # go through the child of the directory\n childpath = os.path.join(path, filename) # Compose full path to child\n total += disk_usage(childpath)\n\n print('{0:<7}'.format(total), path)\n return total", "def dir_size(directory: str) -> int:\n size = 0\n for file in os.listdir(directory):\n filename = os.path.join(directory, file)\n size += os.path.getsize(filename)\n return size", "def get_dir_size(dirname):\n size = 0\n for fname in os.listdir(dirname):\n fname = os.path.join(dirname, fname)\n if os.path.isfile(fname):\n size += os.path.getsize(fname)\n return size", "def directorySize(directory):\n fs = 0.\n for path, dirs, files in os.walk(directory):\n for file in files:\n name = os.path.join(path, file)\n fs += os.path.getsize(name)\n\n return fs / 1024.", "def count_deleted_bytes(self): # DirObj.count_deleted_bytes\n bytes=0\n for name, d in self.subdirs.iteritems():\n bytes = bytes + d.count_deleted_bytes()\n for name, f in self.files.iteritems():\n if f.deleted:\n bytes = bytes + f.count_deleted_bytes()\n return bytes", "def n_file(self):\n self.assert_is_dir_and_exists()\n n = 0\n for _ in self.select_file(recursive=True):\n n += 1\n return n", "def size(path):", "def get_directory_size(dir_path: str) -> int:\n total_size = 0\n for dirpath, _, filenames in os.walk(dir_path):\n for f in filenames:\n fp = os.path.join(dirpath, f)\n total_size += get_file_size(fp)\n return total_size", "def __get_cache_size(self):\n total = 0\n for entry in os.scandir(self.cacheDir):\n total += entry.stat(follow_symlinks=False).st_size\n if self.__log:\n self.__logger.info(f\"Cache size: {total} bytes\")\n return total", "def folder_size(path):\n return sum(getsize(f) for f in os.listdir('.') if isfile(f))" ]
[ "0.7495459", "0.7329888", "0.73292416", "0.72654724", "0.7200473", "0.7190872", "0.71110535", "0.7103087", "0.70675343", "0.6952663", "0.69456893", "0.6905677", "0.6905677", "0.68528646", "0.6847595", "0.6846488", "0.6838742", "0.6820271", "0.6784078", "0.67830443", "0.6754694", "0.67324346", "0.6721072", "0.6690807", "0.66874325", "0.66845983", "0.6666148", "0.66292524", "0.6622177", "0.6594877" ]
0.79827064
0
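Similarly, a small sketch for the countBytes document above, again assuming that function is in scope; the byte payloads are assumptions chosen so the expected total is easy to verify by hand. Only regular files contribute to the total, and the process working directory is mutated while the recursion runs.

import os
import tempfile

root = tempfile.mkdtemp()
with open(os.path.join(root, "a.bin"), "wb") as f:
    f.write(b"12345")            # 5 bytes
os.makedirs(os.path.join(root, "sub"))
with open(os.path.join(root, "sub", "b.bin"), "wb") as f:
    f.write(b"1234567890")       # 10 bytes

os.chdir(root)                   # like countFiles, it works relative to the cwd
print(countBytes(os.getcwd()))   # 15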
Returns a list of the filenames that contain the target string in the cwd and all its subdirectories
def findFiles(target, path):
    files = []
    lyst = os.listdir(path)
    for element in lyst:
        if os.path.isfile(element):
            if target in element:
                files.append(path + os.sep + element)
        else:
            os.chdir(element)
            files.extend(findFiles(target, os.getcwd()))
            os.chdir("..")
    return files
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def locate(root = '.', target = 'info'):\n \n matches = []\n \n for root, dirnames, filenames in os.walk(root):\n for dirnames in fnmatch.filter(dirnames, target):\n matches.append(os.path.join(root, dirnames))\n \n return matches", "def recursive_search(path, target_files):\n for root, _dirs, files in os.walk(path):\n for filename in files:\n if filename in target_files:\n return os.path.join(root, filename)", "def find_files(root, directory, filename):\n\n path_list = []\n walker = os.walk(root, followlinks=True)\n for root, dirs, files in walker:\n remove_vcs_dirs(dirs)\n\n #if dirs containt 'directory', don't walk others\n if directory in dirs: dirs[:] = [directory]\n\n if root.endswith(os.path.sep + directory):\n if filename in files:\n path_list.append(os.path.join(root, filename))\n dirs[:] = []\n\n return path_list", "def find(self, path_list):\n import fnmatch\n path_list2 = []\n for pattern in path_list:\n for root, _, filenames in os.walk('.'):\n for filename in fnmatch.filter(filenames, pattern):\n path_list2.append(os.path.join(root, filename))\n return path_list2", "def _FindFileNamesInDirectory(input_api, dir_path, search_file_names):\n matches = []\n for _, _, file_names in input_api.os_walk(dir_path):\n for file_name in file_names:\n if file_name in search_file_names:\n matches.append(file_name)\n return matches", "def _recursive_file_search(self, path, pattern):\n matches = []\n for root, dirnames, filenames in os.walk(path):\n for filename in fnmatch.filter(filenames, pattern):\n matches.append(os.path.join(root, filename))\n\n return matches", "def _find_files(root_dir, should_include):\n paths = [] # Return value.\n\n is_module = lambda path: path.endswith(\".py\")\n\n # os.walk() is new in Python 2.3\n # http://docs.python.org/library/os.html#os.walk\n for dir_path, dir_names, file_names in os.walk(root_dir):\n new_paths = [os.path.join(dir_path, file_name) for file_name in file_names]\n new_paths = filter(is_module, new_paths)\n new_paths = filter(should_include, new_paths)\n paths.extend(new_paths)\n\n return paths", "def get_filepaths(keyword, directory):\n \n matches = []\n filenames_total = []\n \n for root, dirnames, filenames in os.walk(directory):\n for filename in filenames:\n if keyword in filename:\n matches.append(root + '/' + filename)\n filenames_total.append(filename)\n return matches, filenames_total", "def files_in( d ):\n return [ join(d,f) for f in os.listdir(d) if isfile(join(d,f)) ]", "def search(self, src, exclude_pattern = [\"**/*.pyc\"], include_pattern = [\"**/*.py\"]):\n src = os.path.abspath(src)\n \n _target = Path(src)\n _target._flavour.casefold = lambda x : x # basic windows path don't distinguish upper / lower case.\n allfiles = list(_target.glob(\"**/*\"))\n \n exclude = list()\n for _ex in exclude_pattern:\n exclude += _target.glob(_ex) \n \n include = list()\n for _in in include_pattern:\n include += _target.glob(_in) \n \n _target_path = set(allfiles) - set(exclude) | set(include)\n \n _target_dir_path = sorted(list(x for x in _target_path if x.is_dir() is True))\n _target_file_path = sorted(list(x for x in _target_path if x.is_file() is True))\n \n return _target_dir_path, _target_file_path", "def full_find(self, file, version):\n matches = []\n for root, dirnames, filenames in os.walk(self.checkout_path(version)):\n for filename in fnmatch.filter(filenames, file):\n matches.append(os.path.join(root, filename))\n return matches", "def get_file_list(work_dir, match_flag='*.*'):\n matches = []\n for root, dir, files in 
os.walk(work_dir):\n for items in fnmatch.filter(files, match_flag):\n matches.append(os.path.realpath(os.path.join(root, items)))\n\n return matches", "def glob(glob_pattern: str, directoryname: str) -> List[str]:\n matches = []\n for root, dirnames, filenames in os.walk(directoryname):\n for filename in fnmatch.filter(filenames, glob_pattern):\n absolute_filepath = os.path.join(root, filename)\n matches.append(absolute_filepath)\n return matches", "def files_in_dir(root_dir):\n file_set = set()\n\n for dir_, _, files in os.walk(root_dir):\n for file_name in files:\n rel_dir = os.path.relpath(dir_, root_dir)\n rel_file = os.path.join(rel_dir, file_name)\n file_set.add(rel_file)\n\n return [Path(PureWindowsPath(f)) for f in file_set]", "def get_paths(pattern):\n if not in_source_tree:\n pattern = '../' + pattern\n\n files = glob.glob(os.path.normpath(os.path.join(top_dir, pattern)))\n return files", "def _get_target_files(self) -> List[Path]:\n repo = get_git_repo()\n submodules = repo.submodules # type: ignore\n submodule_paths = [\n self._fname_to_path(repo, submodule.path) for submodule in submodules\n ]\n\n # resolve given paths relative to current working directory\n paths = [p.resolve() for p in self._paths]\n if self._base_commit is not None:\n paths = [\n a\n for a in (self._status.added + self._status.modified)\n # diff_path is a subpath of some element of input_paths\n if any((a == path or path in a.parents) for path in paths)\n ]\n changed_count = len(paths)\n click.echo(f\"| looking at {unit_len(paths, 'changed path')}\", err=True)\n paths = [\n path\n for path in paths\n if all(\n submodule_path not in path.parents\n for submodule_path in submodule_paths\n )\n ]\n if len(paths) != changed_count:\n click.echo(\n f\"| skipping files in {unit_len(submodule_paths, 'submodule')}: \"\n + \", \".join(str(path) for path in submodule_paths),\n err=True,\n )\n\n # Filter out ignore rules, expand directories\n self._ignore_rules_file.seek(0)\n patterns = Parser(self._base_path).parse(self._ignore_rules_file)\n\n file_ignore = FileIgnore(\n base_path=self._base_path, patterns=patterns, target_paths=paths\n )\n\n walked_entries = list(file_ignore.entries())\n click.echo(\n f\"| found {unit_len(walked_entries, 'file')} in the paths to be scanned\",\n err=True,\n )\n filtered: List[Path] = []\n for elem in walked_entries:\n if elem.survives:\n filtered.append(elem.path)\n\n skipped_count = len(walked_entries) - len(filtered)\n if skipped_count:\n click.echo(\n f\"| skipping {unit_len(range(skipped_count), 'file')} based on path ignore rules\",\n err=True,\n )\n\n relative_paths = [path.relative_to(self._base_path) for path in filtered]\n\n return relative_paths", "def get_path_names(directory):\n paths_without_source = set()\n paths = glob.glob(source + \"**/*.*\", recursive=True)\n for p in paths:\n paths_without_source.add(p.replace(directory, \"\", 1))\n\n return paths_without_source", "def FindCheckerFiles(path):\n if not path:\n Logger.fail(\"No source path provided\")\n elif os.path.isfile(path):\n return [ path ]\n elif os.path.isdir(path):\n foundFiles = []\n for root, dirs, files in os.walk(path):\n for file in files:\n extension = os.path.splitext(file)[1]\n if extension in [\".java\", \".smali\"]:\n foundFiles.append(os.path.join(root, file))\n return foundFiles\n else:\n Logger.fail(\"Source path \\\"\" + path + \"\\\" not found\")", "def find_files(base_path,pattern):\n res=()\n print_verbose(2,\"\\t> Recursive search: Base path = %s, pattern = %s\" %(base_path,pattern))\n for 
root, dirs, files in os.walk(base_path, topdown=True):\n for f_name in fnmatch.filter(files, pattern):\n res= res + (os.path.join(root, f_name),)\n return res;", "def get_all_files(cwd):\n return os.listdir(cwd)", "def get_target_paths(to_dir,report=False):\n paths = []\n filenames = os.listdir(to_dir)\n for filename in filenames:\n path = os.path.join(to_dir,filename)\n if filename.endswith('~') or filename in SKIPFILES:\n if report:\n print 'Skipping %s' % filename\n continue \n elif (not os.path.isfile(path)) and (not os.path.isdir(path)):\n if report:\n print 'Skipping %s (not a file or directory)' % filename\n continue\n elif filename.startswith('.'):\n if report:\n print 'Skipping %s (filename has a leading dot)' % filename\n continue\n else:\n if HOSTNAME_SEPARATOR in filename:\n # This appears to be a filename with a trailing\n # hostname, e.g. _muttrc__dulip. If the trailing\n # hostname matches the hostname of this host then we\n # link to it.\n hostname = filename.split(HOSTNAME_SEPARATOR)[-1]\n if hostname == HOSTNAME:\n paths.append(path)\n else:\n if report:\n print 'Skipping %s (different hostname)' % filename\n continue \n else:\n # This appears to be a filename without a trailing\n # hostname.\n if filename + HOSTNAME_SEPARATOR + HOSTNAME in filenames: \n if report:\n print 'Skipping %s (there is a host-specific version of this file for this host)' % filename\n continue\n else: \n paths.append(path) \n return paths", "def search_files(filename, search_path, pathsep=os.pathsep):\n clidFiles = []\n for path in search_path.split(pathsep):\n candidate = os.path.join(path, filename)\n if os.path.exists(candidate): clidFiles.append(os.path.abspath(candidate))\n return clidFiles", "def find_all_files(self):\n look4files = [ f for f in listdir(self.file_location) if isfile(join(self.file_location,f)) ]\n return look4files", "def search_file(path, f):\n output = []\n for root, dirs, files in os.walk(path, topdown=True):\n for file in files:\n if file == f:\n path = os.path.join(root, file)\n output.append(path)\n\n return output", "def find_files(substring, path, recursive=False,\n check_ext=None, ignore_invisible=True,\n ignore_substring=None):\n def check_file(f, path):\n if not (ignore_substring and ignore_substring in f):\n if substring in f:\n compl_path = os.path.join(path, f)\n if os.path.isfile(compl_path):\n return compl_path\n return False\n\n results = []\n\n if recursive:\n for par, nxt, fnames in os.walk(path):\n for f in fnames:\n fn = check_file(f, par)\n if fn:\n results.append(fn)\n\n else:\n for f in os.listdir(path):\n if ignore_invisible and f.startswith('.'):\n continue\n fn = check_file(f, path)\n if fn:\n results.append(fn)\n\n if check_ext:\n results = [r for r in results if os.path.splitext(r)[-1] == check_ext]\n\n return results", "def files_in_dir(search_dir, ignored_regex_objects: List) -> List:\n\n file_paths = []\n dir_list = os.listdir(search_dir)\n for filename in dir_list:\n\n search_dir_abspath = os.path.abspath(search_dir)\n full_name = os.path.join(search_dir_abspath, filename)\n if os.path.isdir(full_name):\n # ignore directory\n continue\n\n if os.path.islink(full_name):\n # ignore symlink\n # http://stackoverflow.com/questions/15718006/check-if-directory-is-symlink\n continue\n\n if expression_helper.is_string_matched_in_regular_expression_objects(filename, ignored_regex_objects):\n # ignore this file\n continue\n\n file_paths.append(filename)\n\n return file_paths", "def buildListOfFiles(searchGlob):\n return [fpath for fpath in 
glob2.iglob(searchGlob) if os.path.isfile(fpath)]", "def target_names(datatset_path):\n\n # to prevent confusion we avoid using nested listed comprehension\n folders = [f for f in sorted(os.listdir(datatset_path))\n if os.path.isdir(os.path.join(datatset_path, f))]\n target_names = [folder for folder in folders]\n return target_names", "def find_files(basedir, regexp):\n regexp = re.compile(regexp)\n return sorted(fn for fn in glob.glob(os.path.join(basedir, '**'),\n recursive=True)\n if regexp.match(fn))", "def walkdir_to_filelist(where, target, omit):\n log.debug(\"Scan {},searching {},ignoring {}\".format(where, target, omit))\n return tuple([os.path.join(r, f) for r, d, fs in os.walk(where)\n for f in fs if not f.startswith('.') and not f.endswith(omit)\n and f.endswith(target)]) # only target files,no hidden files" ]
[ "0.78893167", "0.70711875", "0.6743324", "0.67228645", "0.67215157", "0.67116743", "0.6680939", "0.66105115", "0.6609947", "0.6597764", "0.6591357", "0.65083253", "0.6409782", "0.6372986", "0.6369613", "0.6341358", "0.6335286", "0.6334085", "0.63313586", "0.6330451", "0.6330291", "0.6304401", "0.6285315", "0.62815964", "0.6274354", "0.6253434", "0.6230961", "0.6230926", "0.6209507", "0.6207961" ]
0.74844456
1
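A usage sketch for the findFiles document above, assuming that function is in scope; the file names are assumptions. Because each recursive call passes os.getcwd(), matches in subdirectories come back as absolute paths as well.

import os
import tempfile

root = tempfile.mkdtemp()
open(os.path.join(root, "report_2020.txt"), "w").close()
open(os.path.join(root, "notes.txt"), "w").close()
os.makedirs(os.path.join(root, "archive"))
open(os.path.join(root, "archive", "old_report.txt"), "w").close()

os.chdir(root)
print(findFiles("report", os.getcwd()))
# e.g. ['<root>/report_2020.txt', '<root>/archive/old_report.txt'] (order follows os.listdir)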
Creates samples from full dataframe using indices and n rows before. Returns a list of [joined texts from n rows before, current sentence].
def create_sample(df: pd.DataFrame, indices: list, n: int = 2) -> list:
    samples = []
    for idx in indices:
        if idx <= n:
            continue
        samples.append([
            ' '.join(df.loc[idx - n:idx - 1, 'article'].to_list()),
            df.loc[idx, 'article']
        ])
    return samples
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_sample(index_sentences, context_window_size=3):\n for index_sentence in index_sentences:\n for i, center in enumerate(index_sentence):\n for target in index_sentence[max(0, i - context_window_size): i]:\n yield center, target\n for target in index_sentence[i + 1: i + context_window_size + 1]:\n yield center, target", "def sample_rows(df, nrows):", "def target_sample(n, data = train):\n for i in range(n):\n sample = randint(0, len(data))\n print('Sample: ' + str(sample) + \"\\n\" + 'Target: ' + str(data.iloc[sample][\"target\"]) + '\\n' + 'Text: ' + data.iloc[sample][\"excerpt\"] + '\\n\\n'\n )", "def generate_sample(index_words, context_window_size):\n for index, center in enumerate(index_words):\n context = random.randint(1, context_window_size)\n # get a random target before the center word\n for target in index_words[max(0, index - context): index]:\n yield center, target\n # get a random target after the center wrod\n for target in index_words[index + 1: index + context + 1]:\n yield center, target", "def get_sample(df,n):\n idxs = sorted(np.random.permutation(len(df))[:n])\n return df.iloc[idxs].copy()", "def obtain_skipgram_dataset(corpus: list,\n window_size: int,\n ) -> pd.DataFrame:\n data = []\n for sentence in corpus:\n for idx, word in enumerate(sentence):\n lower, upper = idx - window_size, idx + window_size + 1\n \n # Find neighbours, exclude centre word (even if neighbour).\n neighbours = list(filter(\n lambda w: w != word, islice(sentence, (lower>0)*lower, upper)\n ))\n \n # Add all instances of centre word and its neighbours to data.\n data += [\n {'centre_word': word, 'context_word': neighbour} \n for neighbour in neighbours \n ]\n\n return pd.DataFrame(data)", "def sample_rides(df, nSample):\n return df.iloc[numpy.random.choice(df.index.values, nSample)]", "def generate_sample(sentences, vocab, window):\n for sentence in sentences:\n word_vocabs = [vocab[w] for w in sentence if w in vocab and\n vocab[w]['prob'] > np.random.rand()]\n\n for index, word in enumerate(word_vocabs):\n center = word['index']\n reduced_window = np.random.randint(1, window + 1)\n\n # words before the center word\n for context in word_vocabs[max(0, index - reduced_window):index]:\n target = context['index']\n yield center, target\n\n # words after the center word\n for context in word_vocabs[(index + 1):(index + 1 + reduced_window)]:\n target = context['index']\n yield center, target", "def get_slices(all_texts_df: pd.DataFrame,\n slice_length: int = 25,\n overlap_percent: float = 0) -> pd.DataFrame:\n\n if overlap_percent >= 1 or overlap_percent < 0:\n raise ValueError(\"Invalid overlap amount\")\n\n step = max(1, int(slice_length * (1 - overlap_percent)))\n\n all_slices: List[Tuple[Text, Text]] = []\n\n for row in all_texts_df.itertuples(index=False):\n tokens = row.tokens.split()\n snippets = [\n ' '.join(tokens[i:min(len(tokens), i + slice_length)])\n for i in range(0, len(tokens), step)\n ]\n all_slices += [(row.label, snippet) for snippet in snippets]\n\n return pd.DataFrame(all_slices, columns=[\"label\", \"slice\"])", "def train_test_samples(df):\n\n from math import floor\n\n shuffled_df = df.reindex(np.random.permutation(df.index))\n\n seventy_five_percent = int(floor(len(shuffled_df) * 0.75))\n train_df = shuffled_df.iloc[:seventy_five_percent, ]\n test_df = shuffled_df.iloc[seventy_five_percent:, ]\n\n return train_df, test_df", "def first_rows(self, n: int) -> \"SampleDataSet\":\n return SampleDataSet(self._data.iloc[:n].copy())", "def generate_sentence(self):\n if 
self.word_to_index is None:\n self.log.error(\"Need to load a model or data before this step.\")\n return []\n # Start sentence with the start token\n sentence = [self.word_to_index[self.sentence_start_token]]\n # Predict next word until end token is received\n while not sentence[-1] == self.word_to_index[self.sentence_end_token]:\n next_word_probs = self.forward_propagate(sentence)\n sampled_word = self.word_to_index[self.unknown_token]\n # We don't want the unknown token to appear in the sentence\n while sampled_word == self.word_to_index[self.unknown_token]:\n samples = np.random.multinomial(1, next_word_probs[-1])\n sampled_word = np.argmax(samples)\n sentence.append(sampled_word)\n sentence_str = [self.index_to_word[word] for word in sentence[1:-1]]\n return sentence_str", "def create_sample(self, sent, head_pred_id):\n return pandas.DataFrame({\"word\": sent,\n \"run_id\": [-1] * len(sent), # Mock running id\n \"head_pred_id\": head_pred_id})", "def create_sample(self, sent, head_pred_id):\n return pandas.DataFrame({\"word\": sent,\n \"run_id\": [-1] * len(sent), # Mock running id\n \"head_pred_id\": head_pred_id})", "def subsample(self, se):\n\t\tdf = ReadDF('noname', self.readdict.refmap)\n\t\tfor i in random.sample(xrange(1, self.n+1), min(se, self.n)):\n\t\t\tpos, read = self.partial_sampling_func(i)\n\t\t\tdf.add_read_to_vec(read,copy=1) # important to remember to use just this ONE copy!!!\n\t\treturn df", "def getSpeakerFirstSamples(df_samples, speaker_id, n_samples):\n df_res = df_samples.loc[df_samples['speaker_id'] == speaker_id].head(n_samples)\n return df_res", "def get_train_data(df):\n\n srch_order = []\n cat0 = df[df.category == 5].index\n cat1 = df[df.category == 1].index\n cat2 = df[df.category == 0].index\n amount = int(len(df) * .04)\n print(\"amount of rows selected: \", amount)\n\n cat2_selec = np.random.choice(cat2, amount, replace=False)\n\n cat012 = np.concatenate((cat0, cat1, cat2_selec))\n\n df_selection = df.loc[cat012]\n\n return df_selection", "def generate_n_sentences(self, results, n=100):\n logging.info(\"Attempting to generate dataset of size {}\".format(n))\n generated_sentences = []\n original_sentences = []\n for result in results[:n]:\n logging.debug(\"Original Generated sentence: {}\".format(\" \".join(result[\"sequence\"]).encode(\"utf8\")))\n token_list = self.replace_entities(result[\"sequence\"], result[\"sentence\"].entities)\n logging.debug(\"Generated sentence after entities replacement: {}\".format(\" \".join(token_list).encode(\"utf8\")))\n token_list = self.applies_capitalization(token_list, result[\"sentence\"].original_sentence)\n logging.debug(\"Generated sentence after capitalization: {}\".format(\" \".join(token_list).encode(\"utf8\")))\n generated_sentences.append(\" \".join(token_list))\n original_sentences.append(result[\"sentence\"].original_sentence)\n logging.info(\"Dataset generated has size {}\".format(len(generated_sentences)))\n return generated_sentences, original_sentences", "def sample_tissues(df,size):\n tissues_df = pd.DataFrame(df,columns=['tissueid','time','n']).drop_duplicates()\n sample = tissues_df.groupby('n').apply(lambda x: x.sample(size)) \n index = pd.MultiIndex.from_frame(sample) \n df = df.assign(i=[i for n in df.n for i in range(n)]) \n df.set_index(['tissueid','time','i'])[index]", "def _get_table_from_samples(self, index):\n df = pd.DataFrame()\n for sample in self.samples:\n sd = sample.to_dict()\n ser = pd.Series(\n {k: v for (k, v) in list(sd.items()) if not k.startswith(\"_\")}\n )\n df = 
df.append(ser, ignore_index=True)\n index = [index] if isinstance(index, str) else index\n if not all([i in df.columns for i in index]):\n _LOGGER.debug(\n \"Could not set {} index. At least one of the \"\n \"requested columns does not exist: {}\".\n format(CFG_SAMPLE_TABLE_KEY, index))\n return df\n _LOGGER.debug(\"Setting sample_table index to: {}\".format(index))\n df.set_index(keys=index, drop=False, inplace=True)\n return df", "def get_sequence_slices(df, target_seq, model_context_len, start_idx=1, scoring_window=\"optimal\", indel_mode=False):\n len_target_seq = len(target_seq)\n num_mutants = len(df['mutated_sequence'])\n df=df.reset_index(drop=True)\n if scoring_window==\"optimal\":\n df['mutation_barycenter'] = df['mutant'].apply(lambda x: int(np.array([int(mutation[1:-1]) - start_idx for mutation in x.split(':')]).mean())) if not indel_mode else df['mutated_sequence'].apply(lambda x: len(x)//2)\n df['scoring_optimal_window'] = df['mutation_barycenter'].apply(lambda x: get_optimal_window(x, len_target_seq, model_context_len)) if not indel_mode else df['mutated_sequence'].apply(lambda x: (0,len(x)))\n df['sliced_mutated_sequence'] = [df['mutated_sequence'][index][df['scoring_optimal_window'][index][0]:df['scoring_optimal_window'][index][1]] for index in range(num_mutants)]\n df['window_start'] = df['scoring_optimal_window'].map(lambda x: x[0]) \n df['window_end'] = df['scoring_optimal_window'].map(lambda x: x[1])\n del df['scoring_optimal_window'], df['mutation_barycenter']\n if 'mutant' in df: del df['mutant']\n df_wt=df.copy()\n df_wt['mutated_sequence'] = [target_seq] * num_mutants\n if indel_mode: # For indels, we set the wild type reference to be always the same (full length) sequence. We assume here that the length is lower than model context size (otherwise \"Sliding\" mode should be used)\n df_wt['window_end'] = df_wt['mutated_sequence'].map(lambda x:len(x))\n df_wt['sliced_mutated_sequence'] = [target_seq[df_wt['window_start'][index]:df_wt['window_end'][index]] for index in range(num_mutants)]\n df = pd.concat([df,df_wt], axis=0)\n df = df.drop_duplicates()\n elif scoring_window==\"sliding\":\n num_windows = 1 + int( len_target_seq / model_context_len)\n df_list=[]\n start=0\n for window_index in range(1, num_windows+1):\n df_sliced = df.copy()\n df_sliced['sliced_mutated_sequence'] = df_sliced['mutated_sequence'].map(lambda x: x[start:start+model_context_len]) \n df_sliced['window_start'] = [start] * num_mutants \n df_sliced['window_end'] = df_sliced['mutated_sequence'].map(lambda x: min(len(x), start+model_context_len)) \n df_sliced_wt = df_sliced.copy()\n df_sliced_wt['mutated_sequence'] = [target_seq] * num_mutants\n df_sliced_wt['sliced_mutated_sequence'] = df_sliced_wt['mutated_sequence'].map(lambda x: x[start:start+model_context_len])\n df_sliced_wt['window_end'] = df_sliced_wt['mutated_sequence'].map(lambda x: min(len(x), start+model_context_len)) #Need to adjust end index if WT and sequence are not same full length\n df_list.append(df_sliced)\n df_list.append(df_sliced_wt)\n start += model_context_len\n df_final = pd.concat(df_list,axis=0)\n if 'mutant' in df_final: del df_final['mutant']\n df = df_final.drop_duplicates()\n return df.reset_index(drop=True)", "def sampling(df: DataFrame, k: int) -> DataFrame:\n n = df.count()\n df = (df\n .withColumn('fake', F.lit(0))\n .withColumn('row', F.row_number().over(Window.partitionBy('fake').orderBy('_c0')))\n .drop('fake')\n )\n sample = get_spark().createDataFrame(random.sample(range(1, n + 1), k), T.IntegerType())\n 
return df.join(sample, on=df.row == sample.value).drop('row', 'value')", "def gather_rows_1(tt_mat, inds):\n cores = tt_mat.tt_cores\n slices = []\n batch_size = int(inds[0].shape[0])\n\n\n ranks = [int(tt_core.shape[0]) for tt_core in tt_mat.tt_cores] + [1, ]\n\n\n for k, core in enumerate(cores):\n i = inds[k]\n #core = core.permute(1, 0, 2, 3).to(inds.device)\n\n cur_slice = torch.index_select(core, 1, i)\n\n if k == 0:\n res = cur_slice\n\n else:\n res = res.view(batch_size, -1, ranks[k])\n curr_core = cur_slice.view(ranks[k], batch_size, -1)\n res = torch.einsum('oqb,bow->oqw', (res, curr_core))\n\n return res\n\n #slices.append(torch.index_select(core, 1, i).permute(1, 0, 2, 3))", "def make_tutorial_data(n: int) -> pd.DataFrame:\n np.random.seed(1111)\n\n dataset = pd.DataFrame({\n \"id\": list(map(lambda x: \"id%d\" % x, np.random.randint(0, 100, n))),\n \"date\": np.random.choice(pd.date_range(\"2015-01-01\", periods=100), n),\n \"feature1\": np.random.gamma(20, size=n),\n \"feature2\": np.random.normal(40, size=n),\n \"feature3\": np.random.choice([\"a\", \"b\", \"c\"], size=n)})\n\n dataset[\"target\"] = (dataset[\"feature1\"]\n + dataset[\"feature2\"]\n + dataset[\"feature3\"].apply(lambda x: 0 if x == \"a\" else 30 if x == \"b\" else 10)\n + np.random.normal(0, 5, size=n))\n\n # insert some NANs\n dataset.loc[np.random.randint(0, n, 100), \"feature1\"] = nan\n dataset.loc[np.random.randint(0, n, 100), \"feature3\"] = nan\n\n return dataset", "def _create_examples(self, df, mode):\n idx_tr, idx_te = next(ShuffleSplit(test_size=0.3, random_state=1234).split(df.title, df.totalViews))\n\n examples = []\n\n iterind = idx_tr if mode == \"train\" else idx_te\n\n for i in iterind:\n examples.append(\n InputExample(guid=i, text_a=df.title.values[i], label=df.totalViews.values[i]))\n\n return examples", "def generate_dataset(num_sequences=2**8):\n samples = []\n \n for _ in range(num_sequences): \n num_tokens = np.random.randint(1, 12)\n sample = ['a'] * num_tokens + ['b'] * num_tokens + ['EOS']\n samples.append(sample)\n \n return samples", "def pre_process_dataset(self):\n sentences = []\n idx = 1\n # Iterates of dataframe to collect sentences and labels\n for index, row in self.df.iterrows():\n # Normalizing and separate words of each sentence\n norm_sentence = self.norm_text(row['comment_text'])\n word_sentences = re.sub(\"[^\\w]\", \" \", norm_sentence).split()\n sentences.append(word_sentences)\n # Creating a word dictionary\n for word in word_sentences:\n if word not in self.word_2_idx:\n self.word_2_idx[word] = idx\n idx += 1\n # Getting all labels and creates a one-hot vector\n row_label = row[['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']].values\n self.labels.append(row_label)\n\n # Collect word indexes from prepared word dictionary\n for words_sentence in sentences:\n self.input_data.append([self.word_2_idx[w] for w in words_sentence])", "def get_sentence_bow(self):\n for idx, (feature, df) in enumerate(self.df_extractions.groupby('feature')):\n self.feature_map[idx] = feature\n yield ' '.join(df.sentence_str.tolist())", "def generate_features(\n df: pd.DataFrame, spacy_model: str, language: str\n) -> pd.DataFrame:\n logging.info(\"Loading Spacy model...\")\n nlp = spacy.load(spacy_model)\n\n # Makes all tokens lowercase\n logging.info(\"Lowercase\")\n df[\"token_lower\"] = df[\"token\"].str.lower()\n\n logging.info(\"Lemma, pos\")\n spacy_pipe = nlp.pipe(df[\"token_lower\"].values, disable=[\"ner\", \"parser\"])\n features_gen = ((doc[0].lemma_, 
doc[0].pos_) for doc in spacy_pipe)\n df[\"lemma\"], df[\"pos\"] = zip(*features_gen)\n\n # Prepare stemmers\n logging.info(\"Loading Snowball Stemmer...\")\n snow = SnowballStemmer(language=language)\n\n logging.info(\"Snowball stemmer\")\n df[\"snowballStemmer\"] = df.apply(lambda row: snow.stem(row[\"token_lower\"]), axis=1)\n\n logging.info(\"Loading Porter Stemmer...\")\n port = PorterStemmer()\n\n logging.info(\"Porter stemmer\")\n df[\"porterStemmer\"] = df.apply(lambda row: port.stem(row[\"token_lower\"]), axis=1)\n\n # Adds columns with a binary if the word contains a possible negation prefix or suffix\n logging.info(\"Prefix\")\n df[\"possible_prefix\"] = df.apply(\n lambda row: possible_negation_prefix(row[\"token_lower\"]), axis=1\n )\n\n logging.info(\"Suffix\")\n df[\"possible_suffix\"] = df.apply(\n lambda row: possible_negation_suffix(row[\"token_lower\"]), axis=1\n )\n\n # Adds new columns for the previous and next lemma and pos-tag\n logging.info(\"Add prev/next shifts\")\n df[\"prev_Lemma\"] = df[\"lemma\"].shift(periods=1)\n df[\"next_Lemma\"] = df[\"lemma\"].shift(periods=-1)\n df[\"prev_pos\"] = df[\"pos\"].shift(periods=1)\n df[\"next_pos\"] = df[\"pos\"].shift(periods=-1)\n return df", "def random_sample(self, n):\n indices = random.sample(xrange(np.shape(self.data)[0]), n)\n table = DataTable(self.data[indices], self.dims, self.legends, self.tags.copy())\n return table" ]
[ "0.6704123", "0.62329084", "0.59288085", "0.5916634", "0.59048307", "0.57954526", "0.5702059", "0.5699596", "0.55778795", "0.5568846", "0.5542461", "0.5522564", "0.54517615", "0.54517615", "0.5437269", "0.54162866", "0.53923947", "0.537376", "0.5371694", "0.5367503", "0.5356609", "0.53244853", "0.5322174", "0.53069293", "0.52655196", "0.52593315", "0.52592665", "0.5253885", "0.5247743", "0.52419317" ]
0.7631288
0
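A small worked example for the create_sample document above, assuming that function and pandas are available; the toy 'article' column is an assumption about the expected schema, taken from the column name used inside the function. The df.loc slicing relies on the frame keeping its default integer index aligned with row positions.

import pandas as pd

# One sentence per row, indexed 0..4 with the default RangeIndex.
df = pd.DataFrame({"article": ["s0.", "s1.", "s2.", "s3.", "s4."]})

# For each requested index, join the n=2 preceding rows and pair them
# with the current row; indices <= n are skipped by the function.
print(create_sample(df, indices=[3, 4], n=2))
# [['s1. s2.', 's3.'], ['s2. s3.', 's4.']]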
Whether non-None DB settings are set on this instance.
def db_settings_set(self) -> bool:
    return self._db_settings is not None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def db_settings(self) -> DBSettings:\n if self._db_settings is None:\n raise ValueError(\"No DB settings are set on this instance.\")\n return not_none(self._db_settings)", "def has_configuration_set():\r\n return getattr(settings, \"MICROSITE_CONFIGURATION\", False)", "def in_smartctl_database(self) -> bool:\n return self._in_smartctl_database", "def check_settings(self):\n pass", "def get_isreadytodb(cls):\n return cls.isreadytodb", "def apply_settings(self):\n return True", "def datadog_dbm_enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"datadog_dbm_enabled\")", "def check_settings(self):\r\n pass", "def configured(self):\r\n return bool(self._wrapped)", "def is_persistent(self):\n return self.backend.is_persistent", "async def casino_is_global(self):\n return await self.db.Settings.Global()", "def is_configured(self):\n return True", "def get_settings():\n return db.get_data()", "def is_configured(self):\n return self._session is not None", "def needs_user_db(self):\n return False", "def is_configured(self):\n pass", "def ready(self):\n return self.settings is not None", "def rdb_enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"rdb_enabled\")", "def pgsql_configured(self):\n if (\n self.kv.get(\"pgsql_host\")\n and self.kv.get(\"pgsql_port\")\n and self.kv.get(\"pgsql_db\")\n and self.kv.get(\"pgsql_user\")\n and self.kv.get(\"pgsql_pass\")\n ):\n hookenv.log(\n \"PostgreSQL is related and configured in the charm KV store: {}\".format(\n self.kv.get(\"pgsql_host\")\n ),\n hookenv.DEBUG,\n )\n return True\n hookenv.log(\n \"PostgreSQL is not yet configured in the charm KV store\", hookenv.WARNING\n )\n return False", "def isInitialized(self):\n\t\tif self.isTypeSet and self.isCfgSet:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False", "def _cached_db_connections_enabled():\n with _use_cache_flags_lock:\n return _get_cache_identifier() in _use_cache_flags", "def check_config_mode(self):\n return False", "def test_databases_variable_exists(self):\n self.assertTrue(settings.DATABASES, f\"{flag}settings module does not have a databases variable{flag}\")\n self.assertTrue('default' in settings.DATABASES, f\"{flag}default database configuration correct{flag}\")", "def needs_commons_db(self):\n return False", "def authorization(cls):\n\n return PyFunceble.CONFIGURATION.db_type in [\"mariadb\", \"mysql\"]", "def _read_settings(self):\n\n # check if the object has pivotData attribute\n if self._object.hasAttr(\"pivotData\"):\n # get the future pivot object\n self._futurePivot = auxiliary.get_valid_dag_node(\n pm.listConnections(self._object.attr(\"pivotData.futurePivot\"))[0]\n )\n\n # set isSetup flag to True\n self._isSetup = True\n\n return True\n\n return False", "def db_session_is_localized(self):\n return self.__dict__.has_key('db_session')", "def test_db_ssl_enable(self):\n\n # Check default state is SSL on\n with mock.patch.dict('os.environ', REQUIRED_SETTINGS, clear=True):\n settings_vars = self.reload_settings()\n self.assertEqual(\n settings_vars['DATABASES']['default']['OPTIONS'],\n {'sslmode': 'require'}\n )\n\n # Check enabling the setting explicitly\n with mock.patch.dict('os.environ', {\n **REQUIRED_SETTINGS,\n 'MICROMASTERS_DB_DISABLE_SSL': 'True'\n }, clear=True):\n settings_vars = self.reload_settings()\n self.assertEqual(\n settings_vars['DATABASES']['default']['OPTIONS'],\n {}\n )\n\n # Disable it\n with mock.patch.dict('os.environ', {\n **REQUIRED_SETTINGS,\n 'MICROMASTERS_DB_DISABLE_SSL': 'False'\n }, clear=True):\n 
settings_vars = self.reload_settings()\n self.assertEqual(\n settings_vars['DATABASES']['default']['OPTIONS'],\n {'sslmode': 'require'}\n )", "def configured(self):\n return self._optional or self.ready()", "def check_settings_existence(self):\n options = [\n 'AUTH_LDAP_SERVER_URI',\n 'AUTH_LDAP_USER_SEARCH_BASE',\n 'AUTH_LDAP_USER_USERNAME_ATTR',\n 'AUTH_LDAP_PROTOCOL_VERSION',\n 'AUTH_LDAP_BIND_DN',\n 'AUTH_LDAP_BIND_PASSWORD',\n ]\n for option in options:\n if not hasattr(settings, option):\n logger.error('LDAP::check_settings_existence\\tSetting %s is '\n 'not provided', option)\n sys.exit(1)" ]
[ "0.8010844", "0.6257641", "0.61342895", "0.6113809", "0.6091597", "0.60814935", "0.6062348", "0.60489297", "0.6039835", "0.60359865", "0.5959397", "0.5921009", "0.5916373", "0.5904241", "0.5895474", "0.5873816", "0.5869111", "0.5860813", "0.58566284", "0.5839428", "0.58338577", "0.58075434", "0.5794813", "0.577748", "0.57602656", "0.5753721", "0.5749044", "0.57192254", "0.5708592", "0.5702461" ]
0.86004394
0
DB settings set on this instance; guaranteed to be non-None.
def db_settings(self) -> DBSettings:\n        if self._db_settings is None:\n            raise ValueError("No DB settings are set on this instance.")\n        return not_none(self._db_settings)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def db_settings_set(self) -> bool:\n return self._db_settings is not None", "def get_settings():\n return db.get_data()", "def config_db():", "def persistent_store_settings(self):\n ps_settings = (\n PersistentStoreDatabaseSetting(\n name='tethys_super',\n description='primary database',\n initializer='heda.model.init_primary_db',\n required=True\n ),\n )\n\n return ps_settings", "def get_db_params(self):\n return self.get_section_config('db')", "def setdefaults(self):\n self.config = {\n 'dbuser': Infopage.DEFAULT_DBUSER,\n 'dbname': Infopage.DEFAULT_DBNAME,\n 'dbpassword': Infopage.DEFAULT_DBPASSWORD,\n 'dbhost': Infopage.DEFAULT_DBHOST\n }", "def connectDb(self):\n self.db = Database('sqlite',self.settings.sqlitefilename)\n self.db.user = self.session.getAttribute(self.settings.authenvar)\n self.db.settings = self.settings\n self.db.logger = self.logger\n self.db.cgiparam = self.cgiparam\n self.db.writelog = self.writelog", "async def settings(self, ctx: BBContext):\n pass", "def settings(self) -> BaseSettings:\n return self._context.settings", "def settings(self) -> BaseSettings:\n return self._context.settings", "def _get_db_settings(self):\n config_path = os.path.expanduser(self.config.get_val('DATABASE_SETTINGS_FILE'))\n settings = {}\n with FileOperations.open(config_path, 'r') as f:\n for line in f:\n line = line.rstrip()\n # Ignore empty/comment lines.\n if not line or line.startswith('#'):\n continue\n try:\n key, value = line.split(':')\n settings[key.strip()] = value.strip()\n except ValueError:\n self.error_handler.abort_framework(\"Problem in config file: '%s' -> Cannot parse line: %s\" %\n (config_path, line))\n return settings", "def set_db(self, db):\n self._db = db", "def db(self):\r\n return self._db", "def get_test_settings():\n from youtube_podcast_api.config import Settings\n settings = Settings()\n settings.db_path = \"./sql_test.db\"\n return settings", "def settings(self):\n return self._settings", "def settings(self):\n return self._settings", "def config_db(self, conf_obj, conf_section='mysql'):\n self.conf = conf_obj\n self.section = conf_section", "def settings(self):\r\n return settings.Settings(self)", "def db(self):\n if self._for_write:\n return self._db or router.db_for_write(self.model, **self._hints)\n return self._db or router.db_for_read(self.model, **self._hints)", "def __init__(self, dbconfigs: dict) -> None:\n self.dbconfig = dbconfigs", "def db_config(self) -> \"DBConfigType\":\n if self._db_config is None:\n raise ConfigurationError(\n \"DB configuration not initialised. Make sure to call \"\n \"Tortoise.init with a valid configuration before attempting \"\n \"to create connections.\"\n )\n return self._db_config", "def get_db(self):\n return self._db", "def settings(self, settings):\n\n self._settings = settings", "def getDb(self):\n return self.db", "def _db_connection(self):\n pass", "def get_settings(self):\n return self.settings", "def _database(self):\n ...", "def set_db(db):\n global db_run # Imports the DB from the simulator\n db_run=db", "def __post_init__(self):\n self.dbase = databases.Database(\n self.dsn,\n min_size=self.min_size,\n max_size=self.max_size\n )\n self.engine, self.meta = self.get_engine_metadata()", "def settings(self):\n return {}" ]
[ "0.75341564", "0.6995988", "0.67702115", "0.66987073", "0.66237766", "0.6407037", "0.63871783", "0.6344887", "0.6317197", "0.6317197", "0.6270409", "0.62143266", "0.62112087", "0.61947787", "0.6192052", "0.6192052", "0.618446", "0.6180994", "0.6140911", "0.60939735", "0.60773456", "0.60490024", "0.6043922", "0.60428184", "0.6034189", "0.6024859", "0.6022242", "0.6020398", "0.59637374", "0.5960943" ]
0.8153518
0
Loads experiment and its corresponding generation strategy from database if DB settings are set on this `WithDBSettingsBase` instance.
def _load_experiment_and_generation_strategy(\n        self, experiment_name: str\n    ) -> Tuple[Optional[Experiment], Optional[GenerationStrategy]]:\n        if not self.db_settings_set:\n            raise ValueError("Cannot load from DB in absence of DB settings.")\n        try:\n            return load_experiment_and_generation_strategy(\n                experiment_name=experiment_name, db_settings=self.db_settings\n            )\n        except ValueError:\n            return None, None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _load_generation_strategy_by_experiment_name(\n experiment_name: str,\n decoder: Decoder,\n experiment: Optional[Experiment] = None,\n reduced_state: bool = False,\n) -> GenerationStrategy:\n gs_id = _get_generation_strategy_id(\n experiment_name=experiment_name, decoder=decoder\n )\n if gs_id is None:\n raise ValueError(\n f\"Experiment {experiment_name} does not have a generation strategy \"\n \"attached to it.\"\n )\n if not experiment:\n experiment = _load_experiment(\n experiment_name=experiment_name,\n decoder=decoder,\n reduced_state=reduced_state,\n )\n return _load_generation_strategy_by_id(\n gs_id=gs_id, decoder=decoder, experiment=experiment, reduced_state=reduced_state\n )", "def load_generation_strategy_by_experiment_name(\n experiment_name: str,\n config: Optional[SQAConfig] = None,\n experiment: Optional[Experiment] = None,\n reduced_state: bool = False,\n) -> GenerationStrategy:\n config = config or SQAConfig()\n decoder = Decoder(config=config)\n return _load_generation_strategy_by_experiment_name(\n experiment_name=experiment_name,\n decoder=decoder,\n experiment=experiment,\n reduced_state=reduced_state,\n )", "def populate_db(self, namedict, experiment_link=False):\n namedict = self.fix_namedict(namedict, 'experiments')\n if not experiment_link:\n self.cur.executemany(\n \"\"\"\n INSERT INTO experiments\n (\n experiment_name,\n model_struct,\n loss_function,\n regularization_type,\n regularization_strength,\n optimizer,\n lr,\n dataset,\n regularization_type_domain,\n regularization_strength_domain,\n optimizer_domain,\n lr_domain,\n timesteps,\n timesteps_domain,\n u_t_domain,\n q_t_domain,\n t_t_domain,\n p_t_domain,\n u_t,\n q_t,\n t_t,\n p_t,\n hp_optim,\n hp_max_studies,\n hp_current_iteration,\n experiment_iteration,\n normalize_labels,\n filter_size,\n filter_size_domain\n )\n VALUES\n (\n %(experiment_name)s,\n %(model_struct)s,\n %(loss_function)s,\n %(regularization_type)s,\n %(regularization_strength)s,\n %(optimizer)s,\n %(lr)s,\n %(dataset)s,\n %(regularization_type_domain)s,\n %(regularization_strength_domain)s,\n %(optimizer_domain)s,\n %(lr_domain)s,\n %(timesteps)s,\n %(timesteps_domain)s,\n %(u_t_domain)s,\n %(q_t_domain)s,\n %(t_t_domain)s,\n %(p_t_domain)s,\n %(u_t)s,\n %(q_t)s,\n %(t_t)s,\n %(p_t)s,\n %(hp_optim)s,\n %(hp_max_studies)s,\n %(hp_current_iteration)s,\n %(experiment_iteration)s,\n %(normalize_labels)s,\n %(filter_size)s,\n %(filter_size_domain)s\n )\n \"\"\",\n namedict)\n self.cur.execute(\n \"\"\"\n UPDATE experiments\n SET experiment_link=_id\n WHERE experiment_name=%(experiment_name)s\n \"\"\",\n namedict[0])\n else:\n self.cur.executemany(\n \"\"\"\n INSERT INTO experiments\n (\n experiment_name,\n model_struct,\n loss_function,\n regularization_type,\n regularization_strength,\n optimizer,\n lr,\n dataset,\n regularization_type_domain,\n regularization_strength_domain,\n optimizer_domain,\n lr_domain,\n timesteps,\n timesteps_domain,\n u_t_domain,\n q_t_domain,\n t_t_domain,\n p_t_domain,\n u_t,\n q_t,\n t_t,\n p_t,\n hp_optim,\n hp_max_studies,\n hp_current_iteration,\n experiment_iteration,\n normalize_labels,\n filter_size,\n filter_size_domain,\n experiment_link\n )\n VALUES\n (\n %(experiment_name)s,\n %(model_struct)s,\n %(loss_function)s,\n %(regularization_type)s,\n %(regularization_strength)s,\n %(optimizer)s,\n %(lr)s,\n %(dataset)s,\n %(regularization_type_domain)s,\n %(regularization_strength_domain)s,\n %(optimizer_domain)s,\n %(lr_domain)s,\n %(timesteps)s,\n %(timesteps_domain)s,\n %(u_t_domain)s,\n 
%(q_t_domain)s,\n %(t_t_domain)s,\n %(p_t_domain)s,\n %(u_t)s,\n %(q_t)s,\n %(t_t)s,\n %(p_t)s,\n %(hp_optim)s,\n %(hp_max_studies)s,\n %(hp_current_iteration)s,\n %(experiment_iteration)s,\n %(normalize_labels)s,\n %(filter_size)s,\n %(filter_size_domain)s,\n %(experiment_link)s\n )\n \"\"\",\n namedict)\n if self.status_message:\n self.return_status('INSERT')", "def _load_generation_strategy_by_id(\n gs_id: int,\n decoder: Decoder,\n experiment: Optional[Experiment] = None,\n reduced_state: bool = False,\n) -> GenerationStrategy:\n gs_sqa = _get_generation_strategy_sqa(\n gs_id=gs_id, decoder=decoder, reduced_state=reduced_state\n )\n return decoder.generation_strategy_from_sqa(\n gs_sqa=gs_sqa, experiment=experiment, reduced_state=reduced_state\n )", "def _load_experiment(\n experiment_name: str, decoder: Decoder, reduced_state: bool = False\n) -> Experiment:\n # Convert SQA to user-facing class outside of session scope to avoid timeouts\n return decoder.experiment_from_sqa(\n experiment_sqa=_get_experiment_sqa_reduced_state(\n experiment_name=experiment_name, decoder=decoder\n )\n if reduced_state\n else _get_experiment_sqa(experiment_name=experiment_name, decoder=decoder),\n reduced_state=reduced_state,\n )", "def only_experiments_db(storage, exp_config):\n for exp in exp_config[0]:\n storage.create_experiment(exp)", "def load_experiment(self):\n load_dir = select_dir(os.getcwd())\n if load_dir is not None:\n if os.path.isfile(os.path.join(load_dir, 'conf', 'config')):\n self.load_main(load_dir)\n else:\n msg_window('missing conf/config file, not experiment directory')\n return\n\n if self.t is None:\n self.t = Tabs(self)\n self.vbox.addWidget(self.t)\n self.t.clear_configs()\n self.t.load_conf(load_dir)\n\n self.set_experiment(True)\n else:\n msg_window('please select valid conf directory')", "def load_generation_strategy_by_id(\n gs_id: int,\n config: Optional[SQAConfig] = None,\n experiment: Optional[Experiment] = None,\n reduced_state: bool = False,\n) -> GenerationStrategy:\n config = config or SQAConfig()\n decoder = Decoder(config=config)\n return _load_generation_strategy_by_id(\n gs_id=gs_id, decoder=decoder, experiment=experiment, reduced_state=reduced_state\n )", "def test_load_experiment(self):\n exp = Experiment(self.epath,\n normalization='ch0',\n auto_alignment=False)\n self.assertTrue(isinstance(exp, Experiment))", "def load_inst(self):\n self.sanity_check()\n\n fname_pub_auth_all = ''.join([self.config.dir_data, '/',\n self.config.fname_pub_auth_all, '_',\n self.config.experiment_id, '.pk'])\n fname_pub_auth_top = ''.join([self.config.dir_data, '/',\n self.config.fname_pub_auth_top, '_',\n self.config.experiment_id, '.pk'])\n fname_pub_inst_all = ''.join([self.config.dir_data, '/',\n self.config.fname_pub_inst_all, '_',\n self.config.experiment_id, '.pk'])\n fname_pub_inst_top = ''.join([self.config.dir_data, '/',\n self.config.fname_pub_inst_top, '_',\n self.config.experiment_id, '.pk'])\n self.pub_auth_all = pickle.load(open(fname_pub_auth_all, 'rb'))\n self.pub_auth_top = pickle.load(open(fname_pub_auth_top, 'rb'))\n self.pub_inst_all = pickle.load(open(fname_pub_inst_all, 'rb'))\n self.pub_inst_top = pickle.load(open(fname_pub_inst_top, 'rb'))\n\n fname_pub_history = ''.join([self.config.dir_data, '/history_',\n self.config.experiment_id, '.pk'])\n self.history = pickle.load(open(fname_pub_history, 'rb'))\n\n fname_pub_staff = ''.join([self.config.dir_data, '/staff_',\n self.config.experiment_id, '.pk'])\n self.staff = pickle.load(open(fname_pub_staff, 'rb'))", 
"def from_database(cls, expt_class=ImagingExperiment,\n name='unnamed', parallelize=False, **db_kwargs):\n trial_ids = fetch_trials(**db_kwargs)\n return cls.from_trial_ids(trial_ids, expt_class=expt_class,\n name=name, parallelize=parallelize)", "def load_db(path_to_db):\n db_run = db(path_to_db) # Instantiates the DB by reading the file\n db_run.import_config_db() # Imports configuration DB\n db_run.conn.row_factory = sqlite3.Row # Better select results\n return(db_run)", "def set_db(self, db_file=predicted_actions_db, cache_size=None):\n\n db = TinyDB(db_file)\n self.table = db.table(\"scenarios\", cache_size=cache_size)", "def _instantiate_graph_db(self):\n graph_db_name = self.conf_manager.get_graph_db()\n plugin_parameters = [self.conf_manager]\n self.graph_db = self._load_plugins([graph_db_name],\n common.GRAPH_PACKAGE,\n paths.GRAPH_DB_DIR,\n plugin_parameters)[0]", "def get_test_strategy(context, **kw):\n obj_cls = objects.Strategy\n db_data = db_utils.get_test_strategy(**kw)\n obj_data = _load_related_objects(context, obj_cls, db_data)\n\n return _load_test_obj(context, obj_cls, obj_data, **kw)", "def load_experiment(\n experiment_name: str,\n config: Optional[SQAConfig] = None,\n reduced_state: bool = False,\n) -> Experiment:\n config = config or SQAConfig()\n decoder = Decoder(config=config)\n return _load_experiment(\n experiment_name=experiment_name, decoder=decoder, reduced_state=reduced_state\n )", "def load(extended=False):\n\n _fetch_large()\n if extended:\n return _load(cache_experiment_extended, _parse_experiment)\n else:\n return _load(cache_experiment, _parse_experiment)", "def test_load_database_after_pickling(tmp_path):\n path = tmp_path / \"test.db\"\n database = load_database(path_or_database=path, fast_logging=False)\n database = pickle.loads(pickle.dumps(database))\n assert hasattr(database.engine, \"connect\")", "def _save_experiment_to_db_if_possible(\n self, experiment: Experiment, suppress_all_errors: bool = False\n ) -> bool:\n if self.db_settings_set:\n save_experiment(experiment=experiment, db_settings=self.db_settings)\n return True\n return False", "def _parse_synthetic_experiment(\n self,\n experiment: GeneratedExperiment,\n experiment_series: GeneratedExperimentSeries,\n ) -> dei.ExperimentFromDb:\n ParamType = Union[float, str, int, bool]\n LRIDictType = Dict[str, Dict[str, ParamType]]\n\n _stl_id = f\"sdg_stl_file_{self.get_random_id()}\"\n _series_id = f\"sdg_series_{experiment_series.experiment_series_id}\"\n _user_id = f\"sdg_user_{self.get_random_id()}\"\n _experiment_id = experiment.experiment_id\n _now = datetime.now()\n _material = dei.Material.from_dict(experiment.to_dict())\n\n rating_dict = experiment.qualities.copy()\n rating_dict['user_id'] = _user_id\n rating_dict['rating_date'] = _now\n rating_dict['comment'] = \"created by SDG\"\n _rating = dei.Rating.from_dict(rating_dict)\n\n # mock some environment measurements\n _measurements = [\n dei.Measurement(humidity=42., temperature=42., time=_now)\n ]\n\n # create non-influential mock environment influences, too\n _influences = [\n dei.Influence(\n material_color=dei.StringParameter(_material.material_color,\n False),\n material_type=dei.StringParameter(_material.material_type,\n False),\n material_producer=dei.StringParameter(\n _material.material_producer, False),\n temperature=dei.NumericParameter(_measurements[0].temperature,\n False),\n humidity=dei.NumericParameter(_measurements[0].humidity,\n False),\n )\n ]\n\n experiment_index = experiment_series.experiments.index(experiment)\n\n 
# define the selected parameters as a changed UI parameter\n if experiment_index != 0:\n _changed_ui_parameters = [\n dei.ChangedUIParameter(\n label=parameter,\n user_value=str(experiment.parameters[parameter]),\n key=parameter\n )\n for parameter in experiment.adjusted_parameters\n ]\n else:\n _changed_ui_parameters = []\n\n lri: Optional[dei.LastRatingInfluence] = None\n if experiment_index != 0:\n # LastRatingInfluences excpects camelCase dict keys instead of snake_case.\n last_rating_influences_dict: LRIDictType = {\n to_camel_case(key): {\n \"value\": value,\n \"influential\": key in experiment.optimized_qualities\n }\n for key, value in experiment.qualities.items()\n }\n last_rating_influences_dict['experiment_id'] = {\n \"value\": experiment_series.experiments[experiment_index - 1].experiment_id,\n \"influential\": False\n }\n\n # rename overall_ok to comply to interface as it is expected in the AIPE database\n ok_tup = last_rating_influences_dict[\"overallOk\"]\n del last_rating_influences_dict[\"overallOk\"]\n last_rating_influences_dict[\"overall\"] = ok_tup\n\n last_rating_influences_dict['comment'] = {\n \"value\": \"created by SDG\",\n \"influential\": False\n }\n lri = dei.LastRatingInfluence.from_dict(last_rating_influences_dict)\n\n _last_rating_influences = [lri]\n\n # bundle it all up into an Insight\n _insights = dei.Insights(\n comment=\"created by SDG\",\n influences=_influences,\n changed_ui_parameters=_changed_ui_parameters,\n user_id=_user_id,\n uncertainty=1.0,\n last_rating_influences=_last_rating_influences)\n # finally, wrap it all up into a nice experiment 🎁\n parsed_experiment = dei.ExperimentFromDb(\n id=_experiment_id,\n printer=\"sdg_printer\",\n stl_file_id=_stl_id,\n material=_material,\n oneoff=False,\n all_parameters=experiment.parameters,\n insights=_insights,\n measurements=_measurements,\n completion=1.0,\n ratings=[_rating],\n series_id=_series_id)\n\n return parsed_experiment", "def load_or_create_db(self):\n try:\n with open(self._filename, 'rb') as f:\n self.db = pickle.load(f)\n except FileNotFoundError:\n pass", "def initialize_test_db(self):\n # Create a test database and sync it with models.py\n # Handle a second test database for selenium use. 
Postgres uses\n # transactions which interfere with the Django server thread.\n settings.TEST_DATABASE_NAME = self.db_name\n connection.creation.create_test_db(verbosity=self.verbosity,\n autoclobber=True)\n # Hook for doing any extra initialization\n self.extra_init()\n # Load fixture data.\n call_command('loaddata', *self.fixtures, verbosity=self.verbosity)\n # Sync data and close connection\n connection.close()\n # If sqlite3 or Postgres is used, create a backup database to speed up\n # fixture reloading.\n if settings.DATABASE_ENGINE == 'postgresql_psycopg2':\n # connection.creation is used to overcome transaction management,\n # allowing to execute DROP and CREATE db commands.\n cursor = connection.cursor()\n connection.creation.set_autocommit()\n cursor.execute(\"DROP DATABASE IF EXISTS %s_backup\" % self.db_name)\n cursor.execute(\"CREATE DATABASE %s_backup WITH TEMPLATE %s\" % (\n self.db_name, self.db_name))\n if settings.DATABASE_ENGINE == 'sqlite3':\n self.db_path = os.path.join(PROJECT_PATH, settings.DATABASE_NAME)\n self.db_backup_path = '%s_backup' % self.db_path\n if self.db_path[-3:] == '.db':\n self.db_backup_path = '%s_backup.db' % self.db_path[:-3]\n shutil.copyfile(self.db_path, self.db_backup_path)\n # Restore the database names as create_test_db changed it.\n settings.TEST_DATABASE_NAME = self.test_database_name\n settings.DATABASE_NAME = self.database_name", "def _save_generation_strategy_to_db_if_possible(\n self, generation_strategy: GenerationStrategy, suppress_all_errors: bool = False\n ) -> bool:\n if self.db_settings_set:\n save_generation_strategy(\n generation_strategy=generation_strategy, db_settings=self.db_settings\n )\n return True\n return False", "def test_evolve_load_generation(logger):\n\n generations = 2\n layers.packet.Packet.reset_restrictions()\n\n options = {}\n options[\"population_size\"] = 2\n options[\"in-trees\"] = 0\n options[\"out-trees\"] = 1\n options[\"in-actions\"] = 0\n options[\"out-actions\"] = 3\n options[\"library\"] = False\n options[\"seed\"] = None\n\n for generation_index in range(generations):\n population = []\n population_str = ''\n\n # Generate random strategies to initialize the population\n for i in range(options[\"population_size\"]):\n p = evolve.generate_strategy(logger, options[\"in-trees\"], options[\"out-trees\"], options[\"in-actions\"],\n options[\"out-actions\"],\n options[\"seed\"], environment_id=None)\n print(str(p))\n actions.utils.parse(str(p), logger)\n population.append(p)\n if i == options[\"population_size\"] - 1:\n population_str += str(p)\n else:\n population_str += str(p) + \"\\n\"\n\n # Write the generation file\n filename = os.path.join(test_files_directory, \"generation\" + str(generation_index))\n evolve.write_generation(filename, population)\n\n cmd = [\n \"--population\", \"3\",\n \"--generations\", \"1\",\n \"--test-type\", \"http\",\n \"--load-from\", filename,\n \"--port\", \"80\",\n \"--protos\", \"ip,udp,tcp,dns,dnsqr\",\n \"--censor\", \"censor2\",\n \"--log\", \"debug\",\n \"--no-skip-empty\",\n ]\n print(evolve.driver(cmd))", "def load_testdb(c, dbname=\"test_template\", fpath=\"tests/test_db.sql\"):\n default_env = {\n \"PATH\": os.environ[\"PATH\"],\n \"PYTHONPATH\": os.path.abspath(os.path.dirname(__file__)),\n \"LANG\": \"en_US.UTF-8\",\n \"POSTGRES_DB\": dbname,\n \"POSTGRES_HOST\": \"localhost\",\n \"POSTGRES_USER\": \"postgres\",\n \"POSTGRES_PORT\": \"5432\",\n }\n\n env = os.environ\n env.update(default_env)\n\n psql_command = (\n f'psql -h {default_env[\"POSTGRES_HOST\"]} '\n 
f'-p {default_env[\"POSTGRES_PORT\"]} '\n f'-U {default_env[\"POSTGRES_USER\"]}'\n )\n\n c.run(f'{psql_command} postgres -c \"drop database if exists {dbname}\";', env=env)\n c.run(f'{psql_command} postgres -c \"create database {dbname}\";', env=env)\n c.run(f\"{psql_command} {dbname} < {fpath}\", env=env)\n # update test db to the latest migrations\n c.run(f\"alembic -c ./alembic.ini upgrade head\", env=env)", "def init_database(self):\n init_database(self.engine)", "def test_create_experiment_debug_mode(self, tmp_path):\n\n conf_file = str(tmp_path / \"db.pkl\")\n\n experiment = create_experiment(\n config[\"name\"],\n space={\"x\": \"uniform(0, 10)\"},\n storage={\n \"type\": \"legacy\",\n \"database\": {\"type\": \"pickleddb\", \"host\": conf_file},\n },\n )\n\n storage = experiment._experiment._storage\n assert isinstance(storage, Legacy)\n assert isinstance(storage._db, PickledDB)\n\n experiment = create_experiment(\n config[\"name\"],\n space={\"x\": \"uniform(0, 10)\"},\n storage={\"type\": \"legacy\", \"database\": {\"type\": \"pickleddb\"}},\n debug=True,\n )\n\n storage = experiment._experiment._storage\n assert isinstance(storage, Legacy)\n assert isinstance(storage._db, EphemeralDB)", "def _load_db(self):\n for type_ in self._types:\n try:\n type_.table(self._metadata)\n except InvalidRequestError:\n pass\n # Reflect metadata so auto-mapping works\n self._metadata.reflect(self._engine)\n # Make sure the tables exist\n self._metadata.create_all()", "def load_static():\n\n for i, row in enumerate(open(\"seed_data/homepage_feature.static\")):\n row = row.rstrip()\n title, body, img_path_xs, img_path_sm, img_path_md, img_path_lg, is_active = row.split(\"|\")\n homepage_feature = HomepageFeatureModel(title=title,\n body=body,\n img_path_xs=img_path_xs,\n img_path_sm=img_path_sm,\n img_path_md=img_path_md,\n img_path_lg=img_path_lg,\n is_active=is_active)\n db.session.add(homepage_feature)\n\n for i, row in enumerate(open(\"seed_data/help_article.static\")):\n row = row.rstrip()\n title, description, body = row.split(\"|\")\n help_article = HelpArticleModel(title=title, \n description=description, \n body=body)\n db.session.add(help_article)\n\n db.session.commit()", "def init_database(self):\n # init_database(self.engine)" ]
[ "0.606756", "0.59268576", "0.5774875", "0.56967384", "0.56148547", "0.560851", "0.55462694", "0.5474125", "0.54704547", "0.5441102", "0.5435659", "0.5381648", "0.5344024", "0.52874494", "0.52411336", "0.52409303", "0.5229423", "0.52244365", "0.5214454", "0.51805246", "0.51445365", "0.51433516", "0.5136363", "0.5102985", "0.5072156", "0.5044175", "0.5039006", "0.5035012", "0.5028296", "0.5014957" ]
0.75344306
0
Saves attached experiment and generation strategy if DB settings are set on this `WithDBSettingsBase` instance.
def _save_experiment_to_db_if_possible(\n        self, experiment: Experiment, suppress_all_errors: bool = False\n    ) -> bool:\n        if self.db_settings_set:\n            save_experiment(experiment=experiment, db_settings=self.db_settings)\n            return True\n        return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _saveExperiment(self, experiment, path):\n Experiment.save(experiment, path);", "def save_db(self) -> None:", "def _save_generation_strategy_to_db_if_possible(\n self, generation_strategy: GenerationStrategy, suppress_all_errors: bool = False\n ) -> bool:\n if self.db_settings_set:\n save_generation_strategy(\n generation_strategy=generation_strategy, db_settings=self.db_settings\n )\n return True\n return False", "def saveSettings(self):\n self.genFiles.applyData()\n self.genGraph.applyData()", "def store_experiment(self, mode=\"r\", featureDesp=\"all\"):\n self.log.info(\"Storing Experiment\")\n\n with self.con:\n cur = self.con.cursor()\n query = \"INSERT INTO BG_experiment_run (model, parameters, svn_rev, experiment_id, features, `type`) \" \\\n \" VALUES (%(model)s, %(parameters)s, %(svn_rev)s, %(experiment_id)s, %(features)s, %(type)s)\"\n cur.execute(query, {\n 'model': self.algorithm,\n 'parameters': \"-\",\n 'svn_rev': self.svn_rev,\n 'experiment_id': self.batchId,\n 'features': featureDesp,\n 'type': mode\n })\n return cur.lastrowid", "def save(self):\n with open(os.path.join(self.save_path, \"experiment.delira.pkl\"),\n \"wb\") as f:\n pickle.dump(self, f)\n\n self.params.save(os.path.join(self.save_path, \"parameters\"))", "def save_inst(self):\n self.sanity_check()\n self.data_loaded_check()\n\n fname_pub_auth_all = ''.join([self.config.dir_data, '/',\n self.config.fname_pub_auth_all, '_',\n self.config.experiment_id, '.pk'])\n fname_pub_auth_top = ''.join([self.config.dir_data, '/',\n self.config.fname_pub_auth_top, '_',\n self.config.experiment_id, '.pk'])\n fname_pub_inst_all = ''.join([self.config.dir_data, '/',\n self.config.fname_pub_inst_all, '_',\n self.config.experiment_id, '.pk'])\n fname_pub_inst_top = ''.join([self.config.dir_data, '/',\n self.config.fname_pub_inst_top, '_',\n self.config.experiment_id, '.pk'])\n\n pickle.dump(self.pub_auth_all, open(fname_pub_auth_all, 'wb'))\n pickle.dump(self.pub_auth_top, open(fname_pub_auth_top, 'wb'))\n pickle.dump(self.pub_inst_all, open(fname_pub_inst_all, 'wb'))\n pickle.dump(self.pub_inst_top, open(fname_pub_inst_top, 'wb'))\n\n fname_pub_history = ''.join([self.config.dir_data, '/history_',\n self.config.experiment_id, '.pk'])\n pickle.dump(self.history, open(fname_pub_history, 'wb'))\n\n fname_pub_staff = ''.join([self.config.dir_data, '/staff_',\n self.config.experiment_id, '.pk'])\n pickle.dump(self.staff, open(fname_pub_staff, 'wb'))", "def save_experiment(self, file_name: str, ovr_if_exists=False) -> None:\n if not self.populated:\n raise ExperimentException(\"Empty experiment class cannot be saved. 
Load or analyze experiment first.\")\n if ovr_if_exists:\n dfile = h5py.File(file_name, \"w\")\n else:\n dfile = h5py.File(file_name, \"x\")\n try:\n dfile.create_dataset(\"version\", data=self.version) # for later backwards compatibility\n # save general experiment data\n dfile.create_dataset(\"experiment_name\", data=self.experiment_name)\n dfile.create_dataset(\"original_path\", data=self.original_path)\n dfile.create_dataset(\"scope_name\", data=self.scope_name)\n dfile.create_dataset(\"comment\", data=self.comment)\n dfile.create_dataset(\"n_planes\", data=self.n_planes)\n dfile.create_dataset(\"tail_frame_rate\", data=self.tail_frame_rate)\n # save singular parameter dictionary\n self._save_dictionary(self.info_data, \"info_data\", dfile)\n # save augmentation flag\n if int(self.version) > 1:\n dfile.create_dataset(\"tail_data_augmented\", data=self.tail_data_augmented)\n # save per-plane data\n for i in range(self.n_planes):\n plane_group = dfile.create_group(str(i))\n self._save_dictionary(self.scanner_data[i], \"scanner_data\", plane_group)\n if len(self.tail_data) > 0:\n plane_group.create_dataset(\"tail_data\", data=self.tail_data[i], compression=\"gzip\",\n compression_opts=5)\n if self.bout_data[i] is not None:\n plane_group.create_dataset(\"bout_data\", data=self.bout_data[i], compression=\"gzip\",\n compression_opts=5)\n else:\n # no bouts were found, save dummy array of one line of np.nan\n bd = np.full((1, 8), np.nan)\n plane_group.create_dataset(\"bout_data\", data=bd, compression=\"gzip\", compression_opts=5)\n plane_group.create_dataset(\"tail_frame_time\", data=self.tail_frame_times[i])\n if int(self.version) > 1 and len(self.replaced_tail_frames) > 0:\n plane_group.create_dataset(\"replaced_tail_frames\", data=self.replaced_tail_frames[i],\n compression=\"gzip\", compression_opts=5)\n if len(self.laser_data) > 0:\n plane_group.create_dataset(\"laser_data\", data=self.laser_data[i], compression=\"gzip\",\n compression_opts=5)\n plane_group.create_dataset(\"projection\", data=self.projections[i], compression=\"gzip\",\n compression_opts=5)\n plane_group.create_dataset(\"func_stack\", data=self.func_stacks[i], compression=\"gzip\",\n compression_opts=5)\n if len(self.anat_projections) > 0: # this is a dual-channel experiment\n plane_group.create_dataset(\"anat_projection\", data=self.anat_projections[i], compression=\"gzip\",\n compression_opts=5)\n plane_group.create_dataset(\"C\", data=self.all_c[i], compression=\"gzip\", compression_opts=5)\n plane_group.create_dataset(\"dff\", data=self.all_dff[i], compression=\"gzip\", compression_opts=5)\n plane_group.create_dataset(\"centroids\", data=self.all_centroids[i], compression=\"gzip\",\n compression_opts=5)\n plane_group.create_dataset(\"sizes\", data=self.all_sizes[i], compression=\"gzip\", compression_opts=5)\n plane_group.create_dataset(\"spatial\", data=self.all_spatial[i], compression=\"gzip\", compression_opts=5)\n # due to mixed python types in caiman parameter dictionaries these currently get pickled\n ps = json.dumps(self.mcorr_dicts[i])\n plane_group.create_dataset(\"mcorr_dict\", data=ps)\n ps = json.dumps(self.cnmf_extract_dicts[i])\n plane_group.create_dataset(\"cnmf_extract_dict\", data=ps)\n ps = json.dumps(self.cnmf_val_dicts[i])\n plane_group.create_dataset(\"cnmf_val_dict\", data=ps)\n finally:\n dfile.close()", "def saveSettings(self):\n helpers.saveFile(self.dataDir, self.settingsFilename, json.dumps(self.settings))", "def _save_new_trial_to_db_if_possible(\n self,\n experiment: Experiment,\n trial: 
BaseTrial,\n suppress_all_errors: bool = False,\n ) -> bool:\n if self.db_settings_set:\n save_new_trial(\n experiment=experiment, trial=trial, db_settings=self.db_settings\n )\n return True\n return False", "def get_save_strategy(self):\r\n return self.save_strategy", "def save(self, filepath):\n save_ckpt = {\n 'ae': self.state_dict(),\n 'optimizer': self.optimizer.state_dict()\n }\n try:\n torch.save(save_ckpt, os.path.join(filepath, 'ckpt_ae.pth'))\n except:\n print('Cannot save autoencoder.')", "def save_experiment_config(self):\n\n if (self.use_dist and dist.get_rank() == 0) or not self.use_dist:\n logfile = os.path.join(self.experiment_dir, 'parameters.txt')\n log_file = open(logfile, 'w')\n log_file.write('\\n')\n json.dump(self.args.__dict__, log_file, indent=2)\n log_file.write('\\n')\n log_file.close()", "def save(\n self,\n suppress_errors: bool = True,\n max_workers: int = 3,\n save_figures: bool = True,\n save_children: bool = True,\n ) -> None:\n # TODO - track changes\n if not self._service:\n LOG.warning(\n \"Experiment cannot be saved because no experiment service is available. \"\n \"An experiment service is available, for example, \"\n \"when using an IBM Quantum backend.\"\n )\n if suppress_errors:\n return\n else:\n raise ExperimentDataSaveFailed(\"No service found\")\n if max_workers > self._max_workers_cap:\n LOG.warning(\n \"max_workers cannot be larger than %s. Setting max_workers = %s now.\",\n self._max_workers_cap,\n self._max_workers_cap,\n )\n max_workers = self._max_workers_cap\n self._save_experiment_metadata(suppress_errors=suppress_errors)\n if not self._created_in_db:\n LOG.warning(\"Could not save experiment metadata to DB, aborting experiment save\")\n return\n\n analysis_results_to_create = []\n for result in self._analysis_results.values():\n analysis_results_to_create.append(result._db_data)\n try:\n self.service.create_analysis_results(\n data=analysis_results_to_create,\n blocking=True,\n json_encoder=self._json_encoder,\n max_workers=max_workers,\n )\n for result in self._analysis_results.values():\n result._created_in_db = True\n except Exception as ex: # pylint: disable=broad-except\n # Don't automatically fail the experiment just because its data cannot be saved.\n LOG.error(\"Unable to save the experiment data: %s\", traceback.format_exc())\n if not suppress_errors:\n raise ExperimentDataSaveFailed(\n f\"Analysis result save failed\\nError Message:\\n{str(ex)}\"\n ) from ex\n\n for result in self._deleted_analysis_results.copy():\n with service_exception_to_warning():\n self._service.delete_analysis_result(result_id=result)\n self._deleted_analysis_results.remove(result)\n\n if save_figures:\n with self._figures.lock:\n figures_to_create = []\n for name, figure in self._figures.items():\n if figure is None:\n continue\n # currently only the figure and its name are stored in the database\n if isinstance(figure, FigureData):\n figure = figure.figure\n LOG.debug(\"Figure metadata is currently not saved to the database\")\n if isinstance(figure, pyplot.Figure):\n figure = plot_to_svg_bytes(figure)\n figures_to_create.append((figure, name))\n self.service.create_figures(\n experiment_id=self.experiment_id,\n figure_list=figures_to_create,\n blocking=True,\n max_workers=max_workers,\n )\n\n for name in self._deleted_figures.copy():\n with service_exception_to_warning():\n self._service.delete_figure(experiment_id=self.experiment_id, figure_name=name)\n self._deleted_figures.remove(name)\n\n if not self.service.local and self.verbose:\n print(\n 
\"You can view the experiment online at \"\n f\"https://quantum-computing.ibm.com/experiments/{self.experiment_id}\"\n )\n # handle children, but without additional prints\n if save_children:\n for data in self._child_data.values():\n original_verbose = data.verbose\n data.verbose = False\n data.save(\n suppress_errors=suppress_errors,\n max_workers=max_workers,\n save_figures=save_figures,\n )\n data.verbose = original_verbose", "def _save_updated_trial_to_db_if_possible(\n self,\n experiment: Experiment,\n trial: BaseTrial,\n suppress_all_errors: bool = False,\n ) -> bool:\n if self.db_settings_set:\n save_updated_trial(\n experiment=experiment, trial=trial, db_settings=self.db_settings\n )\n return True\n return False", "def save_config(self):\n\n return self.perform_action('/mgmtd/db/save')", "def save(self):\n self.backend.save(list(self._d.items()))\n log.debug(\"save: {}\".format(self.backend.filename))", "def _save(self):\n self.logger.debug(\"Saving to persistence\")\n try:\n data = self.persistence_serialize()\n except NotImplementedError:\n # allow backwards compatibility or persisted_values way\n # generate item to be persisted by gathering all variables\n # to be persisted into a dictionary\n data = {persisted_var: getattr(self, persisted_var)\n for persisted_var in self.persisted_values()}\n\n # save generated dictionary under block's id\n self._persistence.save(data, self.id())", "def save_settings(self):\n settings = {'camera': self.comboCamera.currentIndex(),\n 'rotation': self.comboRotation.currentIndex(),\n 'colors': {\n 'min_hue': self.spinMinHue.value(),\n 'max_hue': self.spinMaxHue.value(),\n 'min_saturation': self.spinMinSaturation.value(),\n 'max_saturation': self.spinMaxSaturation.value(),\n 'min_value': self.spinMinValue.value(),\n 'max_value': self.spinMaxValue.value(),\n }, 'diameter': self.spinDiameter.value(),\n 'lifter': self.lineEditLifter.text(),\n 'save_video': self.checkSaveVideo.isChecked()\n }\n settings_file = open('./resources/settings.json', 'w')\n json.dump(settings, settings_file, indent=4)\n settings_file.close()\n self.statusbar.clearMessage()\n self.statusbar.showMessage('Settings saved.', 5000)", "def save_settings(self):\n with open(self.settings_path, \"w\") as f:\n json.dump(self.settings, f, indent=4)", "async def save(self, job, options=None):\n if options is None:\n options = {}\n\n if not options.get('secretseed'):\n bundle = False\n filename = '/data/freenas-v1.db'\n else:\n bundle = True\n filename = tempfile.mkstemp()[1]\n os.chmod(filename, 0o600)\n with tarfile.open(filename, 'w') as tar:\n tar.add('/data/freenas-v1.db', arcname='freenas-v1.db')\n tar.add('/data/pwenc_secret', arcname='pwenc_secret')\n\n def read_write():\n with open(filename, 'rb') as f:\n f2 = os.fdopen(job.write_fd, 'wb')\n while True:\n read = f.read(1024)\n if read == b'':\n break\n f2.write(read)\n f2.close()\n await self.middleware.run_in_thread(read_write)\n\n if bundle:\n os.remove(filename)", "def save(self, i_episode):\n if i_episode % self.state.config.save_freq == 0:\n if self.state._models is None:\n self.register_models()\n save_dir = os.path.join(MODULE_CONFIG.BaseConfig.PATH_CHECKPOINT, str(i_episode))\n Directories.mkdir(save_dir)\n for k, model in self.state._models.items():\n model.save(\n file_name_with_path=os.path.join(save_dir,\n f'e_{i_episode}_{k if model.name == \"\" else model.name}.th'))\n\n with open(os.path.join(save_dir, f\"e_{i_episode}.meta\"), 'w') as f:\n json.dump(self.state, f, cls=CustomJsonEncoder, indent=2)\n _exp_meta = 
json.load(open(os.path.join(MODULE_CONFIG.BaseConfig.BASE_DIR, '..', '..',\n MODULE_CONFIG.BaseConfig.EXPERIMENTS_META_NAME + '.json')))\n _exp_name = MODULE_CONFIG.BaseConfig.BASE_DIR.split('/')[-2]\n _exp_run = MODULE_CONFIG.BaseConfig.BASE_DIR.split('/')[-1]\n _exp_meta[_exp_name][_exp_run]['available_checkpoints'].append(i_episode)\n json.dump(_exp_meta, open(os.path.join(MODULE_CONFIG.BaseConfig.BASE_DIR, '..', '..',\n MODULE_CONFIG.BaseConfig.EXPERIMENTS_META_NAME + '.json'), 'w'),\n indent=2)", "def save_settings(path, server, station):\n db.save_data(path, server, station)", "def _save_settings(self):\n # data to be save :\n # -----------------\n # futurePivot node\n\n # create attributes\n self._create_data_attribute()\n\n # connect futurePivot node\n pm.connectAttr(\n \"%s%s\" % (self._futurePivot.name(), \".message\"),\n self._object.attr(\"pivotData.futurePivot\"),\n f=True,\n )", "def saveDatabase():\r\n debug.write(\"saveDatabase processing\", 1)\r\n \"\"\" Only process if turbo mode is off \"\"\"\r\n if not currentTurboMode:\r\n debug.write(\"turbo mode off, process the save\", 1)\r\n \"\"\" Update all the player's stats gained and commit the database\"\"\"\r\n for player in players:\r\n debug.write(\"Commiting indivudal players to the virtual database: %s\" % player.name, 2)\r\n player.commit()\r\n debug.write(\"Attempting to save the database itself\", 1)\r\n database.save()\r\n debug.write(\"SQLite database saved\", 1)\r\n debug.write(\"Creating the event\", 1)\r\n \"\"\" Create and fire the event \"\"\"\r\n values = {\"type\":(\"setstring\", str(saveType))}\r\n gamethread.delayed(0, fireEvent, (\"sourcerpg_databasesaved\", values))\r\n debug.write(\"Event fired\", 1)\r\n \r\n \"\"\" Create a loop if we need to \"\"\"\r\n if str( saveType ) == \"intervals\":\r\n gamethread.delayedname(float(saveLength), 'sourcerpg_databasesave', saveDatabase)\r\n debug.write(\"saveDatabase processed\", 1)", "def save(self, db):\n pass", "def save_parms(self, save_to_db=False, parms_file=None):\n _db = {}\n _db['inputs'] = [item.parms['job_name'] for item in self.get_renderable_inputs()]\n _db['class_name'] = self.__class__.__name__\n _db['backend_name'] = self.manager.__class__.__name__\n _db['parms'] = self.parms\n\n if not parms_file:\n parms_file = os.path.expandvars(self.parms['script_path'])\n parms_file = os.path.join(parms_file, self.parms['job_name']) + \".json\"\n\n with open(parms_file, 'w') as file:\n result = json.dump(_db, file, indent=2)\n return result, parms_file", "def save(self):\r\n debug.write(\"[SourceRPG] Handling SQL Save\", 1)\r\n if self.path != \":memory:\":\r\n debug.write(\"Path is not in memory\", 2, False)\r\n if currentTurboMode is False:\r\n debug.write(\"We are not in turbo mode\", 2, False)\r\n self.connection.commit()\r\n debug.write(\"[SourceRPG] SQL Save handled\", 1)", "def save(self, epoch='best'):\n torch.save(self.qa_module.state_dict(), self.config['squad_model_path'].format(epoch))\n\n self.logger.info(f'Saved SQuAD model in {self.config[\"squad_model_path\"].format(epoch)}')", "def write(self):\n db_handle = open(settings.DATA_PATH, 'wb')\n cPickle.dump(dict(self), db_handle)\n db_handle.close()" ]
[ "0.63763595", "0.6320159", "0.6281932", "0.6186798", "0.61095285", "0.6107234", "0.6069167", "0.6063899", "0.5993819", "0.59579563", "0.59279996", "0.5842179", "0.5783171", "0.5780858", "0.5776538", "0.57733697", "0.5734225", "0.5729373", "0.5707211", "0.569653", "0.566799", "0.5655397", "0.5650262", "0.56305224", "0.56118363", "0.56081057", "0.56045455", "0.56000555", "0.55989903", "0.5598804" ]
0.6830284
0
Saves given trial and generation strategy if DB settings are set on this `WithDBSettingsBase` instance.
def _save_new_trial_to_db_if_possible(\n        self,\n        experiment: Experiment,\n        trial: BaseTrial,\n        suppress_all_errors: bool = False,\n    ) -> bool:\n        if self.db_settings_set:\n            save_new_trial(\n                experiment=experiment, trial=trial, db_settings=self.db_settings\n            )\n            return True\n        return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _save_generation_strategy_to_db_if_possible(\n self, generation_strategy: GenerationStrategy, suppress_all_errors: bool = False\n ) -> bool:\n if self.db_settings_set:\n save_generation_strategy(\n generation_strategy=generation_strategy, db_settings=self.db_settings\n )\n return True\n return False", "def _save_updated_trial_to_db_if_possible(\n self,\n experiment: Experiment,\n trial: BaseTrial,\n suppress_all_errors: bool = False,\n ) -> bool:\n if self.db_settings_set:\n save_updated_trial(\n experiment=experiment, trial=trial, db_settings=self.db_settings\n )\n return True\n return False", "def get_save_strategy(self):\r\n return self.save_strategy", "def save_settings(path, server, station):\n db.save_data(path, server, station)", "def save_db(self) -> None:", "def store_experiment(self, mode=\"r\", featureDesp=\"all\"):\n self.log.info(\"Storing Experiment\")\n\n with self.con:\n cur = self.con.cursor()\n query = \"INSERT INTO BG_experiment_run (model, parameters, svn_rev, experiment_id, features, `type`) \" \\\n \" VALUES (%(model)s, %(parameters)s, %(svn_rev)s, %(experiment_id)s, %(features)s, %(type)s)\"\n cur.execute(query, {\n 'model': self.algorithm,\n 'parameters': \"-\",\n 'svn_rev': self.svn_rev,\n 'experiment_id': self.batchId,\n 'features': featureDesp,\n 'type': mode\n })\n return cur.lastrowid", "def _save_experiment_to_db_if_possible(\n self, experiment: Experiment, suppress_all_errors: bool = False\n ) -> bool:\n if self.db_settings_set:\n save_experiment(experiment=experiment, db_settings=self.db_settings)\n return True\n return False", "def has_save_strategy(self):\r\n return self.save_strategy is not None", "def save(self, epoch='best'):\n torch.save(self.qa_module.state_dict(), self.config['squad_model_path'].format(epoch))\n\n self.logger.info(f'Saved SQuAD model in {self.config[\"squad_model_path\"].format(epoch)}')", "def saveSettings(self):\n self.genFiles.applyData()\n self.genGraph.applyData()", "def test_default_save_strategy_should_be_always(self):\r\n v = DefaultModelLevelSaveStrategy.create(val=1)\r\n assert 'val' in v.as_save_params()\r\n v.val = 2\r\n assert 'val' in v.as_save_params()\r\n v.save()", "def saveToolSettings(*args, **kwargs)->None:\n pass", "def saveSettings(self):\n helpers.saveFile(self.dataDir, self.settingsFilename, json.dumps(self.settings))", "def save(self):\n db = DBStorage()\n p = self.createPatient()\n db.add_prescription(p)", "def saveDatabase():\r\n debug.write(\"saveDatabase processing\", 1)\r\n \"\"\" Only process if turbo mode is off \"\"\"\r\n if not currentTurboMode:\r\n debug.write(\"turbo mode off, process the save\", 1)\r\n \"\"\" Update all the player's stats gained and commit the database\"\"\"\r\n for player in players:\r\n debug.write(\"Commiting indivudal players to the virtual database: %s\" % player.name, 2)\r\n player.commit()\r\n debug.write(\"Attempting to save the database itself\", 1)\r\n database.save()\r\n debug.write(\"SQLite database saved\", 1)\r\n debug.write(\"Creating the event\", 1)\r\n \"\"\" Create and fire the event \"\"\"\r\n values = {\"type\":(\"setstring\", str(saveType))}\r\n gamethread.delayed(0, fireEvent, (\"sourcerpg_databasesaved\", values))\r\n debug.write(\"Event fired\", 1)\r\n \r\n \"\"\" Create a loop if we need to \"\"\"\r\n if str( saveType ) == \"intervals\":\r\n gamethread.delayedname(float(saveLength), 'sourcerpg_databasesave', saveDatabase)\r\n debug.write(\"saveDatabase processed\", 1)", "def save(self):\r\n os.makedirs(self.settings.save_path, exist_ok=True)\r\n 
current_file = os.path.join(self.settings.save_path, 'current.json')\r\n if os.path.exists(current_file):\r\n raise FileExistsError()\r\n current_folder = os.path.join(self.settings.save_path, 'current')\r\n os.makedirs(current_folder, exist_ok=True)\r\n\r\n tosave = {\r\n 'generation': self.generation,\r\n 'approach_ind': self.approach[0],\r\n 'approach_params': Evolver._clean_params(self.approach_params),\r\n 'sensitive_params': Evolver._clean_params(self.sensitive_params)\r\n }\r\n\r\n with open(current_file, 'w') as outfile:\r\n json.dump(tosave, outfile)", "def save(self):\n with open(os.path.join(self.save_path, \"experiment.delira.pkl\"),\n \"wb\") as f:\n pickle.dump(self, f)\n\n self.params.save(os.path.join(self.save_path, \"parameters\"))", "def save_settings(self):\n settings = {'camera': self.comboCamera.currentIndex(),\n 'rotation': self.comboRotation.currentIndex(),\n 'colors': {\n 'min_hue': self.spinMinHue.value(),\n 'max_hue': self.spinMaxHue.value(),\n 'min_saturation': self.spinMinSaturation.value(),\n 'max_saturation': self.spinMaxSaturation.value(),\n 'min_value': self.spinMinValue.value(),\n 'max_value': self.spinMaxValue.value(),\n }, 'diameter': self.spinDiameter.value(),\n 'lifter': self.lineEditLifter.text(),\n 'save_video': self.checkSaveVideo.isChecked()\n }\n settings_file = open('./resources/settings.json', 'w')\n json.dump(settings, settings_file, indent=4)\n settings_file.close()\n self.statusbar.clearMessage()\n self.statusbar.showMessage('Settings saved.', 5000)", "def _save(self, itr):\n # using keep_checkpoint_every_n_hours as proxy for iterations between saves\n if self.saver and (itr + 1) % self.saver._keep_checkpoint_every_n_hours == 0:\n\n # collect params (or stuff to keep in general)\n params = dict()\n params['critic'] = self.critic.network.get_param_values()\n\n # if the environment is wrapped in a normalizing env, save those stats\n normalized_env = hgail.misc.utils.extract_normalizing_env(self.env)\n if normalized_env is not None:\n params['normalzing'] = dict(\n obs_mean=normalized_env._obs_mean,\n obs_var=normalized_env._obs_var\n )\n\n # save hierarchy\n for i, level in enumerate(self.hierarchy):\n params[i] = dict()\n params[i]['policy'] = level.algo.policy.get_param_values()\n \n # save params \n save_dir = os.path.split(self.saver_filepath)[0]\n hgail.misc.utils.save_params(save_dir, params, itr+1, max_to_keep=50)", "def test_should_be_able_to_save_with_always(self):\r\n v = AlwaysSaveStrategy.create(val=1)\r\n assert 'val' in v.as_save_params()\r\n v.val = 2\r\n assert 'val' in v.as_save_params()\r\n v.save()\r\n\r\n v1 = AlwaysSaveStrategy.get(v.vid)\r\n assert v1.val == 2", "def save_settings(client_id, time_format, country, time_zone):\n\tsettings = Settings(user_id=client_id,\n\t\t\ttime_format=time_format,\n\t\t\tcountry=country,\n\t\t\ttime_zone=time_zone)\n\tsession.merge(settings)\n\tsession.commit()", "def saveGame(self) -> None:\n self.state[\"phase\"] = self._phase\n\n state_as_string = json.dumps(self.state)\n with open(self.save_location, \"w\") as File:\n File.write(state_as_string)", "def save_db(self):\n logger.log('DEBUG', f'Saving results to database')\n lock.acquire()\n db = Database()\n db.create_table(self.domain)\n db.save_db(self.domain, self.results, self.source)\n db.close()\n lock.release()", "def save_settings(self):\n with open(self.settings_path, \"w\") as f:\n json.dump(self.settings, f, indent=4)", "async def save(self, job, options=None):\n if options is None:\n options = {}\n\n if not 
options.get('secretseed'):\n bundle = False\n filename = '/data/freenas-v1.db'\n else:\n bundle = True\n filename = tempfile.mkstemp()[1]\n os.chmod(filename, 0o600)\n with tarfile.open(filename, 'w') as tar:\n tar.add('/data/freenas-v1.db', arcname='freenas-v1.db')\n tar.add('/data/pwenc_secret', arcname='pwenc_secret')\n\n def read_write():\n with open(filename, 'rb') as f:\n f2 = os.fdopen(job.write_fd, 'wb')\n while True:\n read = f.read(1024)\n if read == b'':\n break\n f2.write(read)\n f2.close()\n await self.middleware.run_in_thread(read_write)\n\n if bundle:\n os.remove(filename)", "def do_save(self, arg):\n smores.save_session(self.__version__)", "def test_should_be_able_to_resave_with_once_strategy(self):\r\n v = OnceSaveStrategy.create()\r\n assert 'vid' not in v.as_save_params()\r\n v.save()", "def save(self):\n self.lock()\n\n trader = self.strategy.trader()\n\n for trade in self.trades:\n t_data = trade.dumps()\n ops_data = [operation.dumps() for operation in trade.operations]\n\n # store per trade\n Database.inst().store_user_trade((trader.name, trader.account.name, self.instrument.market_id,\n self.strategy.identifier, trade.id, trade.trade_type, t_data, ops_data))\n\n # dumps of regions\n trader_data = {}\n regions_data = [region.dumps() for region in self.regions]\n\n Database.inst().store_user_trader((trader.name, trader.account.name, self.instrument.market_id,\n self.strategy.identifier, self.activity, trader_data, regions_data))\n\n self.unlock()", "def save_inst(self):\n self.sanity_check()\n self.data_loaded_check()\n\n fname_pub_auth_all = ''.join([self.config.dir_data, '/',\n self.config.fname_pub_auth_all, '_',\n self.config.experiment_id, '.pk'])\n fname_pub_auth_top = ''.join([self.config.dir_data, '/',\n self.config.fname_pub_auth_top, '_',\n self.config.experiment_id, '.pk'])\n fname_pub_inst_all = ''.join([self.config.dir_data, '/',\n self.config.fname_pub_inst_all, '_',\n self.config.experiment_id, '.pk'])\n fname_pub_inst_top = ''.join([self.config.dir_data, '/',\n self.config.fname_pub_inst_top, '_',\n self.config.experiment_id, '.pk'])\n\n pickle.dump(self.pub_auth_all, open(fname_pub_auth_all, 'wb'))\n pickle.dump(self.pub_auth_top, open(fname_pub_auth_top, 'wb'))\n pickle.dump(self.pub_inst_all, open(fname_pub_inst_all, 'wb'))\n pickle.dump(self.pub_inst_top, open(fname_pub_inst_top, 'wb'))\n\n fname_pub_history = ''.join([self.config.dir_data, '/history_',\n self.config.experiment_id, '.pk'])\n pickle.dump(self.history, open(fname_pub_history, 'wb'))\n\n fname_pub_staff = ''.join([self.config.dir_data, '/staff_',\n self.config.experiment_id, '.pk'])\n pickle.dump(self.staff, open(fname_pub_staff, 'wb'))", "def save_config(self):\n\n return self.perform_action('/mgmtd/db/save')" ]
[ "0.67361945", "0.643106", "0.6275285", "0.5926923", "0.57212836", "0.55074066", "0.5471723", "0.538028", "0.5350232", "0.5327738", "0.5318719", "0.53136206", "0.5307854", "0.5285224", "0.52835673", "0.52670777", "0.526356", "0.5198457", "0.51904655", "0.51861244", "0.518299", "0.5179166", "0.5168252", "0.5156271", "0.5141606", "0.51304823", "0.51262265", "0.5123735", "0.51191646", "0.5114189" ]
0.6784477
0
register a tag with name txt and with given foreground and background color
def register_tag(self, txt, foreground, background):\n        # self.tag_config(txt, foreground=foreground, background=background)\n        self.known_tags.add(txt)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def addColoredText(self, st, tag, word, fg='black', bg='white'):\n word = word + \" \"\n st.insert('end', word)\n end_index = st.index('end')\n begin_index = \"%s-%sc\" % (end_index, len(word) + 1)\n st.tag_add(tag, begin_index, end_index)\n st.tag_config(tag, foreground=fg, background=bg)\n return", "def __init__(self, red=Black.red, green=Black.green, blue=Black.blue):\n self.color = Color(red, green, blue)\n\n self.template = '\\ttextcolor = {textcolor};\\n'", "def tag(self, text):\n\t\tpass", "def __init__(self, text, start, end, color, alpha=1):\n self.text = text\n self.start = start\n self.end = end\n self.color = color\n self.alpha = alpha", "def color_text(txt, foreground=PALETTE['white'], background=PALETTE['black']):\n if isinstance(foreground, str) and foreground.startswith('#'):\n foreground = hex_to_rgb(foreground)\n if isinstance(background, str) and background.startswith('#'):\n background = hex_to_rgb(background)\n return '{}{}{}{}'.format(_f(*foreground), _b(*background), txt, _r())", "def text(self, str: str, x: int, y: int, colour: int, /) -> None:", "def _conf_highlight(self):\n textbuffer = self.ref_object.get_buffer()\n tag_table = textbuffer.get_tag_table()\n c_tag = tag_table.lookup(\"colored\")\n if not c_tag:\n c_tag = textbuffer.create_tag(\"colored\", foreground=\"#000000\", background=\"#FFFF00\")\n text = textbuffer.get_text(textbuffer.get_bounds()[0], textbuffer.get_bounds()[1])\n textbuffer.delete(textbuffer.get_bounds()[0], textbuffer.get_bounds()[1])\n for line in re.split(r'\\r\\n|\\r|\\n', text):\n for e in re.compile(\"(\" + self.entry.get_text().lower() + \")\", re.I).split(line):\n if re.search(self.entry.get_text().lower(), e, re.I):\n textbuffer.insert_with_tags(textbuffer.get_end_iter(), e, c_tag)\n else:\n textbuffer.insert_with_tags(textbuffer.get_end_iter(), e)\n textbuffer.insert_with_tags(textbuffer.get_end_iter(), '\\n')", "def _on_node_enter(self, *args): # pylint: disable=W0613\r\n self._text.node[\"fg\"] = (self.text_color.x + 0.1, self.text_color.y + 0.1,\r\n self.text_color.z + 0.1, 1.0)", "def fill_tag_table(self, buf):\n self.update_colors = False\n tag = buf.get_tag_table().lookup('smart_highlight')\n if tag != None:\n buf.get_tag_table().remove(tag)\n # Get valid foreground color\n fg = self.default_settings['foreground-color']\n if self.has_settings_schema:\n tmp_fg = self.settings.get_string('foreground-color')\n if Gdk.color_parse(tmp_fg):\n fg = tmp_fg\n # Get valid background color\n bg = self.default_settings['background-color']\n if self.has_settings_schema:\n tmp_bg = self.settings.get_string('background-color')\n if Gdk.color_parse(tmp_bg):\n bg = tmp_bg\n buf.create_tag(\"smart_highlight\", foreground=fg, background=bg)", "def crearEtiqueta(self, texto, fuente, fila, columna, color):\n etiqueta = Label(self.master, text=texto, font=fuente)\n etiqueta.grid(row=fila, column=columna,sticky=W, padx=10)\n etiqueta.configure(bg=color)\n return etiqueta", "def __init__(self, font, color, text=\"\", top=0, left=0, bottom=None, right=None):\n self.text = text\n self.font = font\n self.color = color\n self.top = top\n self.left = left\n self.bottom = bottom\n self.right = right\n self.renderLabel()", "def color(color):\n if sys.platform == \"win32\":\n if color == \"green\":\n set_text_attr(FOREGROUND_GREEN | get_text_attr() & 0x0070 | FOREGROUND_INTENSITY)\n elif color == \"yellow\":\n set_text_attr(FOREGROUND_YELLOW | get_text_attr() & 0x0070 | FOREGROUND_INTENSITY)\n elif color == \"red\":\n set_text_attr(FOREGROUND_RED | 
get_text_attr() & 0x0070 | FOREGROUND_INTENSITY)\n elif color == \"blue\":\n set_text_attr(FOREGROUND_BLUE | get_text_attr() & 0x0070 | FOREGROUND_INTENSITY)\n elif color == \"reset\":\n set_text_attr(FOREGROUND_GREY | get_text_attr() & 0x0070)\n else :\n if color == \"green\":\n sys.stdout.write('\\033[92m')\n elif color == \"red\":\n sys.stdout.write('\\033[91m')\n elif color == \"blue\":\n sys.stdout.write('\\033[94m')\n elif color == \"reset\":\n sys.stdout.write('\\033[0m')", "def gt(text, font=DEFAULT_FONT, color=\"magenta\",\n on_color=None, attr=None, width=80,\n justify=\"center\"):\n\n f = Figlet(\n font, width=width,\n justify=justify\n )\n r = f.renderText(text)\n return colored(r, color, on_color, attr)", "def render_text(self, title, pos, color):\n if title not in self.text_elements:\n self.text_elements[title] = UIText(\n title, pos, font_size=20, font_color=color)", "def callback(data):\n c = config[data.data]\n w.config(text=c['text'], fg=c['fg'], bg=c['bg'])\n f.config(bg=c['bg'])", "def __init__(self, text, font, pos, color=(255, 255, 255)):\r\n self.pos = pos\r\n self.label = font.render(text, 1, color)", "def setForeground(self, color = None):", "def add_text(self, text, font_name, color, align=CENTER):\n assert align in [self.CENTER, self.RIGHT, self.LEFT], \"Unknown aligment '{}'\".format(align)\n self._texts.append((text, fonts.get_filename(font_name), color, align))\n if self.is_portrait:\n self._texts_height = int(self.height // 6)\n else:\n self._texts_height = int(self.height // 8)\n self._final = None # Force rebuild", "def configureTags (self):\n self.window.tag_config(\"a\", foreground = \"blue\", underline=1)\n## self.window.tag_bind('a', '<Button-1>')\n self.window.tag_config('u', underline=1)\n self.window.tag_config('center', justify = CENTER)\n self.window.tag_config('right', justify = RIGHT)", "def set_text_attr(color):\n SetConsoleTextAttribute(stdout_handle, color)", "def set_generic_text_badge_color(node: hou.Node, color: hou.Color) -> None:\n # Get the color RGB values.\n rgb_value = color.rgb()\n\n with hou.undos.disabler():\n # Set the user data with a valid text -> color string.\n node.setUserData(\n _ht_generic_text_badge.get_generic_text_color_key(),\n f\"rgb {rgb_value[0]} {rgb_value[1]} {rgb_value[2]}\",\n )", "def highlightCode(self, _event=None):\n count = 0\n if self.text.tag_ranges('sel'):\n self.text.tag_add('color' + str(count), tk.SEL_FIRST, tk.SEL_LAST)\n self.text.tag_configure('color' + str(count), foreground='black', background='yellow')\n count += 1\n else:\n # Do this if you want to overwrite all selection colors when you change color without selection\n # for tag in text.tag_names():\n # text.tag_delete(tag)\n self.text.config(foreground='yellow')\n\n fileContainingText = open(newTextFile, \"a\")\n\n hText = self.text.get(tk.SEL_FIRST, tk.SEL_LAST)\n fileContainingText.write(hText)", "def open_highlight(self, pad, lang='c++'):\n pad.tag_configure('default', foreground='#e0115f')\n pad.tag_configure('loops', foreground='green')\n pad.tag_configure('P_datatypes', foreground='aqua')\n pad.tag_configure('quotes', foreground='gold')\n pad.tag_configure('A_datatypes', foreground='orange')\n for i in keywords[lang]:\n for j in keywords[lang][i]:\n self.highlight_pattern(pad, j, i)\n\n pattern = '\"([A-Za-z0-9_\\./\\\\-]*)\"'\n self.highlight_pattern(pad, pattern, 'quotes', '1.0', 'end', True)\n pattern = \"'([A-Za-z0-9_\\./\\\\-]*)'\"\n self.highlight_pattern(pad, pattern, 'quotes', '1.0', 'end', True)", "def feed(self, text, 
color=None):\n if color is not None:\n text = '\\x1b[%sm%s\\x1b[0m' % (color, text)\n self.__term.feed(text)", "def add_text(self, text, color, pos, font):\n text = font.render(text, True, color)\n text_rec = text.get_rect(center=pos)\n self.window.blit(text, text_rec)", "def tagger():", "def create_surface_with_text(text, font_size, text_rgb, bg_rgb):\r\n font = pygame.freetype.SysFont(\"Courier\", font_size, bold=True)\r\n surface, _ = font.render(text=text, fgcolor=text_rgb, bgcolor=bg_rgb)\r\n return surface.convert_alpha()", "def __init__(self, name, rect, **kwargs):\n self.name = name\n self.rect = pg.Rect(rect)\n self.color = (128, 128, 128)\n self.font = pg.font.SysFont(\"arial\", 12)\n self.text = self.font.render(name, False, pg.Color(\"white\"))\n self.selected_text = self.font.render(name, False, pg.Color(\"black\"))\n self.text_rect = self.text.get_rect(center=self.rect.center)\n self.set_kwargs(kwargs)", "def _colorstr(self, args):", "def create_surface_with_text(text, font_size, text_rgb, bg_rgb):\n font = pygame.freetype.SysFont(\"Courier\", font_size, bold=True)\n surface, _ = font.render(text=text, fgcolor=text_rgb, bgcolor=bg_rgb)\n return surface.convert_alpha()" ]
[ "0.67343915", "0.61832774", "0.61168456", "0.60767376", "0.6007534", "0.59904003", "0.5987998", "0.59658444", "0.5829146", "0.5719596", "0.566779", "0.5584578", "0.55843204", "0.5502387", "0.5499931", "0.5490123", "0.5455171", "0.5431202", "0.5430979", "0.5413367", "0.5404569", "0.5388752", "0.53771544", "0.5368401", "0.5343198", "0.53388494", "0.5338776", "0.53384024", "0.53381824", "0.5326119" ]
0.81564075
0
Works like builtin 2-argument `iter()`, but stops on `exception`.
def iter_except(function, exception):
    try:
        while True:
            yield function()
    except exception:
        return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def iter_except(func, exception):\n try:\n while True:\n yield func()\n except exception:\n pass", "def iter_except(function, exception):\n try:\n while True:\n yield function()\n except exception:\n return", "def iter_except(func, exception, first=None):\n try:\n if first is not None:\n yield first()\n while True:\n yield func()\n except exception:\n pass", "def iter_except(cls, func, exception, first=None):\n try:\n if first is not None:\n yield first() # For database APIs needing an initial cast to db.first()\n while True:\n yield func()\n except exception:\n pass", "def stop():\n raise StopIteration", "def forever(iterable):\n it = iter(iterable)\n while True:\n try:\n yield next(it)\n except Exception as e:\n print(e)\n it = iter(iterable)", "def stop(x=None):\n raise StopIteration(x)", "def testIterWithException(self):\n\t\tc = Controller()\n\t\tx = c.mock()\n\t\tx.__iter__()\n\t\tc.setReturn(1)\n\t\tc.setException(Exception)\n\t\tc.replay()\n\t\ti = x.__iter__()\n\t\tself.failUnless(i.next() == 1)\n\t\tself.failUnlessRaises(Exception, i.next)", "def check_throw():\n while True:\n try:\n yield\n except ValueError:\n pass", "def EOF_or_raise(f):\n try:\n f.next()\n except StopIteration:\n return\n else:\n raise Exception(str(f))", "def test_iteriter_op_2():\n\n @ops.iteriter_op\n def f(x):\n return iter([4, 5, 6])\n\n with pytest.raises(ValueError):\n f([1, 2, 3]) # Passing in a list instead of an iterator", "def _next_exhausted(self):\n\n raise StopIteration() from None", "def iter_py():\n s = \"Hello, World!\"\n it = iter(s)\n while True:\n try:\n print(next(it))\n except:\n break\n\n ## Output\n # H\n # e\n # l\n # l\n # o\n # ,\n #\n # W\n # o\n # r\n # l\n # d\n # !", "def test_iteriter_op_3():\n\n @ops.iteriter_op\n def f(x):\n return [4, 5, 6] # Returning a list instead of an iterator\n\n with pytest.raises(ValueError):\n result = f(iter([1, 2, 3]))", "def __next__(self):\n if(self._isDone()):\n raise StopIteration\n return self._next()", "def __iter__():", "def __iter__():", "def __iter__():", "def __iter__():", "def http_error_tolerant_generator(f):\n @wraps(f)\n def inner(*args, **kwargs):\n try:\n for i in f(*args, **kwargs):\n yield i\n except HttpError as exc:\n logging.exception(\"Unhandled HttpError: %s\" % exc)\n raise StopIteration\n return inner", "def trycatch(*exceptions, catch_yields=...):\n\n def _trycath(arr):\n while True:\n try:\n yield next(arr)\n except Exception as e:\n for valid_exception in exceptions:\n if isinstance(e, valid_exception):\n if catch_yields is not ...:\n yield catch_yields\n return\n raise e\n\n return _trycath", "def yield_and_raise(data, exc):\n yield from data\n raise exc", "def test_listiter_op_2():\n\n @ops.listiter_op\n def f(x):\n return iter([4, 5, 6])\n\n with pytest.raises(ValueError):\n f(iter([1, 2, 3])) # Passing in an iterator instead of a list", "def try_safety():\n try:\n yield\n except Exception as e:\n pass", "def getIter(object):\n iterator = None\n try:\n iterator = iter(object)\n except TypeError:\n pass\n return iterator", "def __iter__(self):\n raise NotImplementedError(\"__iter__\")", "def test_stop_iteration_in_generators_yield_from(\n assert_errors,\n parse_ast_tree,\n code,\n statement,\n exception,\n default_options,\n):\n tree = parse_ast_tree(code.format(statement, exception))\n\n visitor = FunctionDefinitionVisitor(default_options, tree=tree)\n visitor.run()\n\n assert_errors(visitor, [StopIterationInsideGeneratorViolation])", "def cooperative_iter(citer):\n try:\n for chunk in citer:\n sleep(0)\n yield 
chunk\n except Exception as err:\n msg = (_(\"Error: cooperative_iter exception %(error)s\") %\n dict(error=err))\n LOG.error(msg)\n raise", "def next(space, w_iterator, w_default=None):\n try:\n return space.next(w_iterator)\n except OperationError as e:\n if w_default is not None and e.match(space, space.w_StopIteration):\n return w_default\n raise", "def catch_exception(cls, *args):\n\n if args and isinstance(args[0], list):\n items = args[0]\n else:\n items = list(args)\n\n return catch_exception(Enumerable.for_each(items))" ]
[ "0.7831509", "0.7759882", "0.7430991", "0.7154455", "0.7069062", "0.69544894", "0.68920565", "0.66940206", "0.6611821", "0.6493385", "0.63608396", "0.62707305", "0.62085617", "0.6189817", "0.61666644", "0.61463356", "0.61463356", "0.61463356", "0.61463356", "0.60950685", "0.6079748", "0.6043254", "0.6009896", "0.6005353", "0.5993588", "0.59647405", "0.5938109", "0.5918925", "0.589579", "0.58715594" ]
0.77927095
1
Read subprocess output and put it into the queue.
def reader_thread(self, q):
    try:
        with self.process.stdout as pipe:
            for line in iter(pipe.readline, b''):
                q.put(line)
    finally:
        q.put(None)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _stdout_reader(self):\n self._is_launched.wait()\n stdout_iterator = iter(self._popen.stdout.readline, b\"\")\n for line in stdout_iterator:\n self._log(\"raw\", \"stdout : {0}\".format(line.strip()))\n self.stdout_queue.put_nowait(line.strip())\n self.stdout_queue.put_nowait(None) # Stop queue consumers", "def read_stream(self, output_queue, stream_type):\n output = []\n\n # Get all available output off the queue.\n try:\n while 1:\n output.append(output_queue.get_nowait())\n except Empty:\n pass\n\n # If we read any output, toss it out to the logger\n if len(output):\n logger = logging.getLogger('taskmaster.processes.{}'.format(self.process_index))\n\n if stream_type == StreamType.Stdout:\n for line in output:\n logger.info(line)\n elif stream_type == StreamType.Stderr:\n for line in output:\n logger.error(line)\n\n # Get the current status to determine if we should try to read more or stop.\n current_status = psutil.STATUS_DEAD\n try:\n current_status = self.process.status()\n except psutil.NoSuchProcess:\n pass\n\n if current_status != psutil.STATUS_DEAD:\n # Process still alive, schedule the call to read more output.\n self.ioloop.call_later(0.1, self.read_stream, *[output_queue, stream_type])\n else:\n # Process has died. Flush the iostreams so the BlockingStreamReader triggers one last time and\n # nicely exits.\n self.process.stdout.flush()\n self.process.stderr.flush()", "def _queue_output(arguments, pidq, outputq):\n kwargs = arguments[\"process\"]\n input = arguments[\"input\"]\n\n try:\n proc = Popen(**kwargs)\n except OSError as e:\n # pid None is read by the main thread as a crash of the process\n pidq.put(None)\n\n outputq.put((\n \"\",\n (\"Unexpected exception caught during execution: '{0}' . \".format(e)),\n 255)) # false exitcode\n\n return\n\n # Put the PID in the queue for main process to know.\n pidq.put(proc.pid)\n\n # Send input and wait for finish\n out, err = proc.communicate(input)\n\n out, err = out.decode('utf-8'), err.decode('utf-8')\n\n # Give the output back to the caller\n outputq.put((out, err, proc.returncode))", "def enqueue_output(self, out, queue):\n\n started = False\n finished = False\n\n while not self.stop:\n line = out.readline()\n queue.put(line)\n # Test if we have reached the end of the output\n if started and IPMITOOL_SHELL_PROMPT in line.decode('ascii'):\n finished = True\n if IPMITOOL_SHELL_PROMPT in line.decode('ascii'):\n started = True\n if finished and self.comms_lock.locked():\n self.comms_lock.release()\n started = False\n finished = False\n\n time.sleep(QUEUE_THREAD_SLEEP_TIME)", "def _queue_output(arguments, pidq, outputq):\n kwargs = arguments[\"process\"]\n input_data = arguments[\"input\"].encode(\"utf-8\") if arguments[\"input\"] else None\n\n try:\n proc = Popen(**kwargs)\n except OSError as e:\n # pid None is read by the main thread as a crash of the process\n pidq.put(None)\n\n outputq.put((\n \"\",\n (\"Unexpected exception caught during execution of taskw: '{0}' . 
\"\n \"If you are running out-of-tree tests set TASK_USE_PATH=1 \"\n \"in shell env before execution and add the \"\n \"location of the task(d) binary to the PATH\".format(e)),\n 255)) # false exitcode\n\n return\n\n # Put the PID in the queue for main process to know.\n pidq.put(proc.pid)\n\n # Send input and wait for finish\n out, err = proc.communicate(input_data)\n\n if sys.version_info > (3,):\n out, err = out.decode('utf-8'), err.decode('utf-8')\n\n # Give the output back to the caller\n outputq.put((out, err, proc.returncode))", "def Wait(self):\n try:\n # Flush stdout and stderr to be sure no output is interleaved.\n sys.stdout.flush()\n sys.stderr.flush()\n\n # File position pointers are shared across processes, so we must open\n # our own file descriptor to ensure output is not lost.\n self._WaitForStartup()\n silent_death_time = time.time() + self.SILENT_TIMEOUT\n results = []\n with open(self._output.name, 'r') as output:\n pos = 0\n running, exited_cleanly, task_errors, all_errors = (True, False, [], [])\n while running:\n # Check whether the process is still alive.\n running = self.is_alive()\n\n try:\n errors, results = \\\n self._queue.get(True, self.PRINT_INTERVAL)\n if errors:\n task_errors.extend(errors)\n all_errors.extend(errors)\n\n running = False\n exited_cleanly = True\n except Queue.Empty:\n pass\n\n if not running:\n # Wait for the process to actually exit. If the child doesn't exit\n # in a timely fashion, kill it.\n self.join(self.EXIT_TIMEOUT)\n if self.exitcode is None:\n msg = '%r hung for %r seconds' % (self, self.EXIT_TIMEOUT)\n all_errors.extend(\n failures_lib.CreateExceptInfo(ProcessExitTimeout(msg), ''))\n self._KillChildren([self])\n elif not exited_cleanly:\n msg = ('%r exited unexpectedly with code %s'\n % (self, self.exitcode))\n all_errors.extend(\n failures_lib.CreateExceptInfo(ProcessUnexpectedExit(msg), ''))\n\n # Read output from process.\n output.seek(pos)\n buf = output.read(_BUFSIZE)\n\n if len(buf) > 0:\n silent_death_time = time.time() + self.SILENT_TIMEOUT\n elif running and time.time() > silent_death_time:\n msg = ('No output from %r for %r seconds' %\n (self, self.SILENT_TIMEOUT))\n all_errors.extend(\n failures_lib.CreateExceptInfo(ProcessSilentTimeout(msg), ''))\n self._KillChildren([self])\n\n # Read remaining output from the process.\n output.seek(pos)\n buf = output.read(_BUFSIZE)\n running = False\n\n # Print output so far.\n while len(buf) > 0:\n sys.stdout.write(buf)\n pos += len(buf)\n if len(buf) < _BUFSIZE:\n break\n buf = output.read(_BUFSIZE)\n\n # Print error messages if anything exceptional occurred.\n if len(all_errors) > len(task_errors):\n logging.PrintBuildbotStepFailure()\n msg = '\\n'.join(x.str for x in all_errors if x)\n logging.warning(msg)\n traceback.print_stack()\n\n sys.stdout.flush()\n sys.stderr.flush()\n\n # Propagate any results.\n for result in results:\n results_lib.Results.Record(*result)\n\n finally:\n self.Cleanup(silent=True)\n\n # If an error occurred, return it.\n return all_errors", "def read(self):\n # now read stderr for log messages, we could buffer here but since\n # we're just logging the messages, I don't care to\n try:\n out = self.proc.stderr.read()\n if out:\n LOG.debug('reading %s got %d bytes on stderr', self.name,\n len(out))\n for line in out.splitlines():\n LOG.warning('%s: %s', self.name, line)\n except IOError as err:\n if err.errno != errno.EAGAIN:\n # allowing a caller to handle the exception as well\n raise\n except:\n LOG.exception('uncaught exception in stderr read')\n\n # 
This read call is non-blocking\n try:\n self.buffer += self.proc.stdout.read()\n if len(self.buffer):\n LOG.debug('reading %s, buffer now %d bytes',\n self.name, len(self.buffer))\n except IOError as err:\n if err.errno != errno.EAGAIN:\n raise\n except:\n # sometimes the process goes away in another thread and we don't\n # have it anymore\n LOG.exception('uncaught exception in stdout read')\n return\n\n # iterate for each line we have\n while self.buffer:\n idx = self.buffer.find('\\n')\n if idx == -1:\n break\n\n line = self.buffer[0:idx].strip()\n if line:\n self.datalines.append(line)\n self.buffer = self.buffer[idx+1:]", "def get_from_queue(self):\n while not self.receive_queue.empty():\n cmd, kwargs = bcp.decode_command_string(\n self.receive_queue.get(False))\n self._process_command(cmd, **kwargs)", "def _run_monitor_thread(self):\n while True:\n chunk = self.stream.read(1024)\n if not chunk:\n # EOF - subprocess has exited, so trigger shutdown\n trigger_exit(ExitMode.CHILD)\n break\n self.output_deque.appendleft(chunk)", "def read_incoming(self):\r\n buf = ''\r\n debug_prompt = re.compile(r'\\A[\\w]+>>? ')\r\n while 1:\r\n try:\r\n buf += os.read(self.fid, 100).decode('utf8')\r\n except:\r\n self.queue.put(None)\r\n return\r\n lines = buf.splitlines()\r\n for line in lines[:-1]:\r\n self.queue.put(line)\r\n if buf.endswith('\\n'):\r\n self.queue.put(lines[-1])\r\n buf = ''\r\n elif re.match(debug_prompt, lines[-1]):\r\n self.queue.put(lines[-1])\r\n buf = ''\r\n else:\r\n buf = lines[-1]", "def write_queued_output(self):\n for stream in [\"stdout\", \"stderr\"]:\n while True:\n output, queue_size = getattr(self, stream).readline(timeout=0.1)\n if not (output is None or len(output) == 0):\n self.log(output, self.log_level[stream])\n if queue_size == 0:\n break", "def _read_thread(proc, ready_event):\n ready = False\n while True:\n line = proc.stdout.readline()\n if not line:\n break\n\n if output_lines is not None:\n output_lines.append(line)\n\n if not ready and indicator in line:\n ready = True\n ready_event.set()", "def __init__(self):\n # Open stata as pipe; make a queue for non-blocking. 
Start the thread.\n self.proc = sp.Popen(['stata-mp'], stdin=sp.PIPE, stdout=sp.PIPE, bufsize=1)\n\n self.qu = Queue()\n\n self.thread = Thread(target = self.enqueue_output, args = (self.proc.stdout,\n self.qu))\n self.thread.daemon = True\n self.thread.start()\n\n # Read the initial stdout content.\n self.genout()", "def __readStdout(self):\n if self.process is not None:\n self.process.setReadChannel(QProcess.StandardOutput)\n \n while self.process.canReadLine():\n s = str(self.process.readLine(),\n Preferences.getSystem(\"IOEncoding\"),\n 'replace')\n if (\n self.currentChangelist != \"\" and\n self.rx_status.exactMatch(s)\n ):\n file = self.rx_status.cap(5).strip()\n filename = file.replace(self.path + os.sep, \"\")\n if filename not in self.changeListsDict[\n self.currentChangelist\n ]:\n self.changeListsDict[self.currentChangelist].append(\n filename)\n elif (\n self.currentChangelist != \"\" and\n self.rx_status2.exactMatch(s)\n ):\n file = self.rx_status2.cap(2).strip()\n filename = file.replace(self.path + os.sep, \"\")\n if filename not in self.changeListsDict[\n self.currentChangelist\n ]:\n self.changeListsDict[self.currentChangelist].append(\n filename)\n elif self.rx_changelist.exactMatch(s):\n self.currentChangelist = self.rx_changelist.cap(1)\n if self.currentChangelist not in self.changeListsDict:\n self.changeListsDict[self.currentChangelist] = []", "def _read_rs(self, process, append):\n print('read_rs thread started')\n for line in iter(process.stdout.readline, \"\"):\n if 'value 1' in line.decode('utf-8'):\n self.vol_up()\n if 'value -1' in line.decode('utf-8'):\n self.vol_down()\n print('read_rs thread stopped')", "def continuous_shell_reader(self):\n\n while not self.thread_stop.is_set():\n out = self.shell_reader()\n\n if not out == \"\":\n print(\"IPC: Received: {}\".format(out))", "def _read_pb(self, process, append):\n print('read_pb thread started')\n for line in iter(process.stdout.readline, \"\"):\n if 'value 1' in line.decode('utf-8'):\n self.vol_mute()\n print('read_pb thread stopped')", "def run_command(self):\n\n while True:\n current_line = self.process.stdout.readline().rstrip()\n\n if not current_line:\n break\n\n yield self.decode_output(current_line)", "def clean_output(self, process, queue):\n while True:\n try:\n dirty = process.getline()\n clean = self.parse(dirty)\n except Queue.Empty:\n process.queueHasData.wait()\n except ValueError as inst:\n print(\"Error: \" + str(inst))\n else:\n if clean != None:\n self.cleanOutput.append(clean)", "def run(self, arguments=None, debug=False):\n\n # kill the child process if we receive a terminate signal\n def terminate_child_process(child, signum, frame):\n try:\n if child and signum != signal.SIGINT:\n child.terminate()\n child.wait()\n finally:\n sys.exit()\n\n # poll the pty for available data to read, then push to a queue and signal ready\n def produce_queue(queue, master_fd, slave_fd, evt, proc):\n with os.fdopen(master_fd, 'rb', 0) as task_stream:\n while 1:\n ready = select.select([master_fd], [], [], 0)[0]\n\n # exit if our process has terminated and no more input\n if not ready and proc.poll() is not None:\n os.close(slave_fd)\n evt.set()\n break\n\n if master_fd in ready:\n # POSIX.1 requires PIPE_BUF to be at least 512 bytes, but Linux uses 4096 bytes\n data = os.read(master_fd, 4096)\n\n if not data:\n # reached EOF, signal data ready in case the queue is not empty, then exit\n evt.set()\n break\n else:\n # put data in the queue and signal the consumer thread\n queue.put(data)\n evt.set()\n\n # 
wait for ready signal, then read data from queue and save to a buffer\n # once the buffer contains an end of line, send that to a callback if defined,\n # then send the line to a file for later processing\n def consume_queue(queue, filename, evt, proc, callback=None):\n streambuffer = []\n with open(filename, 'w+') as fileobj:\n while 1:\n # wait for a signal at most one second at a time so we can check the child process status\n evt.wait(1)\n if queue.empty() and proc.poll() is not None:\n # make sure the last part of the buffer is written out\n if streambuffer:\n if callback:\n callback(streambuffer[0])\n\n fileobj.write(streambuffer[0])\n fileobj.flush()\n break\n elif queue.empty():\n # the queue is empty, but our child process has not exited yet, so data may show up still\n continue\n\n data = queue.get_nowait()\n streambuffer.append(data)\n queue.task_done()\n\n # As soon as we see an end of line from the stream, we should write.\n # Since we could receive many lines per queue chunk, we want to pass\n # a line at a time to our callback.\n if '\\n' in data:\n merged = \"\".join(streambuffer)\n lines = merged.split('\\n')\n\n if len(lines) > 1 and '\\n' not in lines[-1]:\n streambuffer = [lines[-1]]\n lines.pop()\n else:\n streambuffer = []\n\n if callback:\n for x in lines:\n if not x:\n continue\n callback(x)\n\n fileobj.write(\"\".join(lines))\n fileobj.flush()\n\n command_list = self._build_command_list(arguments,debug)\n\n self.logger.info(\"Executing {0}\".format(\" \".join(command_list)))\n\n stdout_name = 'task_stdout_{}'.format(datetime.datetime.utcnow().isoformat())\n stderr_name = 'task_stderr_{}'.format(datetime.datetime.utcnow().isoformat())\n\n stderr = open(stderr_name, 'w+')\n\n # Use pty to provide a workaround for buffer overflow in stdio when monitoring stdout\n master_stdout_fd, slave_stdout_fd = pty.openpty()\n #master_stderr_fd, slave_stderr_fd = pty.openpty()\n #task = subprocess.Popen(command_list, stdout=slave_stdout_fd, stderr=slave_stderr_fd, close_fds=True)\n task = subprocess.Popen(command_list, stdout=slave_stdout_fd, stderr=stderr.fileno(), close_fds=True)\n\n # force termination signal handling of the child process\n signal_handler = functools.partial(cleanup, task)\n signal.signal(signal.SIGINT, signal_handler)\n signal.signal(signal.SIGTERM, signal_handler)\n\n stdout_queue = Queue.Queue()\n stdout_data_ready = threading.Event()\n\n t1 = threading.Thread(target=produce_queue, args=(stdout_queue, master_stdout_fd, slave_stdout_fd, stdout_data_ready, task))\n t1.daemon = True\n t1.start()\n\n t2 = threading.Thread(target=consume_queue, args=(stdout_queue, stdout_name, stdout_data_ready, task, self.callback))\n t2.daemon = True\n t2.start()\n\n #stderr_queue = Queue.Queue()\n #stderr_data_ready = threading.Event()\n\n #t3 = threading.Thread(target=produce_queue, args=(stderr_queue, master_stderr_fd, slave_stderr_fd, stderr_data_ready, task))\n #t3.daemon = True\n #t3.start()\n\n #t4 = threading.Thread(target=consume_queue, args=(stderr_queue, stderr_name, stderr_data_ready, task))\n #t4.daemon = True\n #t4.start()\n\n task.wait()\n\n t1.join()\n t2.join()\n #t3.join()\n #t4.join()\n\n stdout = open(stdout_name, 'rb')\n #stderr = open(stderr_name, 'rb')\n stderr.seek(0)\n\n task_output = {}\n task_output[\"stdout\"] = \"\".join(stdout.readlines())\n task_output[\"stderr\"] = \"\".join(stderr.readlines())\n\n stdout.close()\n stderr.close()\n os.remove(stdout_name)\n os.remove(stderr_name)\n\n if task.returncode != 0:\n self.logger.error(task.returncode)\n 
raise Exception(task_output[\"stdout\"], task_output[\"stderr\"])\n else:\n return task_output", "def _retrieve_output(thread, timeout, queue, thread_error):\n # Try to join the thread on failure abort\n thread.join(timeout)\n if thread.isAlive():\n # Join should have killed the thread. This is unexpected\n raise TimeoutWaitingFor(thread_error + \". Unexpected error\")\n\n # Thread died so we should have output\n try:\n # data = (stdout, stderr, exitcode)\n data = queue.get(timeout=timeout)\n except Empty:\n data = TimeoutWaitingFor(\"streams from program\")\n\n return data", "def wait(self):\n\n for output in self.proc.communicate():\n if output is not None:\n self.output += output", "def _get_output(arguments, timeout=None):\n # NOTE Increase this value if tests fail with None being received as\n # stdout/stderr instead of the expected content\n output_timeout = 0.1 # seconds\n\n pidq = Queue()\n outputq = Queue()\n\n t = Thread(target=_queue_output, args=(arguments, pidq, outputq))\n t.daemon = True\n t.start()\n\n try:\n pid = pidq.get(timeout=timeout)\n except Empty:\n pid = None\n\n # Process crashed or timed out for some reason\n if pid is None:\n return _retrieve_output(t, output_timeout, outputq,\n \"Program to start\")\n\n # Wait for process to finish (normal execution)\n state = wait_process(pid, timeout)\n\n if state:\n # Process finished\n return _retrieve_output(t, output_timeout, outputq,\n \"Program thread to join\")\n\n # If we reach this point we assume the process got stuck or timed out\n for sig in (signal.SIGABRT, signal.SIGTERM, signal.SIGKILL):\n # Start with lower signals and escalate if process ignores them\n try:\n os.kill(pid, signal.SIGABRT)\n except OSError as e:\n # 3 means the process finished/died between last check and now\n if e.errno != 3:\n raise\n\n # Wait for process to finish (should die/exit after signal)\n state = wait_process(pid, timeout)\n\n if state:\n # Process finished\n return _retrieve_output(t, output_timeout, outputq,\n \"Program to die\")\n\n # This should never happen but in case something goes really bad\n raise OSError(\"Program stopped responding and couldn't be killed\")", "def _get_output(arguments, timeout=None):\n # NOTE Increase this value if tests fail with None being received as\n # stdout/stderr instead of the expected content\n output_timeout = 0.1 # seconds\n\n pidq = Queue()\n outputq = Queue()\n\n t = Thread(target=_queue_output, args=(arguments, pidq, outputq))\n t.daemon = True\n t.start()\n\n try:\n pid = pidq.get(timeout=timeout)\n except Empty:\n pid = None\n\n # Process crashed or timed out for some reason\n if pid is None:\n return _retrieve_output(t, output_timeout, outputq,\n \"TaskWarrior to start\")\n\n # Wait for process to finish (normal execution)\n state = wait_process(pid, timeout)\n\n if state:\n # Process finished\n return _retrieve_output(t, output_timeout, outputq,\n \"TaskWarrior thread to join\")\n\n # If we reach this point we assume the process got stuck or timed out\n for sig in (signal.SIGABRT, signal.SIGTERM, signal.SIGKILL):\n # Start with lower signals and escalate if process ignores them\n try:\n os.kill(pid, signal.SIGABRT)\n except OSError as e:\n # ESRCH means the process finished/died between last check and now\n if e.errno != errno.ESRCH:\n raise\n\n # Wait for process to finish (should die/exit after signal)\n state = wait_process(pid, timeout)\n\n if state:\n # Process finished\n return _retrieve_output(t, output_timeout, outputq,\n \"TaskWarrior to die\")\n\n # This should never happen 
but in case something goes really bad\n raise OSError(\"TaskWarrior stopped responding and couldn't be killed\")", "def _stream(cmd):\n # color_print(getuser() + '$ ' + cmd, COLOR.BLUE)\n output = [] # used to collect o/p from both stdout and stderr\n\n try:\n proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=1, shell=True)\n except subprocess.CalledProcessError as ex:\n print(\"Status : FAIL\", ex.returncode, ex.output)\n else:\n with proc.stdout:\n for line in iter(proc.stdout.readline, b''):\n # print(line)\n output.append(line)\n\n # Note: output is streamed to the user as and when it occurs.\n with proc.stderr:\n for line in iter(proc.stderr.readline, b''):\n # print(line)\n output.append(line)\n\n return output", "def cmd_iter(cmd):\n\n def thread_enqueue(label, f, q):\n t = threading.Thread(target=enqueue_output, args=(label, f, q))\n t.daemon = True ## thread dies with the program\n t.start()\n return t\n\n def enqueue_output(label, out, queue):\n prev_line = None\n for line in out.read():\n if prev_line is not None:\n queue.put((label, \"%s\\n\" % prev_line))\n prev_line = line\n # print(\"%s: %r\" % (label, line))\n # print(\"END of %s\" % (label, ))\n if prev_line:\n queue.put((label, prev_line))\n out.close()\n\n proc = Proc(cmd)\n proc.stdin.close()\n q = Queue()\n t1 = thread_enqueue(\"out\", proc.stdout, q)\n t2 = thread_enqueue(\"err\", proc.stderr, q)\n running = True\n while True:\n try:\n yield q.get(True, 0.001)\n except Empty:\n if not running:\n break\n proc.poll()\n running = proc.returncode is None or \\\n any(t.is_alive() for t in (t1, t2))\n\n # print(\"%s: %r\" % (\"errlvl\", proc.returncode))\n yield \"errorlevel\", proc.returncode", "def processq(self):\n\n while True:\n command = None\n lock = Locker(str(self.qlockfile))\n if lock.lockfile():\n if self.queuefile.exists():\n line = self.queuefile.read_text()\n q = line.split(',')\n if any(q):\n command = q.pop(0)\n # remember q has now changed\n if not any(q):\n self.queuefile.unlink()\n else:\n line = \",\".join(q)\n self.queuefile.write_text(line)\n lock.unlockfile()\n\n if command:\n self.execute(command)\n else:\n break", "def recieve(self):\n return self.__proc.stdout.readline().strip('\\n')", "def get_queue(queue_name=\"\"):\n print(get_qstat_arg(queue_name))\n q = subprocess.Popen(\n _get_qstat_arg(queue_name), stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, stdin=subprocess.PIPE\n )\n o, e = q.communicate()\n\n return o", "def __call__(self) -> buffer.Buffer:\n processed_buffer = self.output_queue.get()\n\n return processed_buffer" ]
[ "0.7643372", "0.70199263", "0.6941756", "0.6892083", "0.68583477", "0.667833", "0.65482545", "0.65377265", "0.6516418", "0.6509647", "0.6480462", "0.6398246", "0.6274676", "0.62624496", "0.6258729", "0.62502366", "0.62449336", "0.6241449", "0.62395567", "0.62352574", "0.6220445", "0.61907655", "0.6125156", "0.60733354", "0.60645014", "0.60506815", "0.6018411", "0.59909254", "0.5985926", "0.5975419" ]
0.7216844
1
Update GUI with items from the queue.
def update(self, q):
    for line in iter_except(q.get_nowait, Empty): # display all content
        if line is None:
            self.process.kill()
            self.process = None
            return
        else:
            result = str(line).replace('\\r', '\r').replace('\\\\', '\\').replace("b'","").replace("b\"","").replace("\\n'", "\n")
            self.listbox.write(result)
            break # display no more than one line per 40 milliseconds
    self.root.after(40, self.update, q) # schedule next update
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gui_process(self):\n ti = self.scan_queue.qsize()\n t = TqdmUpTo(total=self.scan_queue.qsize(), unit='Files')\n\n while True:\n try:\n t.update(ti - self.scan_queue.qsize())\n ti = self.scan_queue.qsize()\n if self.message_queue.__len__() > 0:\n for m in self.message_queue:\n TqdmUpTo.write(m)\n self.message_queue.remove(m)\n # We dont need more then 60fps in the terminal :P\n except BrokenPipeError:\n continue", "def _process_incoming_queue_messages(self):\n while self._queue.qsize():\n msg = self._queue.get()\n if msg == MAP_UPDATE:\n self._clear_measurement_progress_label()\n self._presenter.update_map(self.chosen_value.get())", "def update_(self):\n self.update_listbox()\n self.update_infobox()\n self.update_statusbar()\n self.listbox.select_set(0)\n self.listbox.focus_set()", "def update_Q(self):", "def _update_gui(self):\r\n \r\n # Update the RF button.\r\n rf_on = self.api.get_output()\r\n if rf_on == None: rf_on = True\r\n self.button_rf.set_checked(rf_on, block_events=True).enable()\r\n \r\n # Update the combo; we block first just in case the value doesn't \"change\"\r\n if self.api == None: self.label_instrument_name.set_text('Simulation')\r\n else:\r\n if self.api.get_mode() == 'Fixed': self.combo_mode.set_value(0, block_events=True).enable()\r\n else: self.combo_mode.set_value(1, block_events=True).enable()\r\n self._combo_mode_changed()\r\n \r\n # Update the list plot\r\n self.query_list()", "def updateWidget(self):\n pass", "def run(self):\n list_count = self.queue_list.count()\n for i in range(list_count):\n if self._isRunning:\n currentItem = self.queue_list.item(0)\n self.statusChange.emit(currentItem.fName, currentItem.video, currentItem.audio)\n self.func(self.queue_list, 0)\n self.notifyProgress.emit((i+1)/list_count * 100) # current progress = completed / total jobs\n self.revertButton.emit(\"Convert\")\n # self.notifyProgress.emit(0)", "def visualizar(self):\n print(self.queue)", "def update_gui(self):\n for where, updates in self.gui_updates.items():\n self.window[where].update(**updates)\n self.gui_updates = {}", "def handle_display_queue_message(self):\n while True:\n try:\n # Parse info in the display_queue\n origin, contents, data = self.display_queue.get_nowait()\n # If the origin is the Mandelbrot thread, use the data to\n # update the Mandelbrot image\n if origin == \"mandelbrot_thread\":\n self.mpl_mandelbrot.update_image(*data)\n # A message from the Mandelbrot thread means it's done \n # computing - Send updated info to get it started on the\n # next computation\n self.request_mandelbrot_computation()\n except QueueEmpty: break", "def processIncoming(self):\n try:\n msg = self.guiqueue.get(False)\n (PK, XERRO, tt) = getDataFromString(msg)\n self.PK0.append(PK[0])\n self.PK1.append(PK[1])\n self.PK2.append(PK[2])\n self.PK3.append(PK[3])\n self.PK4.append(PK[4])\n self.PK5.append(PK[5])\n self.PK6.append(PK[6])\n self.PK7.append(PK[7])\n self.XERRO0.append(XERRO[0])\n self.XERRO1.append(XERRO[1])\n self.XERRO2.append(XERRO[2])\n self.XERRO3.append(XERRO[3])\n self.XERRO4.append(XERRO[4])\n self.XERRO5.append(XERRO[5])\n self.XERRO6.append(XERRO[6])\n self.XERRO7.append(XERRO[7])\n self.t.append(tt)\n \n \n #replota a cada dado novo\n self.updatePlot()\n \n except queue.Empty:\n #print(\"Fila Vazia\")\n pass\n\n self.root.after(25, self.processIncoming)", "def queue_gui_update(self, element_key, update_dict):\n self.gui_updates[element_key] = update_dict", "def showqueue(self, irc, msg, args):\n if len(self._queue) == 0:\n irc.reply(\"The queue is 
empty\", private=True)\n return\n pos = self._find_in_queue(msg.nick)\n if pos < 0:\n irc.reply(\"You're not in the queue, did your nick change?\",\n private=True)\n return\n irc.reply(\"You are queued at position %d\" % (pos + 1), private=True)", "def _sync_gui(self):\n self._update_buttons()\n\n self.turn_value_label.config(text=self.turn_value_text)\n self.selected_piece_value_label.config(text=self.selected_piece_value_text)\n\n self.update()", "def update_messages():\n\n scrollbar = Scrollbar(root)\n scrollbar.pack(side=RIGHT, fill=Y)\n listbox = Text(root, wrap =WORD, yscrollcommand=scrollbar.set, background=\"#CCFFCC\", fg=\"black\", selectbackground=\"#003300\",\n highlightcolor=\"#0033CC\")\n\n msgs = []\n run = True\n while run:\n\n time.sleep(0.1) # update every 1/10 of a second\n new_messages = c1.get_messages() # get any new messages from client\n msgs.extend(new_messages) # add to local list of messages\n\n for msg in new_messages: # display new messages\n print(msg)\n #title_label = Label(text=str(msg), bg=\"#CCFFCC\", fg=\"black\", padx=34, pady=5, font=\"comicsansms 9 bold\",borderwidth=3,wraplength=300, relief=SUNKEN)\n #title_label.pack(side=TOP)\n\n listbox.insert(END, str(msg)+'\\n\\n')\n listbox.pack(fill=BOTH, padx=36)\n scrollbar.config(command=listbox.yview)\n\n if msg == \"{quit}\":\n root.destroy()\n run = False\n break", "def queue_consumer(self, q):\n\n self.status = 'Running...'\n\n while True:\n try:\n msg = q.get_nowait()\n if msg is None:\n break\n self.update_plot(msg)\n except Queue.Empty:\n time.sleep(0.1)\n\n self.status = 'Done'", "def update(self):\n # even if we change the widget trough the :meth:`resubmit` or :meth:`terminate` methods, go back to the original one\n if self.job.status == JobStatus.RUN:\n # Show stdout of running jobs\n stdout_widget = self._load_file_as_widget(self.job.f_stdout, 'stdout')\n if stdout_widget is not None:\n self.file_widgets_list = [urwid.Pile([stdout_widget, urwid.Divider('*')])]\n self.widget.original_widget = urwid.Pile(self.info_widgets_list + self.file_widgets_list + self.metadata_widgets_list)", "async def queue(self, ctx):\n srv = self.get_server_dict(ctx.message.server.id)\n que = srv['queue']\n msg = self.format_song_display('▶', srv['song'][1], srv['song'][2], srv['song'][3])\n i = 1\n for item in que:\n line = self.format_song_display(i, item[1], item[2], item[3])\n i += 1\n msg += line\n await ctx.bot.send_message(ctx.message.channel, msg)", "def run(self):\n run=0\n wx.CallAfter(Publisher().sendMessage, \"update\", \"\")\n time.sleep(10)\n while (run==0):\n wx.CallAfter(Publisher().sendMessage, \"updatebuttons\", \"\")\n time.sleep(10)", "def update_quality():\n global items, converted_items\n if not converted_items:\n items = convert_items(items)\n converted_items = True\n for item in items:\n item.update_q()", "def queueUpdate(self):\n packts = 1\n self.ueLst = list(self.ues.keys())\n self.resAlloc(self.nrbUEmax)\n sym = 0\n if self.nrbUEmax == 0:\n self.sm_lim = 0\n else:\n if self.mimomd == 'MU':\n self.sm_lim = self.symMax*self.nlayers\n else:\n self.sm_lim = self.symMax\n\n while len(self.ueLst)>0 and packts>0 and sym < self.sm_lim:\n ue = self.ueLst[self.ind_u]\n self.printDebDataDM('---------------- '+ue+' ------------------<br>') # print more info in debbug mode\n if self.ues[ue].symb>0:\n if len(self.ues[ue].bearers)>0 and sym < self.sm_lim:\n if len(self.ues[ue].pendingTB)==0: # No TB to reTX\n sym = sym + self.rrcUncstSigIn(ue)\n if sym < self.sm_lim and 
len(self.ues[ue].bearers[0].buffer.pckts)>0:\n sym = sym + self.dataPtoTB(ue)\n else: # There are TB to reTX\n self.printPendTB()\n sym = sym + self.retransmitTB(ue)\n if self.dbMd:\n self.printQtb() # Print TB queue in debbug mode\n self.updIndUE()\n packts = self.updSumPcks()", "def update_command():\n # global selected_tuple\n backend.update(selected_tuple[0], \n title_text.get(), \n author_text.get(), \n year_text.get(), \n isbn_text.get())", "def update(self):\n self.grid.update()\n sleep(self.update_rate)", "def refresh_queue(self):\n state = self.get_state()\n return state.refresh_queue()", "def update_list_view(self):\n self.model.dataChanged.emit(self.model.index(0, 1),\n self.model.index(len(self.model.data_list), 1))\n #self.pBar.setValue(localization.localizationProgress() * 100)", "def update_all(self, event=None):\n key = str(self.comboBox.currentText())\n self.update_combobox()\n if key:\n idx = self.comboBox.findText(key)\n if idx == -1:\n idx = 0\n else:\n idx = 0\n self.comboBox.setCurrentIndex(idx)\n self.update_list()", "def update(self):\n self.m.update()", "def updateStatus(self):\n done = False\n if not self.pg.is_alive():\n done = True\n while not self.pg.msgQueue.empty():\n msg = str(self.pg.msgQueue.get(False))\n self.monitorTextBox.append(msg)\n if done:\n self.timer.stop()\n self.pg.join()\n self.runButton.setEnabled(True)\n self.stopButton.setEnabled(False)\n if self.pg.ex:\n etype, evalue, etrace = self.pg.ex\n el = traceback.format_exception(etype, evalue, etrace)\n for line in el:\n self.monitorTextBox.append(line)\n self.setStatusBar.emit(\n \"Surrogate Failed Elapsed Time: {0}\".format(\n hhmmss(math.floor(time.time() - self.timeRunning))\n )\n )\n else:\n self.setStatusBar.emit(\n \"Surrogate Finished, Elapsed Time: {0}\".format(\n hhmmss(math.floor(time.time() - self.timeRunning))\n )\n )\n if self.pg.driverFile != \"\":\n try:\n df = os.path.abspath(self.pg.driverFile)\n except:\n pass\n msgBox = QMessageBox()\n msgBox.setWindowTitle(\"Driver File Location\")\n msgBox.setText(\n \"The surrogate model driver file path is: {0}\".format(\n os.path.abspath(df)\n )\n )\n msgBox.exec_()\n else:\n self.refreshContents()\n self.setStatusBar.emit(\n \"Surrogate Model Generation, Elapsed Time: {0}s\".format(\n math.floor(time.time() - self.timeRunning)\n )\n )", "def update_ui(self):\n # main data\n self.lAcc.setText(self.settings.ACCOUNT)\n # self.lExcessLiquidity.setText(str(self.ibkrworker.app.excessLiquidity))\n # self.lSma.setText(str(self.ibkrworker.app.sMa))\n if hasattr(self.ibkrworker.app, 'smaWithSafety'):\n self.lSma.setText(str(round(self.ibkrworker.app.smaWithSafety, 1)))\n else:\n self.lSma.setText(str(round(self.ibkrworker.app.sMa, 1)))\n self.lMarketValue.setText(str(self.ibkrworker.app.netLiquidation))\n self.lblAvailTrades.setText(str(self.ibkrworker.app.tradesRemaining))\n self.lcdPNL.display(self.ibkrworker.app.dailyPnl)\n if self.ibkrworker.app.dailyPnl > 0:\n palette = self.lcdPNL.palette()\n palette.setColor(palette.WindowText, QtGui.QColor(51, 153, 51))\n self.lcdPNL.setPalette(palette)\n elif self.ibkrworker.app.dailyPnl < 0:\n palette = self.lcdPNL.palette()\n palette.setColor(palette.WindowText, QtGui.QColor(255, 0, 0))\n self.lcdPNL.setPalette(palette)\n\n total_positions_value = 0\n for p in self.ibkrworker.app.openPositions.values():\n if hasattr(p, 'Value'):\n total_positions_value += p[\"Value\"]\n self.lPositionsTotalValue.setText(str(round(total_positions_value, 1)))\n\n self.update_open_positions()\n 
self.update_live_candidates()\n self.update_open_orders()\n\n # everything disabled for safety - is now enabled\n self.chbxProcess.setEnabled(True)\n self.btnSettings.setEnabled(True)\n\n self.update_session_state()\n\n if not self.uiTimer.isActive():\n self.update_console(\"UI resumed.\")\n self.uiTimer.start(int(self.settings.INTERVALUI) * 1000) # reset the ui timer", "def processIncoming(self):\r\n while self.queue.qsize():\r\n try:\r\n volume_T101 = self.queue.get(0)\r\n self.var_pb_progress.set(volume_T101/100) #scale to 100\r\n self.var_T101.set(\"T101: \" + str(round(volume_T101,4)))\r\n self.var_LIT101.set(self.take_reading(volume_T101))\r\n self.update_physical(volume_T101)\r\n self.PLC_command()\r\n self.check_attack(volume_T101)\r\n self.output_results()\r\n self.master.update_idletasks()\r\n except queue.Empty:\r\n pass" ]
[ "0.6706958", "0.6474775", "0.63087624", "0.62826693", "0.6243645", "0.62302905", "0.6202968", "0.61998534", "0.612823", "0.6117151", "0.611539", "0.6067368", "0.606616", "0.60095227", "0.5993423", "0.59805995", "0.5947106", "0.5916233", "0.5896627", "0.5875889", "0.58683205", "0.5845721", "0.58312905", "0.58103985", "0.57946885", "0.57917446", "0.57754976", "0.5763594", "0.5760334", "0.57589036" ]
0.6962187
0
Update the adjusted data for a Band that has already been added using add_plot.
def set_adjusted_data(self, data:HistogramPlotData, band:Band):
    plots:AdjustableHistogramControl = self.__plots[band]
    if plots is not None:
        plots.set_adjusted_data(data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_plot(self, raw_data:HistogramPlotData, adjusted_data:HistogramPlotData, band:Band):\n plots = AdjustableHistogramControl(band)\n plots.set_raw_data(raw_data)\n plots.set_adjusted_data(adjusted_data)\n plots.limit_changed.connect(self.limit_changed)\n plots.limits_reset.connect(self.limits_reset)\n\n self.__plots[band] = plots\n self.__plot_layout.addWidget(plots)\n\n if self.__plot_layout.count() == 2:\n self.__init_menu()\n\n if band == Band.RED or band == Band.GREEN or band == Band.BLUE:\n self.__wire_band(band, plots)", "def update_plot():\n pass", "def update_plot(self,ax):\n self.replot(ax)", "def updatePlot(self):\n if len(self.baslin):\n X = list(t[0] for t in self.baslin)\n Y = list(t[1] for t in self.baslin)\n self.BLplt.set_xdata(X)\n self.BLplt.set_ydata(Y)\n if self.BLtyp == 'S':\n if self.BL is None:\n self.BL, = self.axes.plot(self.data[0], self.data[2], linestyle='-', color='green')\n else:\n self.BL.set_ydata(self.data[2])\n self.canvas.draw()", "def _update_plot(self, *args):\n # type: (dict, dict, dict, dict, dict) -> None\n if len(args) != 5 and not any([isinstance(arg, dict) for arg in args]):\n raise ValueError('Illegal arguments for _update_plot of %s' % self.__name__)\n desvars, responses, objectives, constraints, metadata = args\n\n data = self._compute_new_data(desvars, responses, objectives, constraints, metadata)\n self.cs[:, self.iter] = data[:]\n self.quad.set_array(self.cs.ravel())\n self.ax.set_xlim([-.5, self.iter+.5])\n self.iter += 1", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='darkgreen')\n self.canvas.draw()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='darkgreen')\n self.canvas.draw()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='darkgreen')\n self.canvas.draw()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='darkgreen')\n self.canvas.draw()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='darkgreen')\n self.canvas.draw()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='red')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='gray')\n self.canvas.draw()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.canvas.draw()", "def update_plot(self,ax):\n for i,line in enumerate(self.lines):\n 
line.set_ydata(self.data[i].f)\n for line in self.lines: \n ax.draw_artist(line)", "def _update_plot(self) -> None:\n\n # Check if plotting is active\n if self._fig is None:\n return None\n LOG.debug(\"Updating plot.\")\n\n # Extract glaciated area\n hs_back = np.ma.masked_where(\n self.h <= 1,\n hillshade(\n self.ele, self.PLOT_HILLSHADE_AZIMUTH, self.PLOT_HILLSHADE_ALTITUDE\n ),\n )\n\n # Clear plot and draw axes\n self._fig.clear()\n ax = plt.subplot(121, facecolor=\"black\")\n ax.tick_params(axis=\"x\", colors=\"w\")\n ax.tick_params(axis=\"y\", colors=\"w\")\n ax.set(xlabel=\"X-coordinate [m]\", ylabel=\"Y-coordinate [m]\")\n ax.xaxis.label.set_color(\"w\")\n ax.yaxis.label.set_color(\"w\")\n title_text = f\"Year: {str(self.i)} ELA: {str(int(self.ela))} m.a.s.l.\"\n ax.set_title(title_text, color=\"white\", size=18)\n\n # Draw new image layers\n plt.imshow(self.hs, vmin=90, vmax=345, cmap=\"copper\", extent=self.extent)\n plt.imshow(255 - hs_back, vmin=1, vmax=150, cmap=\"Greys\", extent=self.extent)\n\n # Mass balance\n ax1 = plt.subplot(222, facecolor=\"black\")\n ax1.plot(self.mass_balance, color=\"w\")\n ax1.plot(self.mass_balance_trend, color=\"r\")\n ax1.set(ylabel=\"Mass balance [m]\")\n ax1.yaxis.label.set_color(\"w\")\n plt.setp(ax1.get_xticklabels(), visible=False)\n ax1.tick_params(axis=\"y\", colors=\"w\")\n ax1.set_title(f\"Gradient: {str(self.m)} m/m\", color=\"white\", size=18)\n\n # Plot mean thickness\n ax2 = plt.subplot(224, sharex=ax1, facecolor=\"black\")\n ax2.plot(self.mass, color=\"w\")\n ax2.set(xlabel=\"Year [a]\", ylabel=\"Mean thickness [m]\")\n ax2.xaxis.label.set_color(\"w\")\n ax2.yaxis.label.set_color(\"w\")\n ax2.tick_params(axis=\"x\", colors=\"w\")\n ax2.tick_params(axis=\"y\", colors=\"w\")\n\n # Draw new plot\n self._fig.canvas.draw()\n plt.pause(0.05)", "def updateArrayPlotData(self):\n self.arrayPlotData.set_data(\"channel0\",self.array0)\n self.arrayPlotData.set_data(\"channel1\",self.array1)\n self.arrayPlotData.set_data(\"channel2\",self.array2)\n self.arrayPlotData.set_data(\"channel3\",self.array3)\n self.arrayPlotData.set_data(\"channel4\",self.array4)\n self.arrayPlotData.set_data(\"channel5\",self.array5)\n self.arrayPlotData.set_data(\"channel6\",self.array6)\n self.arrayPlotData.set_data(\"channel7\",self.array7)\n self.arrayPlotData.set_data(\"cursorXS\",self.cursorXS)\n #self.arrayPlotData.set_data(\"cursorVertical\",self.cursorVertical)", "def _plot_update(self):\n omit_log = ['sens_log']\n for log_group, log_arrays in self.qbpm.log_names.items():\n for log_array in log_arrays:\n if log_array not in omit_log:\n self.curves[log_array].setData(self.qbpm.log_time, self.qbpm.log_arrays[log_array],clear=True)\n # self.fill.setCurves(self.curves['posz_sens_low_log'], self.curves['posz_sens_high_log'])", "def update_visualization(self) -> None:\n pass", "def update_plots(ax, ax2, frequency_weights, chord_frequencies, samples):\n\n # Update the frequency domain bar chart with new frequency weights\n plt.subplot(2, 1, 2)\n for i in range(3):\n ax2.patches[i].set_height(frequency_weights[i])\n\n # Dynamically rescale the x axis of time domain plot to appropriately fit the visualization\n # Useful if you want to dynamically change chord frequencies with a new\n # potentiometer\n xmin, xmax = ax.get_xlim()\n smallest_frequency = min(chord_frequencies)\n longest_period = 1.0 / smallest_frequency\n ax.set_xlim(xmax=(100 * longest_period))\n\n # Update the time domain waveform with new measurements\n line1.set_ydata(samples)", "def 
update_data(self):\n\n # Update all plots in the figure\n self.data = self.model.measurements.get_bokeh_vis_data()\n self.source.stream(self.data, len(self.data))\n self.line_source.stream(self.data[self.data.agent_type == 'system'])\n self.school_dropdown_func()\n\n # Update the utility histograms\n self.update_histograms()\n\n # Update the composition histograms\n to_update = [self.neighbourhood_composition_quads, \n self.school_composition_quads, self.distance_quads]\n\n for quads in to_update:\n\n # Grab the new data\n if quads == self.neighbourhood_composition_quads:\n hist_data = self.composition_data(agent_type='neighbourhood')\n elif quads == self.school_composition_quads:\n hist_data = self.composition_data(agent_type='school')\n else:\n hist_data = self.composition_data(agent_type='household')\n\n # Update the bars and edges\n for group in hist_data.keys():\n\n hist, edges = np.histogram(hist_data[group],\n density=True,\n bins=20)\n\n # Update histogram\n quads[group].data_source.data['top'] = hist\n quads[group].data_source.data['left'] = edges[:-1]\n quads[group].data_source.data['right'] = edges[1:]", "def update_overlaid_plot(self, key, _):\n if key == self.controls.Arrays.WAVEFORMS:\n\n trigger = self.pv_monitor.arrays[self.controls.Arrays.WAVEFORMS][0]\n trace = self.pv_monitor.arrays[self.controls.Arrays.WAVEFORMS][1]\n waveforms = [trigger, trace]\n\n first_peak, second_peak = self.get_windowed_data(waveforms[0], waveforms[1])\n self.overlaid_lines[0].set_ydata(first_peak)\n self.overlaid_lines[0].set_xdata(range(len(first_peak)))\n self.overlaid_lines[1].set_ydata(second_peak)\n self.overlaid_lines[1].set_xdata(range(len(second_peak)))\n\n areas = [integ.simps(first_peak), integ.simps(second_peak)]\n labels = ['%.1f' % areas[0], '%.1f' % areas[1]]\n\n# for area in areas:\n# if area < 0.1:\n# raise RangeError # calculation warning error for example\n self.ax2.legend([self.overlaid_lines[0], self.overlaid_lines[1]],\n labels)\n\n self.draw()", "def updateplot(self):\n plotfiles = []\n try:\n self.plotter.reset()\n self.plotter.set_xrange(self.xrangemin.value(), self.xrangemax.value())\n self.plotter.set_yrange(self.yrangemin.value(), self.yrangemax.value())\n self.plotter.set_bgirange(self.bgintmin.value(), self.bgintmax.value())\n self.plotter.set_pkrange(self.halphamin.value(), self.halphamax.value())\n for n,pf in enumerate(self.selecteddata):\n tf = os.path.join(self.tempdir, \"tf%d\" % n)\n self.dfparser.writefile(tf, pf)\n plotfiles.append(tf)\n self.plotter.set_plot(plotfiles)\n except datafile.Datafile_error as e:\n self.warningmsg.setText(e.args[0])\n self.plotter.clear()\n except plotter.Plotter_error as e:\n self.warningmsg.setText(e.args[0])\n self.plotter.clear()", "def update_figure(self):\n # if number of kinetics in model did not change\n # update just last lines\n if self.N_lines - 1 == len(self.model.spectra.keys()) * 2:\n self.dataplot.lines[-1].remove()\n self.dataplot.lines[-1].remove()\n self.draw_figure_first()\n # delete all and redraw\n else:\n n = int((self.N_lines - 1) / 2)\n for _ in range(n):\n self.dataplot.lines[-1].remove()\n self.dataplot.lines[-1].remove()\n self.draw_figure_total()\n\n self.dataplot.relim()\n\n self.dataplot.autoscale_view(True, True, True)\n\n self.draw()", "def update_figure(self):\n\n self.draw()", "def refresh_plot(self):\n self.ax.relim() # recompute the data limits\n self.ax.autoscale_view() # automatic axis scaling\n self.fig.canvas.flush_events()", "def update(self, *args):\n #Fetches slider information\n 
s1=self.s1.get()\n s2=self.s2.get()\n r1=self.r1.get()\n r2=self.r2.get()\n p=self.p.get()\n\n #Changes the number next to the bar\n self.r1_string.configure(text=\"%.2f\"% r1)\n self.r2_string.configure(text=\"%.2f\"% r2)\n self.s1_string.configure(text=\"%.2f\"% s1)\n self.s2_string.configure(text=\"%.2f\"% s2)\n self.p_string.configure(text=\"%.2f\"% self.p.get())\n\n #Creates two asset objects\n self.I1 = Instrument(r1, s1, \"Asset 1\", \"Equity\")\n self.I2 = Instrument(r2, s2, \"Asset 2\", \"Bond\")\n\n #Builds a portfolio object\n self.port = Portfolio([self.I1, self.I2])\n self.port.addcorr([[0,p]])\n\n #Displays the new graph to the graph frame\n fff =Frame(height=400, width=400, bd=10, bg='white')\n Chart(self.port, 0.02).scatter(fff)\n fff.grid(row=1, column=0)", "def _DoUpdatePlot( self, wd, ht ):\n self.ax.grid(\n True, 'both', 'both',\n\tcolor = '#c8c8c8', linestyle = ':', linewidth = 1\n\t)", "def on_append_plot(self, event=None):\n self._on_plot_selection()\n data_id, theory_id, state_id = self.set_data_helper()\n self.parent.plot_data(data_id=data_id,\n state_id=state_id,\n theory_id=theory_id,\n append=True)", "def update_bars(self):\n raise NotImplementedError(\"Should implement update_bars()\")", "def _update_plots(self):\n #Adding in new data to plots\n currSignal = self._ai_client.get_ai_voltage(self._ai_channel, max_range=self.max_input_voltage)\n self.measured_powers = np.append(self.measured_powers[1:], np.mean(currSignal))\n self.out_voltages = np.append(self.out_voltages[1:], self._curr_output_voltage)\n self.errors = np.append(self.errors[1:], (currSignal[-1] - self.voltageSetpoint))\n self.sp_data = np.append(self.sp_data[1:], self.voltageSetpoint)\n #Update power plots\n self.widgets['curve'][0].setData(self.measured_powers*self.gain)\n #Update setpoint plots\n self.widgets['curve'][1].setData(self.sp_data*self.gain)\n\n # Now update voltage polots\n self.widgets['curve'][2].setData(self.out_voltages)\n self.widgets['curve'][3].setData(self.errors*self.gain)", "def graphplot(self):\n if self.binned:\n self.line.set_ydata(self.fft_bins_y)\n else:\n self.line.set_ydata(self.spec_y)\n self.line2.set_ydata(self.wave_y)\n self.ax1.draw_artist(self.ax1.patch)\n self.ax2.draw_artist(self.ax2.patch)\n self.ax1.draw_artist(self.line)\n self.ax2.draw_artist(self.line2)\n self.fig.canvas.update()\n self.fig.canvas.flush_events()" ]
[ "0.68253666", "0.67685705", "0.6681961", "0.64841163", "0.64281446", "0.6168467", "0.6168467", "0.6168467", "0.6168467", "0.6168467", "0.6162655", "0.61557037", "0.61169", "0.61146575", "0.6005888", "0.5998489", "0.5994703", "0.5939009", "0.5906529", "0.5905252", "0.58663887", "0.5860728", "0.5848988", "0.5807817", "0.5779619", "0.5774069", "0.57435215", "0.5733049", "0.5720655", "0.57013774" ]
0.70409757
0
Returns the derivative of output_name with respect to wrt.
def get_derivative(self, output_name, wrt):
    return self.gradient[wrt][output_name]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_2nd_derivative(self, output_name, wrt):\n \n return self.hessian[wrt[0]][wrt[1]][output_name]", "def find_derivative(name: str):\n return _derivatives[name]", "def _derivative_(self, x, diff_param=None):\n return 2*exp(-x**2)/sqrt(pi)", "def get_gradient(self, output_name=None):\n \n return array([self.gradient[wrt][output_name] for wrt in self.param_names])", "def derivative ( self ):\n return self.__derivative", "def cost_derivative_for_output(output_activations, y):\n return output_activations - y", "def d_dweight(self, ):\n return self.gen_df_dw(self.params, self.output_shape)(self.current_input)", "def derivative_activation(z):\n return activation(z) * (1 - activation(z))", "def derivative(self, x):\n z = np.asarray(x)\n return (self._der(z.flatten())).reshape(z.shape)", "def derivative(a, y, z):\n return (a - y) * Sigmoid.derivative(z)", "def _derivativeTerm(self):\n\n\t\treturn self._Kd * (self._getErrorFunction() - self._previousError) / self._dt", "def cost_derivative(output_activations, y):\n return (output_activations - y)", "def cost_derivative(self, output_activations, y):\r\n return (output_activations-y)", "def backward(self, d_output=None):\n if d_output is None:\n d_output = 1.0\n backpropagate(VariableWithDeriv(self, d_output))", "def derivative(self, *args):\n if self.i_dim == 0:\n return np.ones_like(*args[0])\n else:\n return np.zeros_like(*args[0])", "def cost_derivative(self, output_activations, y):\n return (output_activations-y)", "def cost_derivative(self, output_activations, y):\n return (output_activations-y)", "def cost_derivative(self, output_activations, y):\n return (output_activations-y)", "def cost_derivative(self, output_activations, y):\n return (output_activations-y)", "def cost_derivative(self,output_results,y):\r\n\t\treturn (output_results-y)", "def exterior_der(self):\n from utilities import format_unop_txt, format_unop_latex\n if self._exterior_derivative is None:\n vmodule = self._vmodule # shortcut\n rname = format_unop_txt('d', self._name)\n rlname = format_unop_latex(r'\\mathrm{d}', self._latex_name)\n resu = vmodule.alternating_form(self._tensor_rank+1, name=rname, \n latex_name=rlname)\n for dom, rst in self._restrictions.iteritems():\n resu._restrictions[dom] = rst.exterior_der()\n self._exterior_derivative = resu\n return self._exterior_derivative", "def Derivative(self, *args):\n return _Adaptor3d.Adaptor3d_InterFunc_Derivative(self, *args)", "def derivative(a, y, z):\n return a - y + (z * 0)", "def cost_derivative(self, output_activations, y):\n return 2 * (output_activations - y)", "def cost_derivative(self, output_activations, y):\n\t\treturn (output_activations - y)", "def derivative_func(t, x, Approx_func):\n return x.dot(Approx_func)", "def cost_derivation(update_param: np.ndarray,\n dependent_param: np.ndarray,\n label: int\n ) -> np.ndarray:\n return (sigmoid(update_param @ dependent_param) - label)*dependent_param", "def backwards(delta,params,name='',activation_deriv=sigmoid_deriv):\n # everything you may need for this layer\n W = params['W' + name]\n b = params['b' + name]\n X, pre_act, post_act = params['cache_' + name]\n # your code here\n # do the derivative through activation first\n # then compute the derivative W,b, and X\n \n delta_pre = activation_deriv(post_act) * delta\n # (in_dim, out_dim) = (in_dim, examples) @ (examples, out_dim)\n grad_W = X.transpose() @ delta_pre\n grad_b = np.sum(delta_pre, axis=0, keepdims=True) # (1, out_dim)\n # (examples, in_dim) = (examples, out_dim) @ (out_dim, in_dim)\n grad_X 
= delta_pre @ W.transpose()\n\n # store the gradients\n params['grad_W' + name] = grad_W\n params['grad_b' + name] = grad_b\n return grad_X", "def ortho_derivative_label(f):\n o = ortho_derivative(f)\n return \"d{}; w{}\".format(\n pretty_spectrum(differential_spectrum(o)),\n pretty_spectrum(walsh_spectrum(o), absolute=True),\n )", "def make_dhdu(ham, controls, derivative_fn):\n\n dHdu = []\n for ctrl in controls:\n dHdu.append(derivative_fn(ham, ctrl['symbol']))\n\n return dHdu" ]
[ "0.794627", "0.6320714", "0.6095984", "0.60070467", "0.5971002", "0.5934078", "0.58912855", "0.57599115", "0.5751379", "0.57422125", "0.5739002", "0.57238007", "0.5716149", "0.5715266", "0.5707183", "0.56966895", "0.56966895", "0.56966895", "0.56966895", "0.56952906", "0.5676925", "0.5670001", "0.56629336", "0.56206024", "0.56170076", "0.5605334", "0.5591167", "0.55492926", "0.5547523", "0.5525551" ]
0.8644599
0
Returns the 2nd derivative of output_name with respect to both vars in the tuple wrt.
def get_2nd_derivative(self, output_name, wrt): return self.hessian[wrt[0]][wrt[1]][output_name]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_derivative(self, output_name, wrt):\n \n return self.gradient[wrt][output_name]", "def _numerical_derivative_two_sided (func:typing.Callable[[float],float], x_0:float) -> typing.Tuple[float,float]:\n epsilon = 1.0e-9\n func_x_0 = func(x_0)\n deriv_neg = (func_x_0 - func(x_0-epsilon)) / epsilon\n deriv_pos = (func(x_0+epsilon) - func_x_0) / epsilon\n return deriv_neg, deriv_pos", "def find_derivative(name: str):\n return _derivatives[name]", "def second_derivative_code(self, variable_pair):\n variable1, variable2 = sorted(variable_pair)\n if (variable1, variable2) not in self._second_derivative_code:\n filename = '<second derivative of %s wrt %s,%s>' % (self.tag, \n variable1, variable2)\n expr = self.second_derivative((variable1, variable2))\n code = compile(expr, filename, mode='eval')\n self._second_derivative_code[(variable1, variable2)] = code\n return self._second_derivative_code[(variable1, variable2)]", "def Derivative(self, *args):\n return _Adaptor3d.Adaptor3d_InterFunc_Derivative(self, *args)", "def GetDerivative(self, *args):\n return _ITKCostFunctionsPython.itkMultipleValuedCostFunction_GetDerivative(self, *args)", "def second_derivative(self,show=False):\n\n df = [None]\n d2f = [None]\n\n # Take first difference: f1 = (f(x+dx)-f(x))/dx\n for i in range(0,len(self.ans)-1,1):\n df += [int((self.ans[i+1][1] - self.ans[i][1]) / (self.ans[i+1][0] - self.ans[i][0]))]\n\n for i in range(1,len(self.ans)-1,1):\n d2f += [int((df[i+1] - df[i]) / (self.ans[i+1][0] - self.ans[i][0]))]\n d2f += [None]\n\n if show == True:\n print('First derivative:', df)\n print('Second derivative:', d2f)\n\n return d2f", "def cost_derivative_for_output(output_activations, y):\n return output_activations - y", "def __call__ ( self , func , x , h , der = False ) :\n\n ## calculate differences \n imax = self.__order + 2 if der else self.__order + 1\n i = 0\n while i < imax : \n j = i + 1\n self.__df[i] = func ( x + j * h ) - func ( x - j * h )\n i += 1\n \n ## 1) calculate 1st derivative \n result = dot_fma ( self.__order + 1 , self.__df , self.__d1 ) / ( self.__sf1 * h ) \n if not der : return result \n \n ## 2) calculate Nth derivative \n dd = dot_fma ( self.__order + 2 , self.__df , self.__d2 ) / ( self.__sf2 * h**(self.__order*2+3) ) \n \n return result, dd", "def layer_backward(d_output, cache):\n\n # Unpack cache values\n x, w, z, output = cache\n\n # Compute derivatives (gradients)\n d_x, d_w = None, None\n\n return d_x, d_w", "def derivative(self, *args):\n if self.i_dim == 0:\n return np.ones_like(*args[0])\n else:\n return np.zeros_like(*args[0])", "def second_derivative(self, variable_pair):\n v1, v2 = sorted(variable_pair)\n cached_derivative = self._second_derivatives.get((v1,v2),None)\n if cached_derivative:\n return cached_derivative\n \n first_derivative = self.derivative(v1)\n return self._second_derivatives.setdefault((v1,v2),\n em.diff_expr(first_derivative,\n v2))", "def cost_derivative(self,output_results,y):\r\n\t\treturn (output_results-y)", "def _2ndderiv_xyz(self,x,y,z,i,j):\n return -np.pi*self._rhoc_M*self.a**3*self._b*self._c *\\\n _2ndDerivInt(x,y,z,self._a2,self._b2*self._a2,self._c2*self._a2,self.n,i,j)", "def calc_derivative(self, array_in, direction1, direction2 = False):\r\n A = array_in.copy()\r\n if direction1 != direction2:\r\n #Remove Nyquist frequency for even sample size and odd order of differentiation\r\n if direction1 == 'x' or direction2 == 'x':\r\n A[0,:] = 0.0\r\n if direction1 == 'y' or direction2 == 'y':\r\n A[:,0] = 0.0\r\n\r\n # Note that 'x' 
corresponds to the x1 direction, and 'y' to the\r\n # x2 direction\r\n # Perform first derivative in desired direction\r\n if direction1 == 'x':\r\n out = self.deriv_mat_x1*A\r\n elif direction1 == 'y':\r\n out = self.deriv_mat_x2*A\r\n\r\n # Perform second derivative in desired direction\r\n if direction2 == 'x':\r\n out = self.deriv_mat_x1*out\r\n elif direction2 == 'y':\r\n out = self.deriv_mat_x2*out\r\n\r\n return out", "def _evalAndDer(self, x):\n m = len(x)\n fx = np.zeros((m, self.funcCount))\n for j in range(self.funcCount):\n fx[:, j] = self.functions[j](x)\n i = self.argcompare(fx, axis=1)\n y = fx[np.arange(m), i]\n dydx = np.zeros_like(y)\n for j in range(self.funcCount):\n c = i == j\n dydx[c] = self.functions[j].derivative(x[c])\n return y, dydx", "def _evalAndDer(self, x):\n m = len(x)\n fx = np.zeros((m, self.funcCount))\n for j in range(self.funcCount):\n fx[:, j] = self.functions[j](x)\n i = self.argcompare(fx, axis=1)\n y = fx[np.arange(m), i]\n dydx = np.zeros_like(y)\n for j in range(self.funcCount):\n c = i == j\n dydx[c] = self.functions[j].derivative(x[c])\n return y, dydx", "def backward(self, d_output=None):\n if d_output is None:\n d_output = 1.0\n backpropagate(VariableWithDeriv(self, d_output))", "def _2ndderiv_xyz(self, x, y, z, i, j):\n return (\n 4.0\n * numpy.pi\n * self._b\n * self._c\n * _2ndDerivInt(\n x,\n y,\n z,\n lambda m: self._mdens(m),\n lambda m: self._mdens_deriv(m),\n self._b2,\n self._c2,\n i,\n j,\n glx=self._glx,\n glw=self._glw,\n )\n )", "def GetDerivative(self, *args):\n return _ITKCostFunctionsPython.itkSingleValuedCostFunction_GetDerivative(self, *args)", "def cost_derivative(output_activations, y):\n return (output_activations - y)", "def derivativeY(self, *args):\n if self.n_dims >= 4:\n j = 2\n else:\n j = 1\n if self.i_dim == j:\n return np.ones_like(*args[0])\n else:\n return np.zeros_like(*args[0])", "def cost_derivative(self, output_activations, y):\r\n return (output_activations-y)", "def cost_derivative(self, output_activations, y):\n return (output_activations-y)", "def cost_derivative(self, output_activations, y):\n return (output_activations-y)", "def cost_derivative(self, output_activations, y):\n return (output_activations-y)", "def cost_derivative(self, output_activations, y):\n return (output_activations-y)", "def derivative(a, y, z):\n return a - y + (z * 0)", "def cost_derivative(self, output_activations, y):\n return 2 * (output_activations - y)", "def derivative(a, y, z):\n return (a - y) * Sigmoid.derivative(z)" ]
[ "0.7327428", "0.62851566", "0.6239669", "0.622021", "0.61582315", "0.610452", "0.609245", "0.6030133", "0.6005503", "0.600335", "0.59997815", "0.594842", "0.5872099", "0.58583814", "0.583254", "0.5830568", "0.5830568", "0.58183813", "0.58112955", "0.58069754", "0.5799643", "0.5778134", "0.57691956", "0.5760064", "0.5760064", "0.5760064", "0.5760064", "0.5753897", "0.57535076", "0.5749823" ]
0.7861094
0
Returns the gradient of the given output with respect to all parameters.
def get_gradient(self, output_name=None): return array([self.gradient[wrt][output_name] for wrt in self.param_names])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gradient(self, node, output_grad):\r\n return [auto_sum_op(output_grad, get_shape_op(node.inputs[0]) ), 0-auto_sum_op(output_grad, get_shape_op(node.inputs[1]) )]\r\n #return [auto_sum_op(output_grad, ), 0-output_grad]\r", "def gradient(self, node, output_grad):\n return [output_grad]", "def gradient(self, node, output_grad):\r\n return [output_grad]", "def gradient(self, node, output_grad):\r\n return [output_grad]", "def gradient(self, node, output_grad):\r\n #return [output_grad]\r\n return [broadcast_to(output_grad,get_shape_op(node.inputs[0]),node.const_attr)]", "def gradient(self, node, output_grad):\r\n #return [output_grad]\r\n return [broadcast_mean_to(output_grad,get_shape_op(node.inputs[0]),node.const_attr)]", "def gradient(self, node, output_grad):\r\n \"\"\"TODO: Your code here\"\"\"\r\n return [ output_grad / node.inputs[0] ]", "def gradient(self, node, output_grad):\r\n \"\"\"TODO: Your code here\"\"\"\r\n return [auto_sum_op(output_grad / node.inputs[1] ,get_shape_op(node.inputs[0])), auto_sum_op(-output_grad * node.inputs[0] / node.inputs[1] / node.inputs[1] , get_shape_op(node.inputs[1]) ) ]", "def gradient(self, node, output_grad):\r\n return [ - output_grad]", "def gradient(self, node, output_grad):\r\n return [auto_sum_op(output_grad, get_shape_op(node.inputs[0])) , zeroslike_op(node.inputs[1])]\r\n #assert True\r", "def gradient(self, node, output_grad):\r\n return [reshape_op(output_grad , get_shape_op(node.inputs[0])), zeroslike_op(node.inputs[1])]", "def gradient(self, node, output_grad):\n return [node.inputs[1] * output_grad, node.inputs[0] * output_grad]", "def gradient(self, node, output_grad):\r\n raise NotImplementedError", "def gradient(self, node, output_grad):\r\n \"\"\"TODO: Your code here\"\"\"\r\n return [ - output_grad * node.const_attr / node.inputs[0] / node.inputs[0] ]", "def gradient(self, node, output_grad):\r\n \"\"\"TODO: Your code here\"\"\"\r\n return [output_grad * exp(node.inputs[0])]", "def gradient(self, node, output_grad):\r\n return [auto_broadcast_op(output_grad, get_shape_op(node.inputs[0])) , zeroslike_op(node.inputs[1])]\r\n #assert True\r", "def gradient(self, node, output_grad):\n return [output_grad, output_grad]", "def gradient(self, node, output_grad):\r\n \"\"\"TODO: Your code here\"\"\"\r\n return [auto_sum_op(output_grad * node.inputs[1] , get_shape_op(node.inputs[0])), auto_sum_op(output_grad * node.inputs[0] , get_shape_op(node.inputs[1]))]", "def gradient(self, node, output_grad):\r\n return [auto_sum_op(output_grad, get_shape_op(node.inputs[0]) ), auto_sum_op(output_grad, get_shape_op(node.inputs[1]) )]", "def compute_gradient(self, grad=None):\n if grad is None:\n grad = backend.ones_like(self.output_value)\n dx = -grad\n return dx", "def gradient(self, node, output_grad):\r\n if node.const_attr==None:\r\n return [reduce_sum(output_grad,reduction_indices = None),zeroslike_op(node.inputs[1])]\r\n else:\r\n return [reduce_sum(output_grad,reduction_indices = list(node.const_attr)),zeroslike_op(node.inputs[1])]", "def gradient(self, node, output_grad):\r\n return [output_grad]\r\n \"\"\"higher accuracy notice notice here\"\"\"", "def gradient(self, node, output_grad):\n return None", "def gradient(self, node, output_grad):\r\n return [reshape_op(output_grad , get_shape_op(node.inputs[0]))]", "def gradient(self, node, output_grad):\r\n return None", "def gradient(self, node, output_grad):\r\n if node.const_attr==None:\r\n return [reduce_mean(output_grad,reduction_indices = None),zeroslike_op(node.inputs[1])]\r\n 
else:\r\n return [reduce_mean(output_grad,reduction_indices = list(node.const_attr)),zeroslike_op(node.inputs[1])]", "def gradient(self, node, output_grad):\r\n return [conv2d_grad_op1(node.inputs[0], node.inputs[1], node.const_attr , output_grad),conv2d_grad_op2(node.inputs[0], node.inputs[1], node.const_attr , output_grad)]", "def gradient(self, node, output_grad):\r\n \"\"\"TODO: Your code here\"\"\"\r\n return [output_grad / node.const_attr ]", "def gradient(self, inputs):\n raise NotImplementedError", "def get_input_grad(self, Y, output_grad):\n return np.multiply(logistic_deriv(Y), output_grad)" ]
[ "0.76306516", "0.76022816", "0.75950354", "0.75950354", "0.757593", "0.7486398", "0.7485103", "0.747612", "0.7423461", "0.741599", "0.73761606", "0.73707384", "0.7339724", "0.73261887", "0.73257333", "0.7322427", "0.73202705", "0.7306291", "0.7302661", "0.7257447", "0.7244565", "0.7208363", "0.71758956", "0.7168543", "0.71533334", "0.71521527", "0.7126637", "0.71241575", "0.7106616", "0.71064603" ]
0.77606237
0
Returns the Hessian matrix of the given output with respect to all parameters.
def get_Hessian(self, output_name=None): #return array([self.hessian[in1][in2][output_name] for (in1,in2) in product(self.param_names, self.param_names)]) return array([self.hessian[in1][in2][output_name] for (in1,in2) in product(self.param_names, self.param_names)])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_hessian(self):\n return self.tc.hessian_func(\n self.pf.XS[:, :, 0].transpose(),\n self.pf.XS[:, :, 1].transpose(),\n self.pf.WS[:].transpose())", "def _get_hessian(self):\n if not self.sparse:\n hess = numpy.dot(self.jacobian_T, self.jacobian)\n else:\n hess = self.jacobian_T*self.jacobian\n return hess", "def hessian(self, params):\n\n if self.use_sqrt:\n return self.hessian_sqrt(params)\n else:\n return self.hessian_full(params)", "def hessian(self, x):\n h = self._hess(\n time_series=self.observed_data,\n a=x[0],\n b=x[1],\n c=x[2],\n sigma=self.sigma\n )\n return h", "def hessian(self, x1, x2, out=None):\n raise NotImplementedError", "def hessian(self):\n\n hess_size = 3 * len(self.molecule.molecule['input'])\n\n # output.dat is the psi4 output file.\n with open('output.dat', 'r') as file:\n\n lines = file.readlines()\n\n for count, line in enumerate(lines):\n if '## Hessian' in line or '## New Matrix (Symmetry' in line:\n # Set the start of the hessian to the row of the first value.\n hess_start = count + 5\n break\n else:\n raise EOFError('Cannot locate Hessian matrix in output.dat file.')\n\n # Check if the hessian continues over onto more lines (i.e. if hess_size is not divisible by 5)\n extra = 0 if hess_size % 5 == 0 else 1\n\n # hess_length: # of cols * length of each col\n # + # of cols - 1 * #blank lines per row of hess_vals\n # + # blank lines per row of hess_vals if the hess_size continues over onto more lines.\n hess_length = (hess_size // 5) * hess_size + (hess_size // 5 - 1) * 3 + extra * (3 + hess_size)\n\n hess_end = hess_start + hess_length\n\n hess_vals = []\n\n for file_line in lines[hess_start:hess_end]:\n # Compile lists of the 5 Hessian floats for each row.\n # Number of floats in last row may be less than 5.\n # Only the actual floats are added, not the separating numbers.\n row_vals = [float(val) for val in file_line.split() if len(val) > 5]\n hess_vals.append(row_vals)\n\n # Remove blank list entries\n hess_vals = [elem for elem in hess_vals if elem]\n\n reshaped = []\n\n # Convert from list of (lists, length 5) to 2d array of size hess_size x hess_size\n for old_row in range(hess_size):\n new_row = []\n for col_block in range(hess_size // 5 + extra):\n new_row += hess_vals[old_row + col_block * hess_size]\n\n reshaped.append(new_row)\n\n hess_matrix = array(reshaped)\n\n # Cache the unit conversion.\n conversion = 627.509391 / (0.529 ** 2)\n hess_matrix *= conversion\n\n check_symmetry(hess_matrix)\n\n return hess_matrix", "def _getHessian(self):\n assert self.init, 'GP not initialised'\n assert self.fast is False, 'Not supported for fast implementation'\n\n if self.cache['Hessian'] is None:\n ParamMask = self.gp.getParamMask()['covar']\n std = sp.zeros(ParamMask.sum())\n H = self.gp.LMLhess_covar()\n It = (ParamMask[:, 0] == 1)\n self.cache['Hessian'] = H[It, :][:, It]\n\n return self.cache['Hessian']", "def calculate_hessian(model, data, step_size):\n hessian = pd.DataFrame(0, index = np.arange(data.shape[0]), columns=pd.MultiIndex.from_product([model.output_names, model.perturbation_feature_pairs + model.feature_names], names=['model.output_names','model.feature_pairs']))\n for output_name in model.output_names:\n hessian_calculation_helpers = create_hessian_calculation_columns(model, output_name)\n mixed_derivative = (data.loc[:, hessian_calculation_helpers[0]].values - data.loc[:, hessian_calculation_helpers[1]].values - data.loc[:, hessian_calculation_helpers[2]].values + data.loc[:, hessian_calculation_helpers[3]].values) / (step_size * 
step_size)\n mixed_derivative *= np.sign(data.loc[:, hessian_calculation_helpers[1]].values + data.loc[:, hessian_calculation_helpers[2]].values - 2 * data.loc[:, hessian_calculation_helpers[0]].values)\n hessian.loc[:, zip([output_name] * len(model.perturbation_feature_pairs), model.perturbation_feature_pairs)] = mixed_derivative\n hessian.loc[:, zip([output_name] * len(model.feature_names), model.feature_names)] = np.array([(data.loc[:, (output_name,f)] - data.loc[:, (output_name,'core')]) / (step_size) for f in model.feature_names]).T\n return hessian", "def calc_hessian_at(self, x: np.ndarray) -> np.ndarray:\n\n if self.hessian_f:\n # if the problem has knowledge about the hessian, use it directly without approximation\n return self.hessian_f(x)\n\n return hessian_approximation(self.f, x)", "def hessian(self, params, *args, **kwargs):\n if self._use_approx_cs:\n return approx_hess_cs(params, self.loglike,\n args=args, kwargs=kwargs)\n else:\n return approx_hess(params, self.loglike,\n args=args, kwargs=kwargs)", "def _hessian(self):\n log_g = np.log(self._gv())\n log_f = np.log(self._fv())\n h_inf = np.mean((1 - log_g + log_f) / (self.y - self.err_inf) ** 2)\n return h_inf", "def hessian(self, params, transformed=True):\n params = np.array(params, ndmin=1)\n\n return approx_hess_cs(params, self.loglike)", "def hessian(self):\n\n with open('lig.fchk', 'r') as fchk:\n\n lines = fchk.readlines()\n hessian_list = []\n\n for count, line in enumerate(lines):\n if line.startswith('Cartesian Force Constants'):\n start_pos = count + 1\n if line.startswith('Dipole Moment'):\n end_pos = count\n\n if not start_pos and end_pos:\n raise EOFError('Cannot locate Hessian matrix in lig.fchk file.')\n\n for line in lines[start_pos: end_pos]:\n # Extend the list with the converted floats from the file, splitting on spaces and removing '\\n' tags.\n hessian_list.extend([float(num) * 0.529 for num in line.strip('\\n').split()])\n\n hess_size = 3 * len(self.molecule.molecule['input'])\n\n hessian = zeros((hess_size, hess_size))\n\n # Rewrite Hessian to full, symmetric 3N * 3N matrix rather than list with just the non-repeated values.\n m = 0\n for i in range(hess_size):\n for j in range(i + 1):\n hessian[i, j] = hessian_list[m]\n hessian[j, i] = hessian_list[m]\n m += 1\n\n check_symmetry(hessian)\n\n return hessian", "def hessian(beta, X):\n w = sigmoid(np.dot(X, beta))\n w_vector = w * (1-w)\n \n return np.dot(X.T, X*w_vector)", "def hessian(f, s, p, dx=1e-6, gmix=False, k =['All']):\n import numpy\n N = (p.m['n'] - 1)\n H = numpy.zeros(shape=(N,N))\n for m in range(1, N + 1):\n for z in range(1, N + 1):\n H[m - 1, z - 1] = FD(f, s, p, 2, z, m, dx, gmix, k)\n \n return H", "def sum_hessian(self, module, g_inp, g_out):\n return self._sum_hessian(module, g_inp, g_out)", "def hessian(x):\n x_grad = np.gradient(x) \n hessian = np.empty((x.ndim, x.ndim) + x.shape, dtype=x.dtype) \n for k, grad_k in enumerate(x_grad):\n \"\"\"\n iterate over dimensions\n apply gradient again to every component of the first derivative.\n \"\"\"\n tmp_grad = np.gradient(grad_k) \n for l, grad_kl in enumerate(tmp_grad):\n hessian[k, l, :, :] = grad_kl\n return hessian", "def sum_hessian(\n self, module: Module, g_inp: Tuple[Tensor], g_out: Tuple[Tensor]\n ) -> Tensor:\n self._check_2nd_order_make_sense(module, g_out)\n return self._sum_hessian(module, g_inp, g_out)", "def calculate_hessian(y, tx, w):\n txw = tx.dot(w)\n diag = sigmoid(txw)*(np.ones(txw.shape)-sigmoid(txw))\n return np.matmul(np.multiply(tx,diag).T,tx)", "def 
get_hessian(phi, pred, t, dot_product, reg= 1, regression= \"logistic\"):\n R = np.eye(pred.shape[0])\n if regression == \"logistic\":\n for i in range(pred.shape[0]):\n R[i,i] = pred[i,0] * (1- pred[i,0])\n elif regression == \"probit\":\n for i in range(pred.shape[0]):\n y_n = pred[i,0]\n t_n = t[i,0] \n dotp = dot_product[i, 0]\n pdf = norm.pdf(dotp)\n\n term1 = 1/ (y_n * (1- y_n) + TOLERANCE)\n term2 = (y_n - t_n)/(y_n**2 * (1- y_n) + TOLERANCE)\n term3 = (y_n - t_n)/((1- y_n)**2 * y_n + TOLERANCE)\n term4 = (y_n - t_n)* dotp/(y_n * (1- y_n) * pdf + TOLERANCE)\n\n R[i,i] = (term1 - term2 + term3 - term4)*(pdf**2)\n\n # Add regularization\t\t\t\n hessian = np.matmul(np.matmul(phi.T, R), phi) + np.eye(phi.shape[1])/reg\n return hessian", "def hessian(f, x, s=_DEFAULT_STEP):\n x = np.asarray(x)\n n = len(x)\n e = s * np.eye(n)\n\n forw1 = np.zeros(n)\n forw2 = np.zeros((n, n))\n for i in range(n):\n forw1[i] = f(x + e[i])\n for j in range(i, n):\n forw2[i, j] = forw2[j, i] = f(x + e[i] + e[j])\n\n H = (forw2 - _colvec(forw1) - _rowvec(forw1) + f(x)) / s**2\n return H", "def approx_hessian(f, x, epsilon, args=()):\n n = x.shape[0]\n npts = 1\n if len(x.shape) > 1:\n npts = x.shape[1]\n H = np.zeros((n, n, npts))\n ei = np.zeros(n)\n for i in range(n):\n ei[i] = .5 * epsilon\n g1 = approx_gradient(f, (x.T + ei).T, epsilon, args=args)\n g2 = approx_gradient(f, (x.T - ei).T, epsilon, args=args)\n H[i, ...] = np.reshape((g1 - g2) / epsilon, (n, npts))\n ei[i] = 0\n return H.squeeze()", "def hes_res(self, params, **kwargs):\n e = kwargs.get(\"e\", self.problem.data_e)\n\n hes = self.hessian.eval(params, **kwargs)\n for i, e_i in enumerate(e):\n hes[:, :, i] = - hes[:, :, i] / e_i\n\n return hes, self.jac_res(params, **kwargs)", "def grid_hessian(self, gridaxes):\n assert np.isscalar(self.dim), 'Hessian only implemented for scalar and vector functions'\n assert len(gridaxes) == self.sdim, \"Input has wrong dimension\"\n colloc = [collocation_derivs(self.kvs[i], gridaxes[i], derivs=2) for i in range(self.sdim)]\n\n d = self.sdim\n n_hess = ((d+1)*d) // 2 # number of components in symmetric part of Hessian\n N = tuple(len(g) for g in gridaxes) # shape of tensor grid\n\n # determine size of output array\n if self.dim == 1:\n out_shape = N + (n_hess,)\n else:\n out_shape = N + (self.dim, n_hess)\n hess = np.empty(out_shape, dtype=self.coeffs.dtype)\n\n i_hess = 0\n for i in reversed(range(self.sdim)): # x-component is the last one\n for j in reversed(range(i+1)):\n # compute vector of derivative indices\n D = self.sdim * [0]\n D[i] += 1\n D[j] += 1\n ops = [colloc[k][D[k]] for k in range(self.sdim)] # derivatives in directions i,j\n\n if self.dim == 1: # scalar function\n hess[..., i_hess] = apply_tprod(ops, self.coeffs) # D_i D_j (self)\n else: # vector function\n for k in range(self.dim):\n hess[..., k, i_hess] = apply_tprod(ops, self.coeffs[..., k]) # D_i D_j (self[k])\n i_hess += 1\n return hess # shape: shape(grid) x self.dim x n_hess", "def hessian(self,x=None,y=None,save=True):\n\n\t\tif (x is not None) and (y is not None):\n\n\t\t\tassert x.shape==y.shape,\"x and y must have the same shape!\"\n\n\t\t\t#x coordinates\n\t\t\tif type(x)==u.quantity.Quantity:\n\t\t\t\n\t\t\t\tassert x.unit.physical_type==self.side_angle.unit.physical_type\n\t\t\t\tj = np.mod(((x / self.resolution).decompose().value).astype(np.int32),self.data.shape[1])\n\n\t\t\telse:\n\n\t\t\t\tj = np.mod((x / self.resolution.to(u.rad).value).astype(np.int32),self.data.shape[1])\t\n\n\t\t\t#y coordinates\n\t\t\tif 
type(y)==u.quantity.Quantity:\n\t\t\t\n\t\t\t\tassert y.unit.physical_type==self.side_angle.unit.physical_type\n\t\t\t\ti = np.mod(((y / self.resolution).decompose().value).astype(np.int32),self.data.shape[0])\n\n\t\t\telse:\n\n\t\t\t\ti = np.mod((y / self.resolution.to(u.rad).value).astype(np.int32),self.data.shape[0])\n\n\t\telse:\n\t\t\ti = None\n\t\t\tj = None\n\n\t\t#Call the C backend\n\t\thessian_xx,hessian_yy,hessian_xy = _topology.hessian(self.data,j,i)\n\t\t\n\t\t#Return the hessian\n\t\tif (x is not None) and (y is not None):\n\n\t\t\treturn hessian_xx.reshape(x.shape),hessian_yy.reshape(x.shape),hessian_xy.reshape(x.shape)\n\n\t\telse:\n\n\t\t\tif save:\n\t\t\t\tself.hessian_xx = hessian_xx\n\t\t\t\tself.hessian_yy = hessian_yy\n\t\t\t\tself.hessian_xy = hessian_xy\n\n\t\t\treturn hessian_xx,hessian_yy,hessian_xy", "def make_hessian_mat_prod(self, module, g_inp, g_out):\n return self._make_hessian_mat_prod(module, g_inp, g_out)", "def test_hessian():\n x, y = fwd.Variable(), fwd.Variable()\n rosen = 100.0*(y - x**2)**2 + (1 - x)**2.0\n rosen_hessian = lambda x, y: \\\n np.array([[1200*x**2-400*x+2, -400*x],\n [-400*x, 200]])\n rosen_hessian_returned = rosen.hessian_at({x: 1.0, y: 1.0})\n rosen_hessian_expected = rosen_hessian(1.0, 1.0)\n for i in range(2):\n for j in range(2):\n assert equals(rosen_hessian_returned[i, j],\n rosen_hessian_expected[i, j])", "def approx_hessian(f, x, epsilon):\n n = len(x)\n H = np.zeros((n, n))\n ei = np.zeros(n)\n for i in range(n):\n ei[i] = .5 * epsilon\n g1 = approx_gradient(f, x + ei, epsilon)\n g2 = approx_gradient(f, x - ei, epsilon)\n H[i, :] = (g1 - g2) / epsilon\n ei[i] = 0\n return H", "def compute_hessian(self, dw, trn_X, trn_y, epsilon: float = 0.01):\n norm = torch.cat([w.view(-1) for w in dw]).norm()\n eps = epsilon / norm\n\n dalpha_pos = self.finite_difference(dw, trn_X, trn_y, eps, wrt='alpha')\n dalpha_neg = self.finite_difference(dw, trn_X, trn_y, -eps, wrt='alpha')\n hessian = [(p - n) / (2. 
* eps) for p, n in zip(dalpha_pos, dalpha_neg)]\n return hessian", "def calculate_hessian(self, finite_step):\n\n # Create the OpenMM coords list from the qm coordinates and convert to nm\n input_coords = self.molecule.coords['qm'].flatten() * constants.ANGS_TO_NM\n\n # We get each hessian element from = [E(dx + dy) + E(-dx - dy) - E(dx - dy) - E(-dx + dy)] / 4 dx dy\n hessian = np.zeros((3 * len(self.molecule.atoms), 3 * len(self.molecule.atoms)))\n\n for i in range(3 * len(self.molecule.atoms)):\n for j in range(i, 3 * len(self.molecule.atoms)):\n # Mutate the atomic coords\n # Do less energy evaluations on the diagonal of the matrix\n if i == j:\n coords = deepcopy(input_coords)\n coords[i] += 2 * finite_step\n e1 = self.get_energy(self.format_coords(coords))\n coords = deepcopy(input_coords)\n coords[i] -= 2 * finite_step\n e2 = self.get_energy(self.format_coords(coords))\n hessian[i, j] = (e1 + e2) / (4 * finite_step**2 * self.molecule.atoms[i // 3].atomic_mass)\n else:\n coords = deepcopy(input_coords)\n coords[i] += finite_step\n coords[j] += finite_step\n e1 = self.get_energy(self.format_coords(coords))\n coords = deepcopy(input_coords)\n coords[i] -= finite_step\n coords[j] -= finite_step\n e2 = self.get_energy(self.format_coords(coords))\n coords = deepcopy(input_coords)\n coords[i] += finite_step\n coords[j] -= finite_step\n e3 = self.get_energy(self.format_coords(coords))\n coords = deepcopy(input_coords)\n coords[i] -= finite_step\n coords[j] += finite_step\n e4 = self.get_energy(self.format_coords(coords))\n hessian[i, j] = (e1 + e2 - e3 - e4) / (4 * finite_step ** 2 * self.molecule.atoms[i // 3].atomic_mass)\n\n # Now make the matrix symmetric\n sym_hessian = hessian + hessian.T - np.diag(hessian.diagonal())\n return sym_hessian" ]
[ "0.7668539", "0.7445004", "0.72262585", "0.71646506", "0.7061997", "0.7034577", "0.6910204", "0.67564386", "0.67285246", "0.6721224", "0.6684816", "0.66643095", "0.6623788", "0.66084576", "0.6566024", "0.65476125", "0.6538235", "0.6438131", "0.64299875", "0.6404051", "0.63860494", "0.63262343", "0.6299066", "0.62319505", "0.6217438", "0.62154096", "0.6210375", "0.61952955", "0.61751354", "0.61585987" ]
0.7849769
0
In a navigation context, a component of an equipment is a point (tag/entity).
def __getitem__(self,key): # Using [key] syntax on an equipment allows to retrieve a tag directly # or a point referred to this particular equipment for each in self.tags: if key == each: return self.tags[key] # if key not found in tags... we probably are searching a point # self will call __iter__ which will look for points in equipment for point in self: #partial_results = [] # Given an ID.... should return the point with this ID if key.replace('@','') == str(point.id).replace('@',''): return point # Given a dis or navName... should return equip if 'dis' in each.tags: if key == each.tags['dis']: return each if 'navName' in each.tags: if key == each.tags['navName']: return each if 'navNameFormat' in each.tags: if key == each.tags['navNameFormat']: return each else: try: # Maybe key is a filter_expr request = self.find_entity(key) return request.result except HaystackError as e: self._session._log.warning('{} not found'.format(key))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def visit_equipment(self, equipment):", "def handle_equipment_mouseover(self):\n if self.skill_tree_displaying:\n return\n mouse_pos = pg.mouse.get_pos()\n slot_moused_over = ''\n for slot in self.equipment_tiles:\n if self.equipment_tiles[slot].collidepoint(mouse_pos):\n slot_moused_over = slot\n break\n\n if slot_moused_over:\n self.tooltip_focus = self.equipment_tiles[slot_moused_over]\n if self.player_dict['equipment'][slot_moused_over]: # i.e. if there is an item equipped in the slot\n equipment_dict = self.player_dict['equipment'][slot_moused_over].to_dict()\n else:\n equipment_dict = None\n player_panel_renderer.draw_equipment_details(equipment_dict, slot_moused_over)", "def can_traverse(self, equipment: str, point: Point) -> bool:\n region_type = self.get_region(point)\n traversable = [\n [\"torch\", \"climbing\"],\n [\"climbing\", \"neither\"],\n [\"torch\", \"neither\"]\n ]\n return equipment in traversable[region_type]", "def find_endo_apex_point(self, display_opt):\n curv = vtk.vtkCurvatures()\n# curv.SetCurvatureTypeToMean()\n curv.SetCurvatureTypeToMinimum()\n\n curv.SetInputData(self.endo_poly)\n curv.Update()\n\n curv_numpy = numpy_support.vtk_to_numpy(curv.GetOutput().GetPointData().GetScalars())\n max_curv_ptid = np.argmax(curv_numpy)\n max_curv_pt = self.endo_poly.GetPoints().GetPoint(max_curv_ptid)\n\n self.endo_apex_node = max_curv_pt\n pointActor = include_points(list(self.endo_apex_node), 1, 15, (0,0,0))\n\n sc_r = curv.GetOutput().GetScalarRange()\n\n # build lut\n scheme = 16\n colorSeries = vtk.vtkColorSeries()\n colorSeries.SetColorScheme(scheme)\n\n lut = vtk.vtkColorTransferFunction()\n lut.SetColorSpaceToHSV()\n\n numColors = colorSeries.GetNumberOfColors()\n for i in range(numColors):\n color = colorSeries.GetColor(i)\n dColor = np.zeros((3,))\n dColor[0] = color[0]/255.0\n dColor[1] = color[1]/255.0\n dColor[2] = color[2]/255.0\n t = sc_r[0] + (sc_r[1] - sc_r[0]) / (numColors-1)*i\n lut.AddRGBPoint(t, dColor[0], dColor[1], dColor[2])\n\n mapper = vtk.vtkPolyDataMapper()\n mapper.SetInputConnection(curv.GetOutputPort())\n mapper.SetLookupTable(lut)\n mapper.SetScalarRange(sc_r)\n\n scalarBar = vtk.vtkScalarBarActor()\n scalarBar.SetLookupTable(mapper.GetLookupTable())\n scalarBar.SetTitle('scalar bar')\n scalarBar.SetNumberOfLabels(5)\n\n actor = vtk.vtkActor()\n actor.SetMapper(mapper)\n\n ren = vtk.vtkRenderer()\n ren.SetBackground(1.0, 1.0, 1.0)\n ren.AddActor(actor)\n ren.AddActor(pointActor)\n ren.AddActor2D(scalarBar)\n\n\n if display_opt:\n vtk_show(ren)", "def intf_POINTFORM(E):\n # Preserve the VAL obj (don't take .val) which will compose the gg list.\n z= E.The.StackPop()\n y= E.The.StackPop()\n x= E.The.StackPop()\n p= objectifier.StackOB_LST([x, y, z])\n p.names= ['x','y','z']\n E.The.StackPush(p)", "def __handle_view_item(self, gamestate_component):", "def current_entity(self) -> Optional[DXFGraphic]:\n return self.entity_stack[-1][0] if self.entity_stack else None", "def test_visualize_equipment(self):\n pass", "def visit_character(self, character):\n self.visit_container(character)\n # self.connect_equipment(character)", "def visit_entity(self, entity):", "def find_epi_apex_point(self, display_opt):\n curv = vtk.vtkCurvatures()\n curv.SetCurvatureTypeToMean()\n# curv.SetCurvatureTypeToMinimum()\n # curv.SetCurvatureTypeToMaximum()\n# curv.SetCurvatureTypeToGaussian()\n\n curv.SetInputData(self.epi_poly)\n curv.Update()\n\n curv_numpy = numpy_support.vtk_to_numpy(curv.GetOutput().GetPointData().GetScalars())\n max_curv_ptid = 
np.argmax(curv_numpy)\n max_curv_pt = self.epi_poly.GetPoints().GetPoint(max_curv_ptid)\n\n self.epi_apex_node = max_curv_pt\n pointActor = include_points(list(self.epi_apex_node), 1, 15, (0,0,0))\n\n sc_r = curv.GetOutput().GetScalarRange()\n\n # build lut\n scheme = 16\n colorSeries = vtk.vtkColorSeries()\n colorSeries.SetColorScheme(scheme)\n\n lut = vtk.vtkColorTransferFunction()\n lut.SetColorSpaceToHSV()\n\n numColors = colorSeries.GetNumberOfColors()\n for i in range(numColors):\n color = colorSeries.GetColor(i)\n dColor = np.zeros((3,))\n dColor[0] = color[0]/255.0\n dColor[1] = color[1]/255.0\n dColor[2] = color[2]/255.0\n t = sc_r[0] + (sc_r[1] - sc_r[0]) / (numColors-1)*i\n lut.AddRGBPoint(t, dColor[0], dColor[1], dColor[2])\n\n mapper = vtk.vtkPolyDataMapper()\n mapper.SetInputConnection(curv.GetOutputPort())\n mapper.SetLookupTable(lut)\n mapper.SetScalarRange(sc_r)\n\n scalarBar = vtk.vtkScalarBarActor()\n scalarBar.SetLookupTable(mapper.GetLookupTable())\n scalarBar.SetTitle('scalar bar')\n scalarBar.SetNumberOfLabels(5)\n\n actor = vtk.vtkActor()\n actor.SetMapper(mapper)\n\n ren = vtk.vtkRenderer()\n ren.SetBackground(1.0, 1.0, 1.0)\n# ren.AddActor(self.meshActor)\n ren.AddActor(actor)\n ren.AddActor(pointActor)\n ren.AddActor2D(scalarBar)\n\n if display_opt:\n vtk_show(ren)", "def get(self, entity):\n\t\treturn entity.get_component(self.component_type)", "def _point(self):\n raise NotImplementedError", "def pickUp(self):\n pos = self.getRoverLocation()\n item = self.map[pos.y,pos.x]\n if type(item) == Part:\n self.inventory.addPart(str(item))\n self.map[pos.y,pos.x] = None", "def __handle_view_door(self, gamestate_component):", "def setClickedComponent(self, clickedComponent: java.awt.Component, vertexBasedPoint: java.awt.geom.Point2D) -> None:\n ...", "def set_apex_node(self):\n if self.opt == 'CT':\n self.epi_apex_node = self.mesh_poly.GetPoints().GetPoint(3604)\n self.endo_apex_node = self.mesh_poly.GetPoints().GetPoint(3579)\n else:\n self.endo_apex_node = None # we do not know this\n self.epi_apex_node = self.mesh_poly.GetPoints().GetPoint(0)", "def get_from_context(cls, context):\n obj = context.active_object\n\n if obj and obj.type not in {\"LAMP\", \"CAMERA\"}:\n mat = obj.active_material\n\n if mat:\n # ID pointer\n node_tree = mat.appleseed.osl_node_tree\n\n if node_tree:\n return node_tree, mat, mat\n\n elif obj and obj.type == \"LAMP\":\n node_tree = obj.data.appleseed.osl_node_tree\n\n if node_tree:\n return node_tree, None, None\n\n return None, None, None", "def GetPosition(self):\n ...", "def intf_ENTPTS(E):\n if not inc.entid_or_LST_of_entids(E.The,1):\n print(\"Input Error: pts\")\n print(intf_ENTPTS.__doc__)\n return # Without doing much of anything.\n myeids= E.The.StackPop().val\n if type(myeids)==type(list()):\n #myeids= map(lambda x:x.val, myeids) # Should now be a list of ints.\n myeids= [x.val for x in myeids] # Should now be a list of ints.\n else:\n myeids= [ myeids ] # Also a (1 item) list of ints.\n for myeid in myeids:\n # NEEDS TO CHECK IF EID EXISTS!\n if myeid in MMEL.El: # Check if eid exists.\n pids= MMEL.El[myeid].epts # List of point IDs for this entity.\n for pid in pids:\n x= mm.Entity.allplist.PLdict[pid].x\n y= mm.Entity.allplist.PLdict[pid].y\n z= mm.Entity.allplist.PLdict[pid].z\n z= objectifier.StackOB_VAL(z) # Can't be just regular Python ints.\n y= objectifier.StackOB_VAL(y)\n x= objectifier.StackOB_VAL(x)\n p= objectifier.StackOB_LST([x, y, z])\n p.names= ['x','y','z']\n E.The.StackPush(p)\n else:\n print(\"Warning: No entity 
#%d. Skipping.\" % myeid)", "def targ_ent(self, ent: 'Entity') -> None:\n if ent['origin']:\n self.target = Vec.from_str(ent['origin'])", "def infer_entity(self, tagset, identifier=None, equip_ref=None):\n triples = []\n infer_results = []\n if identifier is None:\n raise Exception(\"PROVIDE IDENTIFIER\")\n\n # handle Site\n if \"site\" in tagset and \"equip\" not in tagset and \"point\" not in tagset:\n triples.append((self._BLDG[identifier.replace(\" \", \"_\")], A, BRICK.Site))\n return triples, [(identifier, list(tagset), [BRICK.Site])]\n\n # take into account 'equipref' to avoid unnecessarily inventing equips\n if equip_ref is not None:\n equip_entity_id = equip_ref\n inferred_equip_classes = []\n else:\n non_point_tags = set(tagset).difference(self._point_tags)\n non_point_tags.add(\"equip\")\n inferred_equip_classes, leftover_equip = self.most_likely_tagsets(\n non_point_tags\n )\n inferred_equip_classes = [\n c for c in inferred_equip_classes if self._is_equip(c)\n ]\n equip_entity_id = identifier.replace(\" \", \"_\") + \"_equip\"\n\n # choose first class for now\n point_entity_id = identifier.replace(\" \", \"_\") + \"_point\"\n\n # check if this is a point; if so, infer what it is\n if set(tagset).intersection(self._point_tags):\n tagset = set(tagset).difference(set([\"equip\"]))\n inferred_point_classes, leftover_points = self.most_likely_tagsets(tagset)\n inferred_point_classes = [\n c for c in inferred_point_classes if self._is_point(c)\n ]\n if len(inferred_point_classes) > 0:\n triples.append(\n (self._BLDG[point_entity_id], A, BRICK[inferred_point_classes[0]])\n )\n triples.append(\n (\n self._BLDG[point_entity_id],\n RDFS.label,\n rdflib.Literal(identifier),\n )\n )\n infer_results.append((identifier, list(tagset), inferred_point_classes))\n\n if len(inferred_equip_classes) > 0:\n triples.append(\n (self._BLDG[equip_entity_id], A, BRICK[inferred_equip_classes[0]])\n )\n triples.append(\n (\n self._BLDG[equip_entity_id],\n BRICK.hasPoint,\n self._BLDG[point_entity_id],\n )\n )\n triples.append(\n (\n self._BLDG[equip_entity_id],\n RDFS.label,\n rdflib.Literal(identifier + \" equip\"),\n )\n )\n triples.append(\n (\n self._BLDG[point_entity_id],\n RDFS.label,\n rdflib.Literal(identifier + \" point\"),\n )\n )\n infer_results.append((identifier, list(tagset), inferred_equip_classes))\n return triples, infer_results", "def equipment(self, equipment):\n\n self._equipment = equipment", "def test_entity_view(self):\n ps = PhysicsState(None, self.proto_state)\n self.assertEqual(ps[0].name, 'First')\n entity = ps[0]\n self.assertTrue(isinstance(entity, _EntityView))\n\n self.assertEqual(entity.x, 10)\n self.assertEqual(entity.y, 20)\n self.assertEqual(entity.vx, 30)\n self.assertEqual(entity.vy, 40)\n self.assertEqual(entity.spin, 50)\n self.assertEqual(entity.fuel, 60)\n self.assertEqual(entity.landed_on, '')\n self.assertEqual(entity.throttle, 70)\n\n ps.y0()\n self.assertEqual(entity.heading, 7 % (2 * np.pi))\n\n ps[0].landed_on = 'Second'\n self.assertEqual(entity.landed_on, 'Second')\n entity.x = 500\n self.assertEqual(ps[0].x, 500)\n entity.pos = np.array([55, 66])\n self.assertEqual(ps['First'].x, 55)\n self.assertEqual(ps['First'].y, 66)", "def getX(self):\n return self.components[0]", "def getX(self):\n return self.components[0]", "def get_element_from_screen_position(self, point):\n point = (point[0] - self.rect.x, point[1] - self.rect.y)\n cases = []\n for case in self.group:\n if case.rect.collidepoint(point):\n cases.append(case)\n\n if cases:\n dist_from_case = [\n 
(pygame.math.Vector2(case.center).distance_to(point), i)\n for i, case\n in enumerate(cases)]\n\n i = min(dist_from_case)[1]\n cases[i].activate()\n if self.current_case is not None and \\\n self.current_case is not cases[i]:\n self.current_case.deactivate()\n\n self.current_case = cases[i]\n return self.current_case", "def get_current_ee_pose(self):\n #self.arm_endpoint = #magic tf call that I can add\n while True:\n try:\n translation, rotation = self.listener.lookupTransform('world_frame', 'palm_frame', rospy.Time()) #j2s7s300_end_effector\n break # once the transform is obtained move on\n except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):\n continue # if it fails try again\n point = [translation[0], translation[1], translation[2]]\n self.arm_endpoint = np.array(point)\n # rospy.logerr(self.arm_endpoint)", "def test_get_vertex_on_processor(self):\n subv = list()\n for i in range(5):\n subv.append(SimpleMachineVertex(None, \"\"))\n\n pl = list()\n for i in range(4):\n pl.append(Placement(subv[i], 0, 0, i))\n\n pls = Placements(pl)\n for i in range(4):\n self.assertEqual(pls.get_vertex_on_processor(0, 0, i), subv[i])\n\n self.assertEqual(pls.get_placement_of_vertex(subv[0]), pl[0])", "def pick_loc(self, event, x):\n #print(event, x)\n self.vtkWidget.iren.RemoveObservers('RightButtonPressEvent')\n loc = event.GetEventPosition()\n\n # Currently this only allow one pick points, but in the future, more reference points may be needed\n if self.pnt is None: # Check no points are already picked\n self.pnt = vtkRenWin.Pick_point(self.renWin, loc)\n else:\n show_message(\"A point is already set as the reference.\\n\"\n \"Clear the picked points to change reference\",\n message_type=\"info\")\n #vtkRenWin.mark(self.renWin,self.pnt[0],self.pnt[1],self.pnt[2])\n # print(self.pnt)" ]
[ "0.6163354", "0.54555136", "0.5393153", "0.5236375", "0.523499", "0.51858884", "0.49860734", "0.48600116", "0.4857848", "0.48537073", "0.485229", "0.48440015", "0.48133153", "0.47727907", "0.47712675", "0.47643167", "0.47142008", "0.4710021", "0.46757096", "0.4671051", "0.4660493", "0.46561432", "0.463871", "0.46384633", "0.460986", "0.460986", "0.46039018", "0.4602318", "0.4602307", "0.4595809" ]
0.5729084
1
When iterating over an equipment, we iterate over its points.
def __iter__(self): for point in self.points: yield point
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __iter__(self):\n pt = (self.x, self.y)\n for i in pt:\n yield i", "def __iter__(self):\n return self.points.__iter__()", "def points(self):\n try:\n return self._list_of_points\n except AttributeError:\n print('Reading points for this equipment...')\n self._add_points()\n return self._list_of_points", "def __iter__(self) -> Iterable[Union[Point, LabwareLike]]:\n return iter(\n (\n self._point,\n self._labware,\n )\n )", "def __iter__(self):\n for coord in self.position:\n yield coord", "def enumerate_points(self):\n\t\traise Exception(NotImplemented)", "def iter_points(self):\n for x in range(self.left, self.right + 1):\n for y in range(self.top, self.bottom + 1):\n yield Point(x, y)", "def __iter__(self) -> Iterable[Tuple[float, float]]:\n return iter([self.x, self.y])", "def __iter__(self):\n yield self._x\n yield self._y", "def __iter__(self):\n return self.coords.__iter__()", "def __iter__(self):\n for idx in range(0, self.Npoints):\n position = self.start + (self.end-self.start)/self.Npoints*idx\n yield position\n raise StopIteration()", "def visit_equipment(self, equipment):", "def iter_coordinates(self):\n for coord in self.position:\n yield coord", "def __iter__(self):\n for p in self.positions(): # use same order as positons()\n yield p.element() # but yield each element", "def __iter__(self):\n for p in self.positions():\n yield p.element()", "def points_generator(self):\n rows, cols = self.game.board.board_size\n points = [Point(i, j) for i, j in product(range(rows), range(cols))]\n for point in points:\n yield point", "def get_points(self) -> typing.Iterable[float]:\n raise NotImplementedError()", "def __iter__(self):\n for key in sorted(self._points):\n yield key", "def __getitem__(self, i):\n return self.__points[i]", "def test_points_calculation(self):\n\n assert self.test_shape.points == [\n (1030.0, 525.0),\n (1030.0, 475.0),\n (970.0, 475.0),\n (970.0, 525.0),\n ]", "def __iter__(self):\n yield self.x\n yield self.y\n # Or, you could also do:\n # return iter([self.x, self.y])", "def testGetPointsGeodesic(self):\n\n for i in range(10):\n self.basicGetPointsGeodesic()", "def iter_points(self, point_uuids=None):\n\n if point_uuids is None:\n for point in self._points.values():\n yield Point.from_point(point)\n else:\n for point_uuid in point_uuids:\n yield Point.from_point(self._points[point_uuid])", "def __iter__(self):\n for label, coord_seq in self.coords.items():\n for coordinate in coord_seq:\n yield (label, tuple(coordinate),)", "def __iter__(self):\n for p in self.positions(): # use same order as positions()\n yield p.element() # but yield each element", "def __iter__(self):\r\n curpt, done = [0,0], numpy.zeros(self.shape, dtype='bool')\r\n while not done.all():\r\n #The following line computes the nearest remaining point...\r\n closest = numpy.unravel_index((numpy.where(done, numpy.inf,\r\n (numpy.sum((self.points - curpt)**2, axis=-1))) if\r\n self.smartiter else done).argmin(), self.shape)\r\n #...but only if self.smartiter is true.\r\n done[closest] = True\r\n curpt = self.points[closest]\r\n yield closest", "def __next__(self):\n if self.iterator < len(self.points):\n iterator = self.iterator\n self.iterator += 1\n return self.points[iterator]\n else:\n raise StopIteration", "def g_xy(self):\n for x in range(self.size.x):\n for y in range(self.size.y):\n yield self.p[0] + Vect(x, y)", "def __iter__(self):\n return self.cli.essids.essids().__iter__()", "def __iter__(self):\n return self.essids.__iter__()" ]
[ "0.6738365", "0.67296696", "0.65510035", "0.6479246", "0.6224848", "0.6224676", "0.6217917", "0.6125468", "0.61245775", "0.6047802", "0.6040413", "0.60280716", "0.5998997", "0.58142966", "0.5806075", "0.5755057", "0.5699067", "0.56640595", "0.56099945", "0.55949754", "0.55883384", "0.5585104", "0.5581174", "0.55716926", "0.55464506", "0.553684", "0.5518002", "0.5513075", "0.5471734", "0.54638225" ]
0.68680793
0
Retrieve an instance of the equip this entity is linked to.
def get_equip(self, callback=None): return self._session.get_entity(self.tags['equipRef'], callback=callback, single=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_equipment(self):\r\n eq = self._pvsr.getEquipmentByName(self._meas[\"equipment\"])\r\n if eq is None:\r\n site = self._pvsr.getSiteByName(self._default_site)\r\n if site is None:\r\n logging.info(\"Creating new default site {0}\".format(self._default_site))\r\n site = self._pvsr.create_pvsr_object(\"Site\")\r\n site.ParentId = 1\r\n site.Name = self._default_site\r\n site=self._pvsr.addSite(site)\r\n else:\r\n logging.debug(\"Default site ID is {0}\".format(site.Id))\r\n \r\n logging.info(\"Creating new equipment: {0}\".format(self._meas[\"equipment\"]))\r\n if self._meas[\"collector_type\"] == 'J':\r\n eq = self._pvsr.create_pvsr_object(\"JagaEquipment\")\r\n eq.ASCII_0000_EQ_COLL_KEY = self._meas[\"equipment\"] + \"key\"\r\n elif self._meas[\"collector_type\"] == 'Y':\r\n eq = self._pvsr.create_pvsr_object(\"SynthTransEquipment\")\r\n else:\r\n raise ValueError(\"The equipment does not exist in PVSR\") \r\n eq.Name = self._meas[\"equipment\"]\r\n eq.ParentId = site.Id\r\n eq.CollectorType = self._meas[\"collector_type\"]\r\n eq.IntervalInSec = 300\r\n eq.RetainRawData = 365\r\n eq.CollectData = \"Yes\"\r\n \r\n eq = self._pvsr.addEquipment(eq)\r\n logging.info(\"Added equipment {0}, id: {1}\".format(self._meas[\"equipment\"],eq.Id))\r\n else:\r\n logging.debug(\"Found equipment: {0}, id: {1}\".format(self._meas[\"equipment\"],eq.Id))\r\n return eq", "def get_equipment(self, name):\n db = self.session\n try:\n eqrow = db.query(models.Equipment).filter(models.Equipment.name.contains(name)).one()\n except config.NoResultFound as err:\n raise config.ConfigError(\"Bad equipment name %r: %s\" % (name, err))\n return EquipmentRuntime(eqrow, \"unspecified\", self.get_logfile(), db)", "def equip_item(self):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/Destiny2/Actions/Items/EquipItem/\"))", "def robot(self):\n return equipment_module.Equipment(self._get_attr('robot_id'))", "def find_entity(self, filter_expr=None, limit=None,\n single=False, callback=None):\n equip_ref = hszinc.dump_scalar(self.id)\n if filter_expr is None:\n filter_expr = 'equipRef==%s' % equip_ref\n else:\n filter_expr = '(equipRef==%s) and (%s)' % (equip_ref, filter_expr)\n return self._session.find_entity(filter_expr, limit, single, callback)", "def tool(self):\n return equipment_module.Equipment(self._get_attr('extraction_tool_id'))", "def get_armor_equipped(self):\n\t\treturn self.equippedArmor", "def get_equipment_slot(self, source_entity):\n open_slots = (source_entity.equipment.get_open_slots_of_type\n (self.parent.equipment_type.value))\n if len(open_slots) > 0:\n return open_slots[0]\n else:\n return (source_entity.equipment.get_slots_of_type\n (self.parent.equipment_type.value))[0]", "def robot(self):\n return equipment_module.Equipment(\n self._get_attr('extraction_robot_id'))", "def get_inventory():\n return INVENTORY", "def get_equipment_from_inventory(self):\n return [x for x in self.inventory if x.is_equip()]", "def _get_instance(self):\n #return '_earth_instance_' + rospy.get_name().strip('/')\n return self.instance", "def getEquipmentByEquipmentId(equipment_id):\r\n db = db_helpers.getDbCon()\r\n cursor = db.cursor()\r\n userEquipmentInsertQuery = \"SELECT * FROM equipment WHERE equipment_id =%s\"\r\n try:\r\n cursor.execute(userEquipmentInsertQuery, (equipment_id,))\r\n equipment = cursor.fetchall()\r\n return equipment\r\n except Exception:\r\n print('Error: OOPs something 
went wrong while getting the equipment by equipment id!')\r\n finally:\r\n cursor.close()\r\n db.close()", "def get_main_hand_equipped(self):\n\t\treturn self.equippedMainHand", "def shop_product(self):\n return self.product.get_shop_instance(self.shop)", "def get(self, ip):\n return UE.query.filter(UE.ue_ip == ip).one()", "def get(self, ip):\n return UE.query.filter(UE.ue_ip == ip).one()", "def get(self, ip):\n return UE.query.filter(UE.ue_ip == ip).one()", "def GetEntity(self):\n return self.__entity", "def equipment(self, equipment):\n\n self._equipment = equipment", "def get_inventory(self):\n raise NotImplementedError(\"Subclasses define what returning the inventory entails\")", "def entity(self):\n return self._entity", "def entity(self):\n return self._entity", "def get_instance(self, instance):\n return self._get(_instance.Instance, instance)", "def info_equipment_type_id_get(type_id):\n session = info_map.Session()\n\n q = session.query(info_map.Equipment).filter(info_map.Equipment.type == type_id)\n q_equipment = q.one_or_none()\n\n if q_equipment is not None:\n equipment = EquipmentInfo(\n type=q_equipment.type,\n name=q_equipment.name,\n group=q_equipment.group_id,\n capacity=q_equipment.capacity,\n fitting=EquipmentInfoFitting(\n cpu=q_equipment.cpu,\n powergrid=q_equipment.powergrid),\n allowed_groups=[g.group_id for g in q_equipment.groups])\n\n return equipment\n\n else:\n error = Error('Type {} Not Found'.format(type_id))\n return error, 404", "def employee(self) -> object:\n return self._employee", "def get_instance(self, ix=None, name=None):\n assert ix is None or name is None\n if ix is None:\n instance = [ex for ex in self.instances if ex.name == name]\n assert len(instance) == 1\n return instance[0]\n else:\n return self.instances[ix]", "def commodity_instance(self) -> str:\n return pulumi.get(self, \"commodity_instance\")", "def GetEntityByItem(self,i):\n\t\treturn self.Space.Item(i)", "def get_instance(self, name):\n return self.store.instance.id" ]
[ "0.6793719", "0.6646895", "0.6620147", "0.61004996", "0.60058075", "0.5998487", "0.5941672", "0.58856815", "0.58801144", "0.5800163", "0.57987654", "0.5727742", "0.568204", "0.5605969", "0.55737144", "0.5561501", "0.5561501", "0.5561501", "0.55327797", "0.55324954", "0.5522133", "0.551829", "0.551829", "0.54641336", "0.546229", "0.5457434", "0.54283935", "0.5387177", "0.5369333", "0.5359679" ]
0.7817165
0
Get the value of the indexth node in the linked list. If the index is invalid, return -1.
def get(self, index: int) -> int: node = self.get_node(index) if node: return node.val else: return -1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get(self, index: int) -> int:\n cnt = 0\n cur = self.head \n while cur != None:\n if(cnt == index):\n return cur.val\n cur = cur.next \n cnt += 1\n return -1", "def get(self, index):\n cur = self.head\n while cur and index>0:\n cur = cur.next\n index -= 1\n if cur:\n return cur.val\n else:\n return -1", "def get(self, index):\n if index < 0 or index >= self.length:\n return -1\n curr = self.head\n for i in range(1, index + 1):\n curr = curr.next\n return curr.val", "def get(self, index: int) -> int:\n if index < 0 or self.size <= index:\n return -1\n curr = self.head\n for _ in range(index + 1):\n curr = curr.next\n return curr.value", "def value_at(self, index):\n if index==0:\n return self.head.val\n\n temp_node = self.head\n for _ in range(index):\n temp_node = temp_node.next\n return temp_node.val", "def get(self, index):\n if index < 0 or index >= self._size:\n return -1\n\n current = self._head\n for _ in range(index):\n current = current.next\n return current.val", "def get(self, index):\n if index < 0 or index >= self.size:\n return -1\n\n if self.head is None:\n return -1\n\n curr = self.head\n for i in range(index):\n curr = curr.next\n return curr.val", "def get(self, index):\n if index >= self.len:\n return -1\n p = self.head.next\n while index > 0:\n index -= 1\n p = p.next\n return p.val", "def get(self, index: int) -> int:\n curr = self.head\n count = 0\n if self.head is None:\n return -1\n if index == 0:\n return self.head.data\n while curr:\n if count == index:\n return curr.data\n count += 1\n curr = curr.next\n return -1", "def item_at_index(self, index):\n if index < 0 or index >= self.size:\n return -1\n\n if self.head is None:\n return -1\n\n curr = self.head\n for i in range(index):\n curr = curr.next\n return curr.val", "def get(self, index=0):\n\n # Error case: Index out of acceptable range\n if index < 0 or index >= self._size:\n raise RangeError(\"index out of range.\")\n\n i = 0\n current_node = self._head\n\n while(i < index):\n current_node = current_node.next\n i += 1\n\n return current_node.value", "def value_at(self, index):\n if self.empty():\n return \"Linked List Empty\"\n\n idx = 1\n l = self.head\n while l.next is not None:\n if idx is index:\n break\n\n l = l.next\n idx += 1\n return l.data", "def get(self, index: int) -> int:\n if index + 1 >self.cnt:\n return -1\n\n tmp = self.dummy\n for i in range(index+1):\n tmp = tmp.next\n return tmp.val", "def get(self, index: int) -> int:\n if index < 0 or index >= self.size: return -1\n \n # choose search from head or tail\n if index + 1 < self.size - index:\n ptr = self.head\n for _ in range(index + 1):\n ptr = ptr.next\n else: # from tail\n ptr = self.tail\n for _ in range(self.size - index):\n ptr = ptr.prev\n return ptr.val", "def read_index(self, index):\n current = self.head\n if index == 0:\n return current.data\n elif index >= self.size() :\n return None\n else:\n position = 0\n while position < index:\n current = current.next_node\n position += 1\n return current.data", "def get(self, index):\n if index < 0:\n return -1\n # print('index:',index)\n p = self.head\n while index and p:\n p = p.next\n index -= 1\n # print('after,index:',index)\n if index:\n return -1\n if p and p.next:\n return p.next.val\n return -1\n # self.printList()", "def get(self, index):\n \n cur = self.head\n i = 0\n while i < index and cur:\n cur = cur.nxt\n i+=1\n# self.display(\"get , fetching the value at index \"+str(index)) \n if cur:\n print(\"found value %d\" %cur.val)\n return cur.val\n else:\n return -1", "def 
_value_at(self, index):\n node = self._get_node_at(index)\n if node is None:\n raise IndexError('List index out of range.')\n return node.value", "def _value_at(self, index):\n node = self._get_node_at(index)\n if node is None:\n raise IndexError('List index out of range.')\n return node.value", "def get(self, index):\r\n if index >= self.length():\r\n print(\"ERROR\")\r\n return None\r\n current_index = 0\r\n current_node = self.head\r\n while True:\r\n current_node = current_node.next\r\n if current_index == index: return current_node.data\r\n current_index += 1", "def get(self, index):\n if 0 <= index <= len(self.nums):\n return self.nums[index]\n return -1", "def _get_node_at(self, index):\n assert isinstance(index, int) \n if index >= 0: \n steps = index \n else:\n steps = self.size() + index\n if steps < 0:\n return None \n node = self.head\n while steps > 0 and node is not None:\n node = node.next_node\n steps -= 1 \n return node", "def getNode_at(self, index):\n if self.empty():\n return \"Linked List Empty\"\n\n idx = 1\n l = self.head\n while l.next is not None:\n if idx is index:\n break\n\n l = l.next\n idx += 1\n return l", "def _get_node(self, index):\r\n\t\tself._validate_index(index)\r\n\t\treturn self._traverse(lambda i, list: i < index)[\"node\"]", "def __getitem__(self, index):\n node = self.head\n index += 1\n for level in reversed(range(self.max_levels)):\n while node.width[level] <= index:\n index -= node.width[level]\n node = node.next[level]\n return node.value", "def __get_node_at_index(self, index: int) -> Node[T]:\n if 0 <= index and index < len(self):\n current = self.head\n for i in range(index):\n current = current.link\n return current\n else:\n raise ValueError(\"Index out of bounds\")", "def _get_node_at(self, index):\n assert isinstance(index, int)\n from_head = True if index >= 0 else False \n if from_head: \n node = self.head\n steps = index \n else:\n node = self.tail \n steps = abs(index) -1 \n while steps > 0 and node is not None:\n node = node.next_node if from_head else node.prev_node \n steps -= 1 \n return node", "def get(self, index):\n count = 0\n x = self.begin\n\n while count != index:\n x = x.next\n count += 1\n\n return x.value", "def __find_node_index(self, index):\n cur_index = 0\n cur_node = self.head\n prev_node = None\n while cur_node is not None:\n if index >= len(cur_node.data_list) + cur_index:\n cur_index += len(cur_node.data_list)\n prev_node = cur_node\n cur_node = cur_node.next_node\n else:\n index -= cur_index\n break\n return index, prev_node, cur_node", "def get_node_from_index(self, index):\n curr = self.head\n for i in range(index):\n curr = curr.next\n return curr" ]
[ "0.82228374", "0.810132", "0.798225", "0.797223", "0.7966857", "0.7952263", "0.7949584", "0.7928148", "0.79057187", "0.78002805", "0.7736932", "0.77319723", "0.7650963", "0.75840414", "0.7507434", "0.74747777", "0.7453931", "0.7387386", "0.7387386", "0.7384746", "0.73363584", "0.72755086", "0.7229427", "0.72067606", "0.7105351", "0.7092481", "0.70126134", "0.7005471", "0.6904427", "0.6828003" ]
0.83084565
0
From the ordered list of steps in self, return their objects.
def get_steps(self) -> list:
    ret_val = []
    for step_id in self:
        step_body = Steps.cache_step(step_id)
        if step_body is not None:
            ret_val.append(step_body)
    return ret_val
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def steps(self):\n for step in self._get_paged(\"steps\", trailing=True):\n yield self.__get_object(step)\n\n return", "def getSteps():", "def steps(self):\n for step in self._steps:\n yield step", "def get_next_steps(self, steps):\n step_list = []\n\n steps_remaining = set(steps.keys())\n counter = 0\n max_counter = 10000\n next_steps = set()\n\n for step in steps_remaining:\n dependencies = steps[step]\n if len(dependencies) == 0:\n next_steps.add(step)\n\n # this is the list of things that can be take for work now\n return sorted(next_steps)", "def next_steps(self) -> List[FlowNode]:\n return [node for predicate, node in self._current_step.children]", "def get_steps(self):\n return self.steps", "def _get_steps(self):\n return self.steps", "def __init__(self):\n self.step_list = [steps.Raw()]", "def subclass_steps(cls):\n steps = []\n for attrname in dir(cls):\n obj = getattr(cls, attrname)\n if isinstance(obj, type) and issubclass(obj, StepBaseAbs):\n steps.append(obj)\n return steps", "def steps(self):\n return [\n MRStep(mapper=self.mapper_data_cleaning,\n reducer=self.reducer_data_cleaning\n ),\n MRStep(\n mapper_init=self.mapper_get_items_init,\n mapper=self.mapper_get_items,\n combiner=self.combiner_count_items,\n reducer=self.reducer_total_items\n )\n ]", "def get_steps(self):\n return self.steps", "def test_structure_to_steps(self):\n test_structure = self.test_structure\n classes = []\n counter = 1\n for c in test_structure:\n current = []\n for i in range(c):\n current.append(counter)\n counter += 1\n classes.append(current)\n return classes", "async def _get_steps(self):\n logger.debug('Getting steps & collections')\n steps = self.run.plan.steps\n collections = await gen.multi(\n [self._pool.request_instances(\n self.run.uuid,\n s.uuid,\n count=s.instance_count,\n inst_type=s.instance_type,\n region=s.instance_region,\n plan=self.run.plan.name,\n owner=self.run.owner,\n run_max_time=s.run_delay + s.run_max_time)\n for s in steps])\n\n try:\n # First, setup some dicst, all keyed by step.uuid\n steps_by_uuid = {x.uuid: x for x in steps}\n step_records_by_uuid = {x.step.uuid: x for x in\n self.run.step_records}\n\n # Link the step/step_record/ec2_collection under a single\n # StepRecordLink tuple\n for coll in collections:\n step = steps_by_uuid[coll.uuid]\n step_record = step_records_by_uuid[coll.uuid]\n setlink = StepRecordLink(step_record, step, coll)\n self._set_links.append(setlink)\n\n except Exception:\n # Ensure we return collections if something bad happened\n logger.error(\"Got an exception in runner, returning instances\",\n exc_info=True)\n\n try:\n await gen.multi([self._pool.release_instances(x)\n for x in collections])\n except:\n logger.error(\"Wat? 
Got an error returning instances.\",\n exc_info=True)\n\n # Clear out the setlinks to make sure they aren't cleaned up\n # again\n self._set_links = []", "def merge_steps(self, steps):\n raise NotImplementedError", "def __iter__( self ):\n assert isinstance( self._env, Env )\n assert isinstance( self._steps, list )\n\n return iter( self._steps )", "def iterate_steps(steps):\n pop = None\n while steps:\n for step, depends in steps.items():\n if depends == []:\n pop = step\n if not pop:\n return\n pop_step(pop, steps)\n yield pop", "def build_from_step_list(self, step_list: list):\n for step in step_list:\n self.run[step.name] = step", "def load(steps):\n loaded = []\n for s in steps:\n try:\n s.load()\n loaded.append(s)\n except:\n logging.warn('Error during step load:\\n%s' %\n util.indent(traceback.format_exc()))\n pass\n return loaded", "def availableSteps( self ):\n assert isinstance( self._level, int )\n assert isinstance( self._steps, list )\n assert isinstance( self._outter, Env ) or ( self._outter is None )\n\n lst = [ ]\n self._availableSteps( lst )\n return lst", "def as_lines(self, steps):\n res = []\n p0 = self.lerp(0)\n for step in range(steps):\n p1 = self.lerp((step + 1) / steps)\n s = Line(p0=p0, p1=p1)\n res.append(s)\n p0 = p1\n\n if self.line is not None:\n p0 = self.line.lerp(0)\n for step in range(steps):\n p1 = self.line.lerp((step + 1) / steps)\n res[step].line = Line(p0=p0, p1=p1)\n p0 = p1\n return res", "def get_steps_from_position_list(position_list):\n\n step_list = []\n\n for i in range(len(position_list) - 1):\n current_position = position_list[i]\n next_position = position_list[i+1]\n if (current_position.x == next_position.x and \n current_position.y == next_position.y):\n continue\n\n #Cardinal directions\n if (next_position.x - current_position.x == 1 and \n next_position.y - current_position.y == 0):\n next_action = \"right\"\n elif (next_position.x - current_position.x == -1 and \n next_position.y - current_position.y == 0):\n next_action = \"left\"\n elif (next_position.x - current_position.x == 0 and \n next_position.y - current_position.y == 1):\n next_action = \"up\"\n elif (next_position.x - current_position.x == 0 and \n next_position.y - current_position.y == -1):\n next_action = \"down\"\n\n #Extended directions\n elif (next_position.x - current_position.x == 1 and \n next_position.y - current_position.y == 1):\n next_action = \"up_right\"\n elif (next_position.x - current_position.x == -1 and \n next_position.y - current_position.y == 1):\n next_action = \"up_left\"\n elif (next_position.x - current_position.x == -1 and \n next_position.y - current_position.y == -1):\n next_action = \"down_left\"\n elif (next_position.x - current_position.x == 1 and \n next_position.y - current_position.y == -1):\n next_action = \"down_right\"\n next_step = Step(current_position, next_action)\n step_list.append(next_step)\n \n if len(step_list) == 0:\n next_step = Step(position_list[0], \"stay\")\n step_list.append(next_step)\n\n return step_list", "def __init__(self, *args):\n \n self.steps = args", "def read_move(self, steps):\n res = []\n size = len(steps[0])\n side_size = int(math.sqrt(size))\n for i in range(0, len(steps) - 1):\n state = steps[i]\n next_state = steps[i + 1]\n next_pos = next_state.index(0)\n pos = state.index(0)\n rel = next_pos - pos\n direction = 'up'\n if rel == 1:\n direction = 'right'\n if rel == -1:\n direction = 'left'\n if rel == side_size:\n direction = 'down'\n res.append(direction)\n return res", "def do_steps(self):\n steps = 
self.get_step_conf()\n all_step_config = dict()\n for k, v in steps.items():\n tmp_list = list()\n all_step_config[k] = tmp_list\n start = v[\"Start Value\"]\n end = v[\"End Value\"]\n # special handling of edge length\n if(k == \"Edge Length\"):\n start = self.convert_to_tuple(start)\n end = self.convert_to_tuple(end)\n tmp_list.append(str(start))\n while(start != end):\n start = self.add_edge_length(\n start, self.convert_to_tuple(v[\"Step\"]))\n tmp_list.append(str(start))\n print start\n else:\n tmp_list.append(float(start))\n while float(start) < float(end):\n start = float(start) + float(v[\"Step\"])\n tmp_list.append(start)\n return all_step_config", "def raw_steps(self):\n return self.obj_payload[\"steps\"]", "def get_next_steps(self, steps):\n for step in range(steps):\n # Actual calulation: Runge-Kutta 2\n\n # Step 1\n k1 = [\n self.vel * self.dt,\n self.get_next_acc() * self.dt\n ]\n\n # Step 2\n next_pos = self.pos + k1[0] * 0.5\n next_vel = self.vel + k1[1] * 0.5\n self.disps, self.dists = self.get_relative_distances(positions=next_pos)\n k2 = [\n next_vel * self.dt,\n self.get_next_acc(save=False) * self.dt\n ]\n\n # Step 3\n next_pos = self.pos + k2[0] * 0.5\n next_vel = self.vel + k2[1] * 0.5\n self.disps, self.dists = self.get_relative_distances(positions=next_pos)\n k3 = [\n next_vel * self.dt,\n self.get_next_acc(save=False) * self.dt\n ]\n\n # Step 4\n next_pos = self.pos + k3[0]\n next_vel = self.vel + k3[1]\n self.disps, self.dists = self.get_relative_distances(positions=next_pos)\n k4 = [\n next_vel * self.dt,\n self.get_next_acc(save=False) * self.dt\n ]\n\n # Move forward\n self.pos = self.pos + 1/6 * (k1[0] + 2*k2[0] + 2*k3[0] + k4[0])\n self.vel = self.vel + 1/6 * (k1[1] + 2*k2[1] + 2*k3[1] + k4[1])\n\n # Saving of statistics\n self.save_system_information(self.pos, self.vel)", "def _availableSteps( self, lst, includeSubproofs=True ):\n assert isinstance( lst, list )\n assert isinstance( includeSubproofs, bool )\n\n assert isinstance( self._level, int )\n assert isinstance( self._steps, list )\n assert isinstance( self._outter, Env ) or ( self._outter is None )\n\n if self._outter:\n self._outter._availableSteps( lst, False )\n\n for entry in self._steps:\n if isinstance( entry, tuple ):\n if includeSubproofs:\n lst.append( entry )\n else:\n lst.append( entry )", "def on_init(self,\n steps=None,\n **kwargs):\n super(Steps, self).on_init(**kwargs)\n\n self.steps = []\n if steps:\n for item in steps:\n if isinstance(item, dict):\n self.steps.append(Step(item, len(self.steps)+1))\n else:\n self.steps.append(item)\n\n logging.debug(u\"Adding steps:\")\n for step in self.steps:\n logging.debug(u\"- {}\".format(step.label))\n\n states = ['begin',\n 'running',\n 'completed',\n 'end']\n\n transitions = [\n\n { # engage on first step as soon as we are ready\n 'source': 'begin',\n 'target': 'running',\n 'condition': self.if_ready,\n 'action': self.next_step,\n },\n\n { # wait for the underlying machine of this step to complete\n 'source': 'running',\n 'target': 'completed',\n 'condition': self.step_has_completed,\n },\n\n { # start next step if there is one and if we are ready for it\n 'source': 'completed',\n 'target': 'running',\n 'condition': self.if_next,\n 'action': self.next_step,\n },\n\n { # end this state machine\n 'source': 'completed',\n 'target': 'end',\n 'condition': self.if_end,\n 'action': self.stop,\n },\n\n ]\n\n self.build(states=states,\n transitions=transitions,\n initial='begin')", "def load_steps(self):\n # Clear stepsListWidget\n 
self.stepsListWidget.clear()\n\n steps = self.mgr.obj.steps\n for i, step in enumerate(steps):\n item = QListWidgetItem('Step {:d}: {:s}'.format(i, step.function_name))\n self.stepsListWidget.addItem(item)", "def get_all_steps(self):\n steps = []\n steps.extend(self.init_workspace_steps())\n steps.extend(self.repos_clone_steps())\n steps.extend(self.cli_steps())\n steps.extend(self.prepare_mobilespec_steps())\n steps.extend(self.deploy_steps())\n return steps" ]
[ "0.7150398", "0.6517314", "0.65121204", "0.65073574", "0.6322349", "0.6313758", "0.62665945", "0.61052346", "0.60600114", "0.5968533", "0.59455675", "0.5905528", "0.5889132", "0.5881727", "0.58811265", "0.5878531", "0.58683777", "0.58374953", "0.58098865", "0.5809102", "0.57995635", "0.57051855", "0.57048273", "0.5695961", "0.5587429", "0.55727065", "0.5569458", "0.5566195", "0.55641514", "0.55432045" ]
0.6559559
1
Create a sorted list of the steps bound to the case, in ascending order.
def sort_case_steps(self, request):
    self.clear()
    # todo error with repeated step_id. use record id somehow
    for row in range(len(request)):
        data_step = request[row]
        prev_step_id = str(data_step.get(CaseSteps.PREVIOUS_STEP_ID, 0))
        step_id = str(data_step.get(CaseSteps.STEP_ID, 0))
        # identify first step
        if prev_step_id is None:
            if len(self) == 0:
                self.append(step_id)
            else:
                self.insert(0, step_id)
        # regular step insert
        else:
            # find previous
            if prev_step_id in self:
                self.insert(int(self.index(prev_step_id)) + 1, step_id)
            else:
                self.append(step_id)
    # todo debug logger ("order {}".format(self))
    return self.get_steps()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_steps(self) -> List[Coordinate]:\n if self.orientation == Orientation.HORIZONTAL:\n fill = self.start.y\n else:\n fill = self.start.x\n\n if self.end.x < self.start.x:\n x_range = range(self.start.x, self.end.x - 1, -1)\n else:\n x_range = range(self.start.x, self.end.x + 1)\n\n if self.end.y < self.start.y:\n y_range = range(self.start.y, self.end.y - 1, -1)\n else:\n y_range = range(self.start.y, self.end.y + 1)\n\n return list(zip_longest(x_range, y_range, fillvalue=fill))", "def solution_steps(solution):\n steps = []\n for i in range(len(solution) - 1):\n r1, r2 = solution[i], solution[i+1]\n v1 = list(r1.vehicles - r2.vehicles)[0]\n v2 = list(r2.vehicles - r1.vehicles)[0]\n if v1.x < v2.x:\n steps.append('{0}R'.format(v1.id))\n elif v1.x > v2.x:\n steps.append('{0}L'.format(v1.id))\n elif v1.y < v2.y:\n steps.append('{0}D'.format(v1.id))\n elif v1.y > v2.y:\n steps.append('{0}U'.format(v1.id))\n return steps", "def group_consecutives(vals, step=1):\r\n\trun = []\r\n\tresult = [run]\r\n\texpect = None\r\n\tfor v in vals:\r\n\t\tif (v == expect) or (expect is None):\r\n\t\t\trun.append(v)\r\n\t\telse:\r\n\t\t\trun = [v]\r\n\t\t\tresult.append(run)\r\n\t\texpect = v + step\r\n\treturn result", "def step(v, direction, step_size):\n return [v_i + step_size * direction_i\n for v_i, direction_i in zip(v, direction)]", "def step(v, direction, step_size):\n return [v_i + step_size * direction_i\n for v_i, direction_i in zip(v, direction)]", "def generate_list_with_strategy(start: int, stop: int, step: int, strategy: Callable) -> List[int]:\n\n\n # strategy = (generate_list_with_strategy())\n # if start == stop:\n # print(start)\n # else:\n # res = []\n # while start < (stop + 1):\n # res.append(start)\n # start += step\n # yield res\n r = [strategy(item) for item in range(start, stop, step)]\n return r\n\n # def generate_list_with_strategy(start, stop, step, strategy):\n # d = [strategy(x) for x in range(start, stop, step)]\n # return d", "def get_next_steps(self, steps):\n step_list = []\n\n steps_remaining = set(steps.keys())\n counter = 0\n max_counter = 10000\n next_steps = set()\n\n for step in steps_remaining:\n dependencies = steps[step]\n if len(dependencies) == 0:\n next_steps.add(step)\n\n # this is the list of things that can be take for work now\n return sorted(next_steps)", "def group_consecutives(vals, step=0.0001):\n run = []\n result = [run]\n expect = 0.\n for v in vals:\n v = round(v,5)\n expect = round(expect,5)\n if (v == expect) or (expect == 0.):\n run.append(v)\n else:\n run = [v]\n result.append(run)\n expect = v + step\n return result", "def shortest_steps(connectors: List[int]):\n connectors = sorted(connectors)\n return [nxt-prv for prv, nxt in zip([0]+connectors, connectors)] + [3]", "def mrange(start, end, steps=1):\n list = []\n i = start\n while i < end:\n list.append(i)\n i += steps\n return list", "def generate_list(start: int, stop: int, step: int = 1) -> List[int]:\n # if start == stop:\n # print(start)\n # else:\n # res = []\n # while start < (stop + 1):\n # res.append(start)\n # start += step\n # print(res)\n\n return [item for item in range(start, (stop+step))]", "def runs(self):\n cycles = []\n temp_cycle = []\n perm = self.array_form\n for i in xrange(len(perm) - 1):\n current_elem = perm[i]\n next_elem = perm[i+1]\n\n if current_elem < next_elem:\n temp_cycle.append(current_elem)\n continue\n\n if current_elem > next_elem:\n if temp_cycle != [] and \\\n temp_cycle[-1] < current_elem:\n temp_cycle.append(current_elem)\n 
cycles.append(temp_cycle)\n temp_cycle = []\n continue\n else:\n if temp_cycle != []:\n cycles.append(temp_cycle)\n cycles.append([current_elem])\n temp_cycle = []\n continue\n\n if current_elem < next_elem:\n temp_cycle.append(next_elem)\n cycles.append(temp_cycle)\n else:\n if temp_cycle != []:\n cycles.append(temp_cycle)\n cycles.append([next_elem])\n return cycles", "def xSteps(self,start,ziel,steps=10):\n erg=[]\n wert=(ziel-start)/(steps)\n for i in range(1, steps+1):\n erg.append(round(start+wert*i,2))\n return erg", "def define_intervals(self):\n i = 5 # a step of increment\n interval_sum = self.min_step\n interval_list = [self.min_step]\n while interval_sum < self.max_step:\n interval_sum += i\n interval_list.append(interval_sum)\n # interval_list.append(self.max_step)\n # print(\"Intervals\", interval_list)\n return interval_list", "def _getsteps(num_of_steps, limit):\n steps = []\n current = 0.0\n for i in range(0, num_of_steps):\n if i == num_of_steps - 1:\n steps.append(int(round(limit)))\n else:\n steps.append(int(round(current)))\n current += float(limit) / float(num_of_steps - 1)\n return steps", "def grid_search_parameters(step):\n f1 = list(np.arange(step,1,step))\n f2 = list(np.arange(step,1,step))\n f2.reverse()\n return zip(f1,f2)", "def stage1(self):\n n = self.min\n while True:\n n, bin_ = self.sort_to_bin(n)\n if n is None:\n n = self.get_new_n(bin_)\n if n is None:\n break\n if self.viz:\n yield", "def make_step(self):\n self.step_vals = np.cumsum(self.vals)", "def _roll(self):\n order = np.array(self.order)\n nsteps = np.array(self.nsteps)\n order[nsteps > 1] = np.roll(order[nsteps > 1], 1)\n self.order = order.tolist()", "def getSteps():", "def all_rolls(sides):\r\n result = []\r\n temp_list = list(range(2, 2*sides+1))\r\n\r\n while temp_list:\r\n result.extend(temp_list)\r\n temp_list = temp_list[1:-1]\r\n\r\n return sorted(result)", "def get_steps_from_position_list(position_list):\n\n step_list = []\n\n for i in range(len(position_list) - 1):\n current_position = position_list[i]\n next_position = position_list[i+1]\n if (current_position.x == next_position.x and \n current_position.y == next_position.y):\n continue\n\n #Cardinal directions\n if (next_position.x - current_position.x == 1 and \n next_position.y - current_position.y == 0):\n next_action = \"right\"\n elif (next_position.x - current_position.x == -1 and \n next_position.y - current_position.y == 0):\n next_action = \"left\"\n elif (next_position.x - current_position.x == 0 and \n next_position.y - current_position.y == 1):\n next_action = \"up\"\n elif (next_position.x - current_position.x == 0 and \n next_position.y - current_position.y == -1):\n next_action = \"down\"\n\n #Extended directions\n elif (next_position.x - current_position.x == 1 and \n next_position.y - current_position.y == 1):\n next_action = \"up_right\"\n elif (next_position.x - current_position.x == -1 and \n next_position.y - current_position.y == 1):\n next_action = \"up_left\"\n elif (next_position.x - current_position.x == -1 and \n next_position.y - current_position.y == -1):\n next_action = \"down_left\"\n elif (next_position.x - current_position.x == 1 and \n next_position.y - current_position.y == -1):\n next_action = \"down_right\"\n next_step = Step(current_position, next_action)\n step_list.append(next_step)\n \n if len(step_list) == 0:\n next_step = Step(position_list[0], \"stay\")\n step_list.append(next_step)\n\n return step_list", "def _make_bins(start, stop, step):\n bin_edges = np.arange(start, stop + step, 
step)\n\n return bin_edges", "def wiggleSort(self, arr: List[int]) -> None:\n n = len(arr)\n if n <= 1:\n return\n\n #up = False # start with False so that first pair will be non-dec\n\n for i in range(1, n):\n #if up == (arr[i] >= arr[i-1]):\n if (i & 1 == 0) == (arr[i] >= arr[i-1]):\n arr[i], arr[i-1] = arr[i-1], arr[i]\n \n #up = not up", "def _set_steps(self, bounds, steps):\n if type(steps) == int:\n self.steps = [np.linspace(b1,b2,steps) for b1,b2 in bounds]\n elif type(steps) == list and type(steps[0]) == int:\n self.steps = [np.linspace(b1, b2, s) for (b1, b2), s in zip(bounds, steps)]\n else:\n self.steps = steps.copy()", "def split_range(valsize, step, start, end):\n \n shift = 0\n while True:\n diff = 1 << (shift + step)\n mask = ((1 << step) - 1) << shift\n setbits = lambda x: x | ((1 << shift) - 1)\n \n haslower = (start & mask) != 0\n hasupper = (end & mask) != mask\n \n not_mask = ~mask & ((1 << valsize + 1) - 1)\n nextstart = (start + diff if haslower else start) & not_mask\n nextend = (end - diff if hasupper else end) & not_mask\n \n if shift + step >= valsize or nextstart > nextend:\n yield (start, setbits(end), shift)\n break\n \n if haslower:\n yield (start, setbits(start | mask), shift)\n if hasupper:\n yield (end & not_mask, setbits(end), shift)\n \n start = nextstart\n end = nextend\n shift += step", "def orders(self):\n self._current_order = self.min_order\n while self._current_order <= self.max_order:\n yield self._current_order\n self._current_order += 1\n del self._current_order", "def _bucket_boundaries(max_length, min_length=8, length_bucket_step=1.1):\n assert length_bucket_step > 1.0\n x = min_length\n boundaries = []\n while x < max_length:\n boundaries.append(x)\n x = max(x + 1, int(x * length_bucket_step))\n return boundaries", "def discrete_layer(width: float, steps: int) -> list:\n\n min_x = 0.001\n steps = steps/2\n\n def sum_function(stretch_factor):\n return width - min_x * ((1 - stretch_factor**steps)/(1 - stretch_factor))\n\n stretch = float(fsolve(sum_function, 1.3)[0])\n\n return sub_division(width, min_x, stretch)", "def solution_steps(solution):\n steps = []\n # Loops through the list of Grid objects\n for i in range(len(solution[0]) - 1):\n # Calculating the difference between the vehicles in two Grid objects\n grid1, grid2 = solution[0][i], solution[0][i + 1]\n vehicle1 = list(grid1.vehicles - grid2.vehicles)[0]\n vehicle2 = list(grid2.vehicles - grid1.vehicles)[0]\n\n # Get direction of the step\n if vehicle1.x < vehicle2.x:\n steps.append('step %d: {0} right'.format(vehicle1.name) %i)\n elif vehicle1.x > vehicle2.x:\n steps.append('step %d: {0} left'.format(vehicle1.name) %i)\n elif vehicle1.y < vehicle2.y:\n steps.append('step %d: {0} down'.format(vehicle1.name) %i)\n elif vehicle1.y > vehicle2.y:\n steps.append('step %d: {0} up'.format(vehicle1.name) %i)\n return steps" ]
[ "0.60645884", "0.5842618", "0.5752251", "0.5727292", "0.57264835", "0.5698114", "0.5696493", "0.5655299", "0.5610366", "0.54864746", "0.54621786", "0.5460923", "0.5425928", "0.5359517", "0.5358369", "0.5346567", "0.5337107", "0.5318492", "0.5314977", "0.5291446", "0.52641326", "0.5263718", "0.523544", "0.5223234", "0.5207775", "0.52012086", "0.5185762", "0.51847243", "0.5180889", "0.5153557" ]
0.59152174
1
Open the meeting calendar view to schedule a meeting on the current phonecall.
def action_make_meeting(self):
    partner_ids = [
        self.env['res.users'].browse(self.env.uid).partner_id.id]
    res = {}
    for phonecall in self:
        if phonecall.partner_id and phonecall.partner_id.email:
            partner_ids.append(phonecall.partner_id.id)
        res = self.env['ir.actions.act_window'].for_xml_id(
            'calendar', 'action_calendar_event')
        res['context'] = {
            'default_phonecall_id': phonecall.id,
            'default_partner_ids': partner_ids,
            'default_user_id': self.env.uid,
            'default_email_from': phonecall.email_from,
            'default_name': phonecall.name,
        }
    return res
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def open_mwindow_agenda(self) -> None:\n self.mwindow_agenda.show()", "def meeting(request, meeting_id):\n meeting = get_object_or_404(Meeting, pk=meeting_id)\n context = {'meeting': meeting}\n return render(request, 'sacms/meeting.html', context)", "def edit_meeting_schedule(request, num=None, owner=None, name=None):\n # Need to coordinate this list with types of session requests\n # that can be created (see, e.g., SessionQuerySet.requests())\n IGNORE_TIMESLOT_TYPES = ('offagenda', 'reserved', 'unavail')\n meeting = get_meeting(num)\n if name is None:\n schedule = meeting.schedule\n else:\n schedule = get_schedule_by_name(meeting, get_person_by_email(owner), name)\n\n if schedule is None:\n raise Http404(\"No meeting information for meeting %s owner %s schedule %s available\" % (num, owner, name))\n\n can_see, can_edit, secretariat = schedule_permissions(meeting, schedule, request.user)\n\n lock_time = settings.MEETING_SESSION_LOCK_TIME\n def timeslot_locked(ts):\n meeting_now = now().astimezone(pytz.timezone(meeting.time_zone))\n if not settings.USE_TZ:\n meeting_now = meeting_now.replace(tzinfo=None)\n return schedule.is_official and (ts.time - meeting_now < lock_time)\n\n if not can_see:\n if request.method == 'POST':\n permission_denied(request, \"Can't view this schedule.\")\n\n return render(request, \"meeting/private_schedule.html\", {\n \"schedule\":schedule,\n \"meeting\": meeting,\n \"meeting_base_url\": request.build_absolute_uri(meeting.base_url()),\n \"hide_menu\": True\n }, status=403, content_type=\"text/html\")\n\n # See if we were given one or more 'type' query string parameters. If so, filter to that timeslot type.\n if 'type' in request.GET:\n include_timeslot_types = request.GET.getlist('type')\n else:\n include_timeslot_types = None # disables filtering by type (other than IGNORE_TIMESLOT_TYPES)\n\n assignments = SchedTimeSessAssignment.objects.filter(\n schedule__in=[schedule, schedule.base],\n timeslot__location__isnull=False,\n )\n if include_timeslot_types is not None:\n assignments = assignments.filter(session__type__in=include_timeslot_types)\n assignments = assignments.order_by('timeslot__time','timeslot__name')\n\n assignments_by_session = defaultdict(list)\n for a in assignments:\n assignments_by_session[a.session_id].append(a)\n\n tombstone_states = ['canceled', 'canceledpa', 'resched']\n\n sessions = Session.objects.filter(meeting=meeting)\n if include_timeslot_types is not None:\n sessions = sessions.filter(type__in=include_timeslot_types)\n sessions = add_event_info_to_session_qs(\n sessions.exclude(\n type__in=IGNORE_TIMESLOT_TYPES,\n ).order_by('pk'),\n requested_time=True,\n requested_by=True,\n ).filter(\n Q(current_status__in=['appr', 'schedw', 'scheda', 'sched'])\n | Q(current_status__in=tombstone_states, pk__in={a.session_id for a in assignments})\n ).prefetch_related(\n 'resources', 'group', 'group__parent', 'group__type', 'joint_with_groups', 'purpose',\n )\n\n timeslots_qs = TimeSlot.objects.filter(meeting=meeting)\n if include_timeslot_types is not None:\n timeslots_qs = timeslots_qs.filter(type__in=include_timeslot_types)\n timeslots_qs = timeslots_qs.exclude(\n type__in=IGNORE_TIMESLOT_TYPES,\n ).prefetch_related('type').order_by('location', 'time', 'name')\n\n if timeslots_qs.count() > 0:\n min_duration = min(t.duration for t in timeslots_qs)\n max_duration = max(t.duration for t in timeslots_qs)\n else:\n min_duration = 1\n max_duration = 2\n\n def timedelta_to_css_ems(timedelta):\n # we scale the session and slots a bit according to 
their\n # length for an added visual clue\n capped_min_d = max(min_duration, datetime.timedelta(minutes=30))\n capped_max_d = min(max_duration, datetime.timedelta(hours=4))\n capped_timedelta = min(max(capped_min_d, timedelta), capped_max_d)\n\n min_d_css_rems = 8\n max_d_css_rems = 10\n # interpolate\n scale = (capped_timedelta - capped_min_d) / (capped_max_d - capped_min_d) if capped_min_d != capped_max_d else 1\n return min_d_css_rems + (max_d_css_rems - min_d_css_rems) * scale\n\n def prepare_sessions_for_display(sessions):\n # requesters\n requested_by_lookup = {p.pk: p for p in Person.objects.filter(pk__in=set(s.requested_by for s in sessions if s.requested_by))}\n\n # constraints\n constraints_for_sessions, formatted_constraints_for_sessions, constraint_names = preprocess_constraints_for_meeting_schedule_editor(meeting, sessions)\n\n sessions_for_group = defaultdict(list)\n for s in sessions:\n sessions_for_group[s.group_id].append(s)\n\n for s in sessions:\n s.requested_by_person = requested_by_lookup.get(s.requested_by)\n\n s.scheduling_label = \"???\"\n s.purpose_label = None\n if (s.purpose.slug in ('none', 'regular')) and s.group:\n s.scheduling_label = s.group.acronym\n s.purpose_label = 'BoF' if s.group.is_bof() else s.group.type.name\n else:\n s.purpose_label = s.purpose.name\n if s.name:\n s.scheduling_label = s.name\n\n s.requested_duration_in_hours = round(s.requested_duration.seconds / 60.0 / 60.0, 1)\n\n session_layout_margin = 0.2\n s.layout_width = timedelta_to_css_ems(s.requested_duration) - 2 * session_layout_margin\n s.parent_acronym = s.group.parent.acronym if s.group and s.group.parent else \"\"\n\n # compress the constraints, so similar constraint labels are\n # shared between the conflicting sessions they cover - the JS\n # then simply has to detect violations and show the\n # preprocessed labels\n constrained_sessions_grouped_by_label = defaultdict(set)\n for name_id, ts in itertools.groupby(sorted(constraints_for_sessions.get(s.pk, [])), key=lambda t: t[0]):\n ts = list(ts)\n session_pks = (t[1] for t in ts)\n constraint_name = constraint_names[name_id]\n if \"{count}\" in constraint_name.formatted_editor_label:\n for session_pk, grouped_session_pks in itertools.groupby(session_pks):\n count = sum(1 for i in grouped_session_pks)\n constrained_sessions_grouped_by_label[format_html(constraint_name.formatted_editor_label, count=count)].add(session_pk)\n\n else:\n constrained_sessions_grouped_by_label[constraint_name.formatted_editor_label].update(session_pks)\n\n s.constrained_sessions = list(constrained_sessions_grouped_by_label.items())\n s.formatted_constraints = formatted_constraints_for_sessions.get(s.pk, {})\n\n s.other_sessions = [s_other for s_other in sessions_for_group.get(s.group_id) if s != s_other]\n\n s.readonly = s.current_status in tombstone_states or any(a.schedule_id != schedule.pk for a in assignments_by_session.get(s.pk, []))\n\n def prepare_timeslots_for_display(timeslots, rooms):\n \"\"\"Prepare timeslot data for template\n\n Prepares timeslots for display by sorting into groups in a structure\n that can be rendered by the template and by adding some data to the timeslot\n instances. 
Currently adds a 'layout_width' property to each timeslot instance.\n The layout_width is the width, in em, that should be used to style the timeslot's\n width.\n\n Rooms are partitioned into groups that have identical sets of timeslots\n for the entire meeting.\n\n The result of this method is an OrderedDict, days, keyed by the Date\n of each day that has at least one timeslot. The value of days[day] is a\n list with one entry for each group of rooms. Each entry is a list of\n dicts with keys 'room' and 'timeslots'. The 'room' value is the room\n instance and 'timeslots' is a list of timeslot instances for that room.\n\n The format is more easily illustrated than explained:\n\n days = OrderedDict(\n Date(2021, 5, 27): [\n [ # room group 1\n {'room': <room1>, 'timeslots': [<room1 timeslot1>, <room1 timeslot2>]},\n {'room': <room2>, 'timeslots': [<room2 timeslot1>, <room2 timeslot2>]},\n {'room': <room3>, 'timeslots': [<room3 timeslot1>, <room3 timeslot2>]},\n ],\n [ # room group 2\n {'room': <room4>, 'timeslots': [<room4 timeslot1>]},\n ],\n ],\n Date(2021, 5, 28): [\n [ # room group 1\n {'room': <room1>, 'timeslots': [<room1 timeslot3>]},\n {'room': <room2>, 'timeslots': [<room2 timeslot3>]},\n {'room': <room3>, 'timeslots': [<room3 timeslot3>]},\n ],\n [ # room group 2\n {'room': <room4>, 'timeslots': []},\n ],\n ],\n )\n \"\"\"\n\n # Populate room_data. This collects the timeslots for each room binned by\n # day, plus data needed for sorting the rooms for display.\n room_data = dict()\n all_days = set()\n # timeslots_qs is already sorted by location, name, and time\n for t in timeslots:\n if t.location not in rooms:\n continue\n\n t.layout_width = timedelta_to_css_ems(t.duration)\n if t.location_id not in room_data:\n room_data[t.location_id] = dict(\n timeslots_by_day=dict(),\n timeslot_count=0,\n start_and_duration=[],\n first_timeslot = t,\n )\n rd = room_data[t.location_id]\n rd['timeslot_count'] += 1\n rd['start_and_duration'].append((t.time, t.duration))\n ttd = t.time.date()\n all_days.add(ttd)\n if ttd not in rd['timeslots_by_day']:\n rd['timeslots_by_day'][ttd] = []\n rd['timeslots_by_day'][ttd].append(t)\n\n all_days = sorted(all_days) # changes set to a list\n # Note the maximum timeslot count for any room\n if len(room_data) > 0:\n max_timeslots = max(rd['timeslot_count'] for rd in room_data.values())\n else:\n max_timeslots = 0\n\n # Partition rooms into groups with identical timeslot arrangements.\n # Start by discarding any roos that have no timeslots.\n rooms_with_timeslots = [r for r in rooms if r.pk in room_data]\n # Then sort the remaining rooms.\n sorted_rooms = sorted(\n rooms_with_timeslots,\n key=lambda room: (\n # First, sort regular session rooms ahead of others - these will usually\n # have more timeslots than other room types.\n 0 if room_data[room.pk]['timeslot_count'] == max_timeslots else 1,\n # Sort rooms with earlier timeslots ahead of later\n room_data[room.pk]['first_timeslot'].time,\n # Sort rooms with more sessions ahead of rooms with fewer\n 0 - room_data[room.pk]['timeslot_count'],\n # Sort by list of starting time and duration so that groups with identical\n # timeslot structure will be neighbors. 
The grouping algorithm relies on this!\n room_data[room.pk]['start_and_duration'],\n # Within each group, sort higher capacity rooms first.\n room.capacity,\n # Finally, sort alphabetically by name\n room.name\n )\n )\n\n # Rooms are now ordered so rooms with identical timeslot arrangements are neighbors.\n # Walk the list, splitting these into groups.\n room_groups = []\n last_start_and_duration = None # Used to watch for changes in start_and_duration\n for room in sorted_rooms:\n if last_start_and_duration != room_data[room.pk]['start_and_duration']:\n room_groups.append([]) # start a new room_group\n last_start_and_duration = room_data[room.pk]['start_and_duration']\n room_groups[-1].append(room)\n\n # Next, build the structure that will hold the data for the view. This makes it\n # easier to arrange that every room has an entry for every day, even if there is\n # no timeslot for that day. This makes the HTML template much easier to write.\n # Use OrderedDicts instead of lists so that we can easily put timeslot data in the\n # right place.\n days = OrderedDict(\n (\n day, # key in the Ordered Dict\n [\n # each value is an OrderedDict of room group data\n OrderedDict(\n (room.pk, dict(room=room, timeslots=[]))\n for room in rg\n ) for rg in room_groups\n ]\n ) for day in all_days\n )\n\n # With the structure's skeleton built, now fill in the data. The loops must\n # preserve the order of room groups and rooms within each group.\n for rg_num, rgroup in enumerate(room_groups):\n for room in rgroup:\n for day, ts_for_day in room_data[room.pk]['timeslots_by_day'].items():\n days[day][rg_num][room.pk]['timeslots'] = ts_for_day\n\n # Now convert the OrderedDict entries into lists since we don't need to\n # do lookup by pk any more.\n for day in days.keys():\n days[day] = [list(rg.values()) for rg in days[day]]\n\n return days\n\n def _json_response(success, status=None, **extra_data):\n if status is None:\n status = 200 if success else 400\n data = dict(success=success, **extra_data)\n return JsonResponse(data, status=status)\n\n if request.method == 'POST':\n if not can_edit:\n permission_denied(request, \"Can't edit this schedule.\")\n\n action = request.POST.get('action')\n\n # Handle ajax requests. Most of these return JSON responses with at least a 'success' key.\n # For the swapdays and swaptimeslots actions, the response is either a redirect to the\n # updated page or a simple BadRequest error page. 
The latter should not normally be seen\n # by the user, because the front end should be preventing most invalid requests.\n if action == 'assign' and request.POST.get('session', '').isdigit() and request.POST.get('timeslot', '').isdigit():\n session = get_object_or_404(sessions, pk=request.POST['session'])\n timeslot = get_object_or_404(timeslots_qs, pk=request.POST['timeslot'])\n if timeslot_locked(timeslot):\n return _json_response(False, error=\"Can't assign to this timeslot.\")\n\n tombstone_session = None\n\n existing_assignments = SchedTimeSessAssignment.objects.filter(session=session, schedule=schedule)\n\n if existing_assignments:\n assertion('len(existing_assignments) <= 1',\n note='Multiple assignments for {} in schedule {}'.format(session, schedule))\n\n if timeslot_locked(existing_assignments[0].timeslot):\n return _json_response(False, error=\"Can't reassign this session.\")\n\n if schedule.pk == meeting.schedule_id and session.current_status == 'sched':\n old_timeslot = existing_assignments[0].timeslot\n # clone session and leave it as a tombstone\n tombstone_session = session\n tombstone_session.tombstone_for_id = session.pk\n tombstone_session.pk = None\n tombstone_session.save()\n\n session = None\n\n SchedulingEvent.objects.create(\n session=tombstone_session,\n status=SessionStatusName.objects.get(slug='resched'),\n by=request.user.person,\n )\n\n tombstone_session.current_status = 'resched' # rematerialize status for the rendering\n\n SchedTimeSessAssignment.objects.create(\n session=tombstone_session,\n schedule=schedule,\n timeslot=old_timeslot,\n )\n\n existing_assignments.update(timeslot=timeslot, modified=datetime.datetime.now())\n else:\n SchedTimeSessAssignment.objects.create(\n session=session,\n schedule=schedule,\n timeslot=timeslot,\n )\n\n if tombstone_session:\n prepare_sessions_for_display([tombstone_session])\n return _json_response(\n True,\n tombstone=render_to_string(\"meeting/edit_meeting_schedule_session.html\",\n {'session': tombstone_session})\n )\n else:\n return _json_response(True)\n\n elif action == 'unassign' and request.POST.get('session', '').isdigit():\n session = get_object_or_404(sessions, pk=request.POST['session'])\n existing_assignments = SchedTimeSessAssignment.objects.filter(session=session, schedule=schedule)\n assertion('len(existing_assignments) <= 1',\n note='Multiple assignments for {} in schedule {}'.format(session, schedule))\n if not any(timeslot_locked(ea.timeslot) for ea in existing_assignments):\n existing_assignments.delete()\n else:\n return _json_response(False, error=\"Can't unassign this session.\")\n\n return _json_response(True)\n\n elif action == 'swapdays':\n # updating the client side is a bit complicated, so just\n # do a full refresh\n\n swap_days_form = SwapDaysForm(request.POST)\n if not swap_days_form.is_valid():\n return HttpResponseBadRequest(\"Invalid swap: {}\".format(swap_days_form.errors))\n\n source_day = swap_days_form.cleaned_data['source_day']\n target_day = swap_days_form.cleaned_data['target_day']\n\n source_timeslots = [ts for ts in timeslots_qs if ts.time.date() == source_day]\n target_timeslots = [ts for ts in timeslots_qs if ts.time.date() == target_day]\n if any(timeslot_locked(ts) for ts in source_timeslots + target_timeslots):\n return HttpResponseBadRequest(\"Can't swap these days.\")\n\n swap_meeting_schedule_timeslot_assignments(schedule, source_timeslots, target_timeslots, target_day - source_day)\n\n return HttpResponseRedirect(request.get_full_path())\n\n elif action == 
'swaptimeslots':\n # Swap sets of timeslots with equal start/end time for a given set of rooms.\n # Gets start and end times from TimeSlot instances for the origin and target,\n # then swaps all timeslots for the requested rooms whose start/end match those.\n # The origin/target timeslots do not need to be the same duration.\n swap_timeslots_form = SwapTimeslotsForm(meeting, request.POST)\n if not swap_timeslots_form.is_valid():\n return HttpResponseBadRequest(\"Invalid swap: {}\".format(swap_timeslots_form.errors))\n\n affected_rooms = swap_timeslots_form.cleaned_data['rooms']\n origin_timeslot = swap_timeslots_form.cleaned_data['origin_timeslot']\n target_timeslot = swap_timeslots_form.cleaned_data['target_timeslot']\n\n origin_timeslots = meeting.timeslot_set.filter(\n location__in=affected_rooms,\n time=origin_timeslot.time,\n duration=origin_timeslot.duration,\n )\n target_timeslots = meeting.timeslot_set.filter(\n location__in=affected_rooms,\n time=target_timeslot.time,\n duration=target_timeslot.duration,\n )\n if (any(timeslot_locked(ts) for ts in origin_timeslots)\n or any(timeslot_locked(ts) for ts in target_timeslots)):\n return HttpResponseBadRequest(\"Can't swap these timeslots.\")\n\n swap_meeting_schedule_timeslot_assignments(\n schedule,\n list(origin_timeslots),\n list(target_timeslots),\n target_timeslot.time - origin_timeslot.time,\n )\n return HttpResponseRedirect(request.get_full_path())\n\n return _json_response(False, error=\"Invalid parameters\")\n\n # Show only rooms that have regular sessions\n if include_timeslot_types is None:\n rooms = meeting.room_set.all()\n else:\n rooms = meeting.room_set.filter(session_types__slug__in=include_timeslot_types)\n\n # Construct timeslot data for the template to render\n days = prepare_timeslots_for_display(timeslots_qs, rooms)\n\n # possible timeslot start/ends\n timeslot_groups = defaultdict(set)\n for ts in timeslots_qs:\n ts.start_end_group = \"ts-group-{}-{}\".format(ts.time.strftime(\"%Y%m%d-%H%M\"), int(ts.duration.total_seconds() / 60))\n timeslot_groups[ts.time.date()].add((ts.time, ts.end_time(), ts.start_end_group))\n\n # prepare sessions\n prepare_sessions_for_display(sessions)\n\n for ts in timeslots_qs:\n ts.session_assignments = []\n timeslots_by_pk = {ts.pk: ts for ts in timeslots_qs}\n\n unassigned_sessions = []\n for s in sessions:\n assigned = False\n for a in assignments_by_session.get(s.pk, []):\n timeslot = timeslots_by_pk.get(a.timeslot_id)\n if timeslot:\n timeslot.session_assignments.append((a, s))\n assigned = True\n\n if not assigned:\n unassigned_sessions.append(s)\n\n # group parent colors\n def cubehelix(i, total, hue=1.2, start_angle=0.5):\n # theory in https://arxiv.org/pdf/1108.5083.pdf\n rotations = total // 4\n x = float(i + 1) / (total + 1)\n phi = 2 * math.pi * (start_angle / 3 + rotations * x)\n a = hue * x * (1 - x) / 2.0\n\n return (\n max(0, min(x + a * (-0.14861 * math.cos(phi) + 1.78277 * math.sin(phi)), 1)),\n max(0, min(x + a * (-0.29227 * math.cos(phi) + -0.90649 * math.sin(phi)), 1)),\n max(0, min(x + a * (1.97294 * math.cos(phi)), 1)),\n )\n\n session_parents = sorted(set(\n s.group.parent for s in sessions\n if s.group and s.group.parent and (s.group.parent.type_id == 'area' or s.group.parent.acronym in ('irtf','iab'))\n ), key=lambda p: p.acronym)\n\n liz_preferred_colors = {\n 'art' : { 'dark' : (204, 121, 167) , 'light' : (234, 232, 230) },\n 'gen' : { 'dark' : (29, 78, 17) , 'light' : (232, 237, 231) },\n 'iab' : { 'dark' : (255, 165, 0) , 'light' : (255, 246, 230) },\n 
'int' : { 'dark' : (132, 240, 240) , 'light' : (232, 240, 241) },\n 'irtf' : { 'dark' : (154, 119, 230) , 'light' : (243, 239, 248) },\n 'ops' : { 'dark' : (199, 133, 129) , 'light' : (250, 240, 242) },\n 'rtg' : { 'dark' : (222, 219, 124) , 'light' : (247, 247, 233) },\n 'sec' : { 'dark' : (0, 114, 178) , 'light' : (245, 252, 248) },\n 'tsv' : { 'dark' : (117,201,119) , 'light' : (251, 252, 255) },\n } \n for i, p in enumerate(session_parents):\n if p.acronym in liz_preferred_colors:\n colors = liz_preferred_colors[p.acronym]\n p.scheduling_color = \"rgb({}, {}, {})\".format(*colors['dark'])\n p.light_scheduling_color = \"rgb({}, {}, {})\".format(*colors['light'])\n else:\n rgb_color = cubehelix(i, len(session_parents))\n p.scheduling_color = \"rgb({}, {}, {})\".format(*tuple(int(round(x * 255)) for x in rgb_color))\n p.light_scheduling_color = \"rgb({}, {}, {})\".format(*tuple(int(round((0.9 + 0.1 * x) * 255)) for x in rgb_color))\n\n session_purposes = sorted(set(s.purpose for s in sessions if s.purpose), key=lambda p: p.name)\n timeslot_types = sorted(\n set(\n s.type for s in sessions if s.type\n ).union(\n t.type for t in timeslots_qs.all()\n ),\n key=lambda tstype: tstype.name,\n )\n\n return render(request, \"meeting/edit_meeting_schedule.html\", {\n 'meeting': meeting,\n 'schedule': schedule,\n 'can_edit': can_edit,\n 'can_edit_properties': can_edit or secretariat,\n 'secretariat': secretariat,\n 'days': days,\n 'timeslot_groups': sorted((d, list(sorted(t_groups))) for d, t_groups in timeslot_groups.items()),\n 'unassigned_sessions': unassigned_sessions,\n 'session_parents': session_parents,\n 'session_purposes': session_purposes,\n 'timeslot_types': timeslot_types,\n 'hide_menu': True,\n 'lock_time': lock_time,\n })", "def action_makeMeeting(self, cr, uid, ids, context=None):\n opportunity = self.browse(cr, uid, ids[0], context)\n res = self.pool.get('ir.actions.act_window').for_xml_id(cr, uid, 'base_calendar', 'action_crm_meeting', context)\n res['context'] = {\n 'default_opportunity_id': opportunity.id,\n 'default_partner_id': opportunity.partner_id and opportunity.partner_id.id or False,\n 'default_partner_ids' : opportunity.partner_id and [opportunity.partner_id.id] or False,\n 'default_user_id': uid,\n 'default_section_id': opportunity.section_id and opportunity.section_id.id or False,\n 'default_email_from': opportunity.email_from,\n 'default_state': 'open',\n 'default_name': opportunity.name,\n }\n return res", "def schedule_meeting(intent_request):\n \n meeting_person = intent_request['currentIntent']['slots']['Person']\n meeting_type = intent_request['currentIntent']['slots']['MeetingType']\n meeting_date = intent_request['currentIntent']['slots']['Date']\n meeting_time = intent_request['currentIntent']['slots']['Time']\n meeting_duration = intent_request['currentIntent']['slots']['Duration']\n meeting_address = intent_request['currentIntent']['slots']['Address']\n invitation_link = intent_request['currentIntent']['slots']['InvitationLink']\n phone_number = intent_request['currentIntent']['slots']['Phone']\n source = intent_request['invocationSource']\n output_session_attributes = intent_request['sessionAttributes'] if intent_request['sessionAttributes'] is not None else {}\n booking_map = json.loads(try_ex(lambda: output_session_attributes['bookingMap']) or '{}')\n\n if source == 'DialogCodeHook':\n # Perform basic validation on the supplied input slots.\n slots = intent_request['currentIntent']['slots']\n validation_result = validate_schedule_meeting(meeting_duration, 
date, meeting_time)\n if not validation_result['isValid']:\n slots[validation_result['violatedSlot']] = None\n return elicit_slot(\n output_session_attributes,\n intent_request['currentIntent']['name'],\n slots,\n validation_result['violatedSlot'],\n validation_result['message']\n )\n\n if not meeting_person:\n return elicit_slot(\n output_session_attributes,\n intent_request['currentIntent']['name'],\n intent_request['currentIntent']['slots'],\n 'Person',\n {'contentType': 'PlainText', 'content': 'Who is gonna be that with?'}\n )\n \n if meeting_person and not meeting_type:\n return elicit_slot(\n output_session_attributes,\n intent_request['currentIntent']['name'],\n intent_request['currentIntent']['slots'],\n 'MeetingType',\n {'contentType': 'PlainText', 'content': 'What type of meeting would you like to schedule?'}\n )\n\n if meeting_person and meeting_type and not meeting_date:\n return elicit_slot(\n output_session_attributes,\n intent_request['currentIntent']['name'],\n intent_request['currentIntent']['slots'],\n 'Date',\n {'contentType': 'PlainText', 'content': 'When would you like to schedule your {} ?'.format(meeting_type)}\n )\n\n if meeting_type and meeting_date:\n # Fetch or generate the availabilities for the given date.\n booking_availabilities = try_ex(lambda: booking_map[meeting_date])\n if booking_availabilities is None:\n booking_availabilities = get_availabilities(meeting_date)\n booking_map[meeting_date] = booking_availabilities\n output_session_attributes['bookingMap'] = json.dumps(booking_map)\n\n meeting_type_availabilities = get_availabilities_for_duration(get_duration(meeting_type), booking_availabilities)\n if len(meeting_type_availabilities) == 0:\n # No availability on this day at all; ask for a new date and time.\n slots['Date'] = None\n slots['Time'] = None\n return elicit_slot(\n output_session_attributes,\n intent_request['currentIntent']['name'],\n slots,\n 'Date',\n {'contentType': 'PlainText', 'content': 'There is not any availability on that date, is there another day which works for you?'}\n )\n\n message_content = 'What time on {} works for you? '.format(meeting_date)\n if meeting_time:\n output_session_attributes['formattedTime'] = build_time_output_string(meeting_time)\n # Validate that proposed time for the meeting can be booked by first fetching the availabilities for the given day. To\n # give consistent behavior in the sample, this is stored in sessionAttributes after the first lookup.\n if is_available(meeting_time, get_duration(meeting_type), booking_availabilities):\n return delegate(output_session_attributes, slots)\n message_content = 'The time you requested is not available. 
'\n\n if len(meeting_type_availabilities) == 1:\n # If there is only one availability on the given date, try to confirm it.\n slots['Time'] = meeting_type_availabilities[0]\n return confirm_intent(\n output_session_attributes,\n intent_request['currentIntent']['name'],\n slots,\n {\n 'contentType': 'PlainText',\n 'content': '{}{} is our only availability, does that work for you?'.format\n (message_content, build_time_output_string(meeting_type_availabilities[0]))\n },\n build_response_card(\n 'Confirm Meeting',\n 'Is {} on {} okay?'.format(build_time_output_string(meeting_type_availabilities[0]), date),\n [{'text': 'yes', 'value': 'yes'}, {'text': 'no', 'value': 'no'}]\n )\n )\n\n available_time_string = build_available_time_string(meeting_type_availabilities)\n return elicit_slot(\n output_session_attributes,\n intent_request['currentIntent']['name'],\n slots,\n 'Time',\n {'contentType': 'PlainText', 'content': '{}{}'.format(message_content, available_time_string)},\n build_response_card(\n 'Specify Time',\n 'What time works best for you?',\n build_options('Time', meeting_type, meeting_date, booking_map)\n )\n )\n \n if meeting_type = 'online' and meeting_person and meeting_date and meeting_time and not invitation_link:\n return elicit_slot(\n output_session_attributes,\n intent_request['currentIntent']['name'],\n intent_request['currentIntent']['slots'],\n 'InvitationLink',\n {'contentType': 'PlainText', 'content': 'Can you paste your invitation link in here, please?'}\n )\n \n if (meeting_type = 'personal' or meeting_type = 'inperson') and meeting_person and meeting_date and meeting_time and not meeting_address:\n return elicit_slot(\n output_session_attributes,\n intent_request['currentIntent']['name'],\n intent_request['currentIntent']['slots'],\n 'Address',\n {'contentType': 'PlainText', 'content': 'Where the {} will take place?', .format(meeting_type)}\n )\n \n if meeting_person and meeting_type and meeting_date and meeting_time and (invitation_link or meeting_address) and not contact_phone\"\n return elicit_slot(\n output_session_attributes,\n intent_request['currentIntent']['name'],\n intent_request['currentIntent']['slots'],\n 'Phone',\n {'contentType': 'PlainText', 'content': 'Can you leave your contact phone number here, please?'}\n\n return delegate(output_session_attributes, slots)\n \n \n \"\"\" --- Check avalibility --- \"\"\"\n\n\n # Book the meeting.\n booking_availabilities = booking_map[meeting_date]\n if booking_availabilities:\n # Remove the availability slot for the given date as it has now been booked.\n booking_availabilities.remove(meeting_time)\n if meeting_duration == 60:\n second_half_hour_time = increment_time_by_thirty_mins(meeting_time)\n booking_availabilities.remove(second_half_hour_time)\n\n booking_map[date] = booking_availabilities\n output_session_attributes['bookingMap'] = json.dumps(booking_map)\n else:\n # This is not treated as an error as this code sample supports functionality either as fulfillment or dialog code hook.\n logger.debug('Availabilities for {} were null at fulfillment time. '\n 'This should have been initialized if this function was configured as the dialog code hook'.format(meeting_date))\n\n return close(\n output_session_attributes,\n 'Fulfilled',\n {\n 'contentType': 'PlainText',\n 'content': 'Okay, I have booked your meeting. 
See you at {} on {}'.format(build_time_output_string(meeting_time), meeting_date)\n }\n )", "def schedule(request):\r\n\r\n return render(request, 'editorial/schedule.html', {})", "def complete_appointment(request, calendar_id):\n calendar = Calendar.objects.get(pk=calendar_id)\n return render(request, 'complete_appointment.html', {'calendar': calendar})", "def open_mwindow_timetable(self) -> None:\n self.mwindow_timetable.show()", "def upcoming_ical(request):\n try:\n filter_params = parse_agenda_filter_params(request.GET)\n except ValueError as e:\n return HttpResponseBadRequest(str(e))\n \n today = datetime.date.today()\n\n # get meetings starting 7 days ago -- we'll filter out sessions in the past further down\n meetings = data_for_meetings_overview(Meeting.objects.filter(date__gte=today-datetime.timedelta(days=7)).prefetch_related('schedule').order_by('date'))\n\n assignments = list(SchedTimeSessAssignment.objects.filter(\n schedule__in=[m.schedule_id for m in meetings] + [m.schedule.base_id for m in meetings if m.schedule],\n session__in=[s.pk for m in meetings for s in m.sessions if m.type_id != 'ietf'],\n timeslot__time__gte=today,\n ).order_by(\n 'schedule__meeting__date', 'session__type', 'timeslot__time'\n ).select_related(\n 'session__group', 'session__group__parent', 'timeslot', 'schedule', 'schedule__meeting'\n ).distinct())\n\n AgendaKeywordTagger(assignments=assignments).apply()\n\n # apply filters\n if filter_params is not None:\n assignments = [a for a in assignments if should_include_assignment(filter_params, a)]\n\n # we already collected sessions with current_status, so reuse those\n sessions = {s.pk: s for m in meetings for s in m.sessions}\n for a in assignments:\n if a.session_id is not None:\n a.session = sessions.get(a.session_id) or a.session\n a.session.ical_status = ical_session_status(a)\n\n # handle IETFs separately\n ietfs = [m for m in meetings if m.type_id == 'ietf']\n preprocess_meeting_important_dates(ietfs)\n\n # icalendar response file should have '\\r\\n' line endings per RFC5545\n response = render_to_string('meeting/upcoming.ics', {\n 'vtimezones': ''.join(sorted(list({meeting.vtimezone() for meeting in meetings if meeting.vtimezone()}))),\n 'assignments': assignments,\n 'ietfs': ietfs,\n }, request=request)\n response = re.sub(\"\\r(?!\\n)|(?<!\\r)\\n\", \"\\r\\n\", response)\n\n response = HttpResponse(response, content_type='text/calendar')\n response['Content-Disposition'] = 'attachment; filename=\"upcoming.ics\"'\n return response", "def save_appointment_details(request, calendar_id):\n def schedule_mail(reminder_date, appointment):\n # Configure our scheduler for reminder\n try:\n trigger = DateTrigger(run_date=reminder_date)\n scheduler.add_job(send_appointment_mail, args=[appointment], trigger=trigger)\n except Exception as exp:\n print(exp)\n \n def schedule_sms(reminder_date, appointment):\n # Configure our scheduler for reminder\n try:\n trigger = DateTrigger(run_date=reminder_date)\n scheduler.add_job(send_appointment_sms, args=[appointment], trigger=trigger)\n except Exception as exp:\n print(exp)\n \n start_time = request.GET['start_time'][:19]\n end_time = request.GET['end_time'][:19]\n \n start_time = datetime.strptime(start_time, \"%Y-%m-%dT%H:%M:%S\")\n end_time=datetime.strptime(end_time, \"%Y-%m-%dT%H:%M:%S\")\n \n calendar_obj = Calendar.objects.get(pk=calendar_id)\n # if this is a POST request we need to process the form data\n if request.method == 'POST':\n\n # create a form instance and populate it with data from the 
request:\n form = AppointmentForm(request.POST)\n\n # check whether it's valid and save it\n if form.is_valid():\n # Save appointment details\n \n mobilephone = form.data['mobilephone']\n email = form.data['email']\n first_name = form.data['first_name']\n last_name = form.data['last_name']\n notes = form.data['notes']\n\n appointment = Appointment(start_time=start_time, end_time=end_time, first_name=first_name, \n last_name=last_name, email=email, mobilephone=mobilephone, notes=notes)\n \n appointment.calendar = calendar_obj\n appointment.save()\n\n try:\n send_appointment_mail(appointment) # send appointment details email\n except Exception as exp:\n print(exp)\n \n try:\n send_appointment_sms(appointment) # send appointment details sms\n except Exception as exp:\n print(exp)\n \n # Calculate reminder schedule dates\n reminder1 = start_time - timedelta(hours=2)\n reminder2 = start_time - timedelta(hours=24)\n reminder3 = start_time - timedelta(days=7)\n\n # Schedule mails\n schedule_mail(reminder1, appointment)\n schedule_mail(reminder2, appointment)\n schedule_mail(reminder3, appointment)\n \n # Schedule sms\n schedule_sms(reminder1, appointment)\n schedule_sms(reminder2, appointment)\n schedule_sms(reminder3, appointment)\n \n return redirect(reverse('appointment:complete_appointment', args=[calendar_id]))\n \n # if a GET (or any other method) we'll create a blank form\n else:\n form = AppointmentForm()\n return render(request, 'appointment_form.html', {'form': form, 'start_time': start_time, 'end_time': end_time,\n 'office_location': calendar_obj.office_location})", "def mkcalendar(self, url, body=\"\", dummy=None):\n return self.request(url, \"MKCALENDAR\", body)", "def abrirCalendar():\n try:\n var.dlgcalendar.show()\n except Exception as error:\n print('Error: %s ' % str(error))", "def appointment():\r\n return render_template(\r\n 'about.html',\r\n title='About',\r\n year=datetime.now().year,\r\n message='Your application description page.'\r\n )", "def create_appointment():\n\n msg = render_template('date')\n return question(msg)", "def refresh_calendar():\n manage.refresh_calendar()", "def post(self):\n global meetups\n user = users.get_current_user()\n if not self.request.get('meetid') in meetups:\n meeting = CanvasSheet()\n meeting.presenter = user\n meetups[self.request.get('meetid')] = meeting\n else:\n self.error(404)\n self.response.out.write('sorry no meeting found \"%s\"' % meetid)\n return", "def calendar_view_link(calendar):\n linkdef = {\n \"label\": calendar.name, \"modal\": True,\n \"title\": _(\"View calendar detail\")\n }\n if calendar.__class__.__name__ == \"UserCalendar\":\n linkdef[\"url\"] = reverse(\n \"modoboa_radicale:user_calendar_detail\", args=[calendar.pk]\n )\n else:\n linkdef[\"url\"] = reverse(\n \"modoboa_radicale:shared_calendar_detail\", args=[calendar.pk]\n )\n return render_link(linkdef)", "def join_meeting(meeting_access, meeting_password):\n # converts url to meeting id if it is a url\n meeting_access = change_meeting_url(meeting_access)\n # opens zoom from the system\n os.system(\"open /Applications/zoom.us.app\")\n time.sleep(3)\n # uses keyboard shortcut to select join\n gui.keyDown('command')\n gui.press('j')\n gui.keyUp('command')\n # meeting id textbox is active by default\n # so the meeting ID can be put in directly\n gui.write(str(meeting_access))\n # enter button proceeds to password page\n gui.press('enter', interval=2)\n # next screen has meeting password as default\n # so we can enter the password directly\n 
gui.write(str(meeting_password))\n # enter button proceeds to audio selection page\n gui.press('enter')", "def agenda_ical(request, num=None, name=None, acronym=None, session_id=None):\n meeting = get_meeting(num, type_in=None)\n schedule = get_schedule(meeting, name)\n updated = meeting.updated()\n\n if schedule is None and acronym is None and session_id is None:\n raise Http404\n\n assignments = SchedTimeSessAssignment.objects.filter(\n schedule__in=[schedule, schedule.base],\n session__on_agenda=True,\n )\n assignments = preprocess_assignments_for_agenda(assignments, meeting)\n AgendaKeywordTagger(assignments=assignments).apply()\n\n try:\n filt_params = parse_agenda_filter_params(request.GET)\n except ValueError as e:\n return HttpResponseBadRequest(str(e))\n\n if filt_params is not None:\n # Apply the filter\n assignments = [a for a in assignments if should_include_assignment(filt_params, a)]\n\n if acronym:\n assignments = [ a for a in assignments if a.session.historic_group and a.session.historic_group.acronym == acronym ]\n elif session_id:\n assignments = [ a for a in assignments if a.session_id == int(session_id) ]\n\n for a in assignments:\n if a.session:\n a.session.ical_status = ical_session_status(a)\n\n return render(request, \"meeting/agenda.ics\", {\n \"schedule\": schedule,\n \"assignments\": assignments,\n \"updated\": updated\n }, content_type=\"text/calendar\")", "def _parse_calendar(self, response):\n lp = LAParams(line_margin=0.1)\n out_str = StringIO()\n extract_text_to_fp(BytesIO(response.body), out_str, laparams=lp)\n pdf_text = re.sub(r\"\\s+\", \" \", out_str.getvalue()).replace(\" ,\", \",\")\n\n for idx, date_str in enumerate(\n re.findall(r\"[a-zA-Z]{3,10} \\d{1,2}, \\d{4}\", pdf_text)\n ):\n # Ignore every other item\n if idx % 2 == 1:\n continue\n meeting = Meeting(\n title=\"Urban Design and Historic Preservation Commission\",\n description=\"\",\n classification=COMMISSION,\n start=self._parse_start(date_str),\n end=None,\n all_day=False,\n time_notes=\"Confirm details with agency\",\n location=self.location,\n links=[],\n source=self.start_urls[0],\n )\n\n meeting[\"status\"] = self._get_status(meeting)\n meeting[\"id\"] = self._get_id(meeting)\n\n yield meeting", "def make_calendar(\n self, name=None, cal_id=None, supported_calendar_component_set=None\n ):\n return self.calendar_home_set.make_calendar(\n name,\n cal_id,\n supported_calendar_component_set=supported_calendar_component_set,\n )", "def calendar_view(request, calendar_id):\n calendar_obj = Calendar.objects.get(pk=calendar_id)\n try:\n appointments = Appointment.objects.all().filter(calendar=calendar_obj)\n appointments = jsonify(appointments)\n except:\n appointments = []\n calendar_obj = calendar_obj.serialize()\n calendar_obj[\"non_working_days\"] = [day for day in [0, 1, 2, 3, 4, 5, 6] if day not in calendar_obj[\"working_days\"]]\n return render(request, 'calendar_view.html', {'calendar_obj': calendar_obj, 'appointments': appointments})", "def send(self):\n event = gdata.calendar.CalendarEventEntry()\n event.title = atom.Title(text=self.title)\n event.content = atom.Content(text='')\n event.where.append(gdata.calendar.Where(value_string=self.location))\n # Set start time in 6 minutes\n start_time = time.strftime('%Y-%m-%dT%H:%M:%S.000Z',\n time.gmtime(time.time() + 6 * 60))\n # Set end time in an hour\n end_time = time.strftime('%Y-%m-%dT%H:%M:%S.000Z',\n time.gmtime(time.time() + 3600))\n event.when.append(gdata.calendar.When(start_time=start_time,\n end_time=end_time))\n minutes = 5\n 
for a_when in event.when:\n if len(a_when.reminder) > 0:\n # Adding reminder in 5 minutes before event (start_time)\n a_when.reminder[0].minutes = 5\n else:\n a_when.reminder.append(\n gdata.calendar.Reminder(minutes=minutes))\n # Insert new event\n new_event = self.calendar_service.InsertEvent(event,\n self.calendar_link)\n return new_event", "def test_calendar(self):\n response = self.app.get(\"/schedule\")\n self.assertTrue(response.status_code, 200)", "def test_meeting(self):\n pass", "def appointment_date(begin_date):\n\n session.attributes['begin_date'] = str(begin_date)\n qs = render_template('time')\n return question(qs)", "def send_reminder(self):\n pass", "def show_events(usrservice,calservice):\r\n print(args.action, args.inuser, 'celendar events')", "def schedule_reservation(reservation_date,reservation_time,party_size,restaurant_name,first_name,restaurant_address):\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n 'credentials.json', SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n service = build('calendar', 'v3', credentials=creds)\n\n # Call the Calendar API\n now = datetime.datetime.utcnow()\n\n reservation_day=reservation_date.split('/')[0]\n reservation_month =reservation_date.split('/')[1]\n reservation_year =reservation_date.split('/')[2]\n reservation_date = reservation_year+'-'+reservation_month+'-'+reservation_day\n start_time_hr= reservation_time[:2]\n end_time_hr= int(reservation_time[:2])+4\n start_time_min= reservation_time[2:]\n end_time_min=start_time_min\n \n \n event = {\n 'summary': 'Reservation at '+restaurant_name,\n 'location': restaurant_address,\n 'description': 'Reservation for '+party_size+' under '+first_name+' made on '+str(now),\n 'start': {\n 'dateTime': reservation_date+'T'+start_time_hr+':'+start_time_min+':00+08:00',\n 'timeZone': 'Asia/Singapore',\n },\n 'end': {\n 'dateTime': reservation_date+'T'+str(end_time_hr)+':'+end_time_min+':00+08:00',\n 'timeZone': 'Asia/Singapore',\n },\n 'reminders': {\n 'useDefault': False,\n 'overrides': [\n {'method': 'email', 'minutes': 24 * 60},\n {'method': 'popup', 'minutes': 10},\n ],\n },\n }\n\n event = service.events().insert(calendarId='primary', body=event).execute()\n print ('Event created: %s', (event.get('htmlLink')))", "def get_agenda_with_datetime(message, datetime):\n meeting = database.get_meeting_by_time(message.chat_id, datetime)\n if meeting is None:\n message.reply_text(\n \"No meeting found with the given date and time. Please try again.\"\n )\n else:\n if meeting.agenda:\n message.reply_document(\n meeting.agenda, caption=\"Here's your meeting agenda.\"\n )\n else:\n message.reply_text(\"No meeting agenda found for the meeting.\")" ]
[ "0.64469135", "0.60219747", "0.5896503", "0.5800959", "0.5769794", "0.57033247", "0.56608593", "0.5613643", "0.5602575", "0.55102795", "0.55021197", "0.5475948", "0.5472367", "0.54602873", "0.5457448", "0.5443587", "0.54143536", "0.53765154", "0.53514504", "0.53112346", "0.52830327", "0.5260359", "0.5248901", "0.52359295", "0.5227762", "0.5222438", "0.5207772", "0.5207536", "0.51948917", "0.51748306" ]
0.6283702
1
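The reminder snippets in the row above all follow the same pattern: schedule a job at a fixed offset before an appointment and have it send the notification. A minimal sketch of just that pattern, using APScheduler's DateTrigger as in the Django snippet; the appointment dict and the send_appointment_mail stand-in are hypothetical placeholders, not taken from the dataset:

from datetime import datetime, timedelta

from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.date import DateTrigger


def send_appointment_mail(appointment):
    # Hypothetical reminder action; a real application would send an email here.
    print("Reminder: appointment at", appointment["start_time"])


scheduler = BackgroundScheduler()
scheduler.start()

# Placeholder appointment three hours from now.
appointment = {"start_time": datetime.now() + timedelta(hours=3)}

# Fire the reminder two hours before the start time, mirroring the
# "reminder1 = start_time - timedelta(hours=2)" pattern in the snippet above.
trigger = DateTrigger(run_date=appointment["start_time"] - timedelta(hours=2))
scheduler.add_job(send_appointment_mail, args=[appointment], trigger=trigger)

# Note: BackgroundScheduler runs jobs in daemon threads, so the process must
# stay alive (as a web server would) for the scheduled reminder to fire.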
Plots a confusion matrix of the model predictions to evaluate accuracy
def plot_confusion_matrix(self):
    interp = ClassificationInterpretation.from_learner(self.learn)
    interp.plot_confusion_matrix()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_confusion_matrix(name, trained_predictor, X_test, y_test):\n\n fig, ax = plt.subplots()\n fig.tight_layout()\n cm = confusion_matrix(y_test, trained_predictor.predict(X_test), normalize=\"all\")\n ConfusionMatrixDisplay(cm, display_labels=[\"False\", \"True\"]).plot(\n ax=ax\n )\n plt.title(name)", "def plot_confusion_matrix(y_pred, y_true, classes_list):\n fig = plt.figure(figsize=(8, 8))\n cm = confusion_matrix(y_pred, y_true)\n plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)\n plt.title('Confusion matrix')\n plt.colorbar()\n tick_marks = np.arange(len(classes_list))\n plt.xticks(tick_marks, classes_list, rotation=45)\n plt.yticks(tick_marks, classes_list)\n cm = np.around(cm.astype('float') / cm.sum(axis=1)[:, np.newaxis], decimals=2)\n\n thresh = cm.max() / 2.0\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n return fig", "def plot_confusion_matrix(y_test, y_pred, classes,\n normalize=True,\n title='Average accuracy \\n',\n cmap=plt.cm.Blues, verbose = 0, precision = 0):\n from sklearn.metrics import confusion_matrix\n import itertools\n \n cm = confusion_matrix(y_test, y_pred)\n accuracy = (np.sum(np.diag(cm)) / np.sum(cm)) * 100.0\n\n if normalize:\n cm = (cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]) * 100.0\n if verbose == 1:\n print(\"Normalized confusion matrix\")\n else:\n if verbose == 1:\n print('Confusion matrix, without normalization')\n \n if verbose == 1:\n print(cm)\n\n plt.figure(figsize=(18, 9))\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title.format_map({'acc':accuracy}), fontsize=25)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45, fontsize=20)\n plt.yticks(tick_marks, classes, fontsize=20)\n\n fmt = '{:.'+ '%d'%(precision) +'f} %' if normalize else '{:d}'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, fmt.format(cm[i, j]),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\", fontsize=16)\n plt.tight_layout()\n plt.ylabel('True label', fontsize=20)\n plt.xlabel('Predicted label', fontsize=20)", "def confusion_matrix(y_true, y_pred):\n skplt.plot_confusion_matrix(y_true, y_pred, normalize=True)\n plt.show()", "def visual_cm(true_y, pred_y, labels = None):\n # visualizing the confusion matrix\n\n # setting labels\n lbls = labels\n \n\n # declaring a confusion matrix object\n cm = confusion_matrix(y_true = true_y,\n y_pred = pred_y)\n\n\n # heatmap\n sns.heatmap(cm,\n annot = True,\n xticklabels = lbls,\n yticklabels = lbls,\n cmap = 'Blues',\n fmt = 'g')\n\n\n plt.xlabel('Predicted')\n plt.ylabel('Actual')\n plt.title('Confusion Matrix of the Classifier')\n plt.show()", "def plot_confusion_matrix(model, y_true, y_pred, classes, normalize=False, title=None, cmap=plt.cm.Blues):\n if not title:\n if normalize:\n title = 'Normalized confusion matrix'\n else:\n title = 'Confusion matrix, without normalization'\n\n # Compute confusion matrix\n cm = confusion_matrix(y_true, y_pred)\n # Only use the labels that appear in the data\n # classes = classes[unique_labels(y_true, y_pred)]\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n 
print(cm)\n\n fig, ax = plt.subplots()\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n # ... and label them with the respective list entries\n xticklabels=classes, yticklabels=classes,\n title=title,\n ylabel='True label',\n xlabel='Predicted label')\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Loop over data dimensions and create text annotations.\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n fig.tight_layout()\n np.set_printoptions(precision=2)\n return ax", "def showConfusionMatrix(self): \r\n sn.heatmap(self.conf_matrix, annot=True)\r\n plt.plot( label=\"Accuracy\")\r\n plt.plot( label=\"Error\")\r\n plt.figtext(0,0,'Accuracy: {}\\nError: {}\\nRecall: {}\\nPrecision: {}'.format(self.accuracy,\r\n self.error,\r\n self.recall,\r\n self.precision))\r\n plt.title('Confusion Matrix')\r\n plt.show()\r\n return None", "def plot_results(self, predictions: list):\n fig, ax = plt.subplots()\n cm = confusion_matrix(self.test[1], predictions)\n conf = confusion_matrix(self.test[1], predictions).ravel()\n nbr_labels = len(set(self.test[1]))\n cm = conf.reshape(nbr_labels, nbr_labels)\n sns.heatmap(cm, annot=True, fmt=\"d\", cmap=\"Spectral\")\n ax.set_xlabel(\"predicted label\")\n ax.set_ylabel(\"true label\")\n fig.savefig(\"confusion_matrix\")\n\n fig, ax = plt.subplots()\n x = self.train[0] + self.test[0]\n y = self.train[1] + self.test[1]\n x = [i[0] for i in x]\n y = [i for i in y]\n results = pd.DataFrame({\"polarity strength\": x, \"true label\": y})\n sns.boxplot(data=results, x=\"true label\", y=\"polarity strength\")\n fig.savefig(\"boxplot\")", "def show_test_results(true_labels: List[int], predictions: List[int], class_names: List[str]):\n confusion_mtx = confusion_matrix(true_labels, predictions)\n plt.figure(figsize=(10, 8))\n sns.heatmap(confusion_mtx, xticklabels=class_names, yticklabels=class_names,\n annot=True, fmt='g')\n plt.xlabel('Prediction')\n plt.ylabel('Label')\n plt.title(\"Confusion matrix\")\n plt.show()\n print(classification_report(true_labels, predictions, target_names=class_names, digits=DIGITS))", "def plot_confusion_matrix(cm, classes, acc,\n cmap=plt.cm.Reds):\n\n plt.figure(figsize=(10, 8))\n title = 'Confusion Matrix (Accuracy: %0.3f%%)' % (acc * 100)\n print(cm)\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title, fontsize=16)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = 'd'\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n if i == 0:\n plt.text(j, i + 0.2, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"black\")\n elif i == cm.shape[1] - 1:\n plt.text(j, i - 0.2, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\")\n else:\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"black\")\n\n plt.ylabel('True label', fontsize=18)\n plt.xlabel('Predicted label', fontsize=18)\n plt.tight_layout()\n plt.show()", "def plot(self):\n plt.imshow(self.cm, interpolation='nearest', cmap=self.cmap)\n 
plt.title(self.title)\n plt.colorbar()\n tick_marks = np.arange(len(self.classes))\n plt.xticks(tick_marks, self.classes, rotation=45)\n plt.yticks(tick_marks, self.classes)\n \n if self.normalize:\n self.cm = self.cm.astype('float') / self.cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n \n print(self.cm)\n \n thresh = self.cm.max() / 2.\n for i, j in itertools.product(range(self.cm.shape[0]), range(self.cm.shape[1])):\n plt.text(j, i, self.cm[i, j], horizontalalignment=\"center\", color=\"white\" if self.cm[i, j] > thresh else \"black\")\n plt.tight_layout()\n plt.ylabel('True Label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(self, y_true, y_pred, title=None):\r\n\r\n if not title:\r\n title = 'confusion matrix'\r\n\r\n # Compute confusion matrix\r\n\r\n y_pred = np.array(y_pred)\r\n y_true = np.array(y_true)\r\n cm = confusion_matrix(y_true, y_pred)\r\n # Only use the labels that appear in the data\r\n classes = self.classes\r\n print('Confusion matrix')\r\n\r\n print(cm)\r\n fig2, ax = plt.subplots()\r\n im = ax.imshow(cm, interpolation='nearest')\r\n ax.figure.colorbar(im, ax=ax)\r\n # We want to show all ticks...\r\n ax.set(xticks=np.arange(cm.shape[1]),\r\n yticks=np.arange(cm.shape[0]),\r\n # ... and label them with the respective list entries\r\n xticklabels=classes, yticklabels=classes,\r\n title=title,\r\n ylabel='True label',\r\n xlabel='Predicted label')\r\n\r\n # Rotate the tick labels and set their alignment.\r\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\r\n rotation_mode=\"anchor\")\r\n\r\n # Loop over data dimensions and create text annotations.\r\n fmt = '.2f'\r\n thresh = cm.max() / 2.\r\n for i in range(cm.shape[0]):\r\n for j in range(cm.shape[1]):\r\n ax.text(j, i, format(cm[i, j], fmt),\r\n ha=\"center\", va=\"center\",\r\n color=\"white\" if cm[i, j] > thresh else \"black\")\r\n fig2.tight_layout()\r\n file_loc = [str(self.training_path) +\r\n '\\\\checkpoints\\\\confusion_matrix.jpg'] # NEED TO FIX\r\n s = \"\"\r\n s = s.join(file_loc)\r\n conf_path = Path(s)\r\n plt.savefig(conf_path)\r\n plt.show()\r\n\r\n return ax", "def plot_confusion_matrix(y_true, y_pred, classes,\n normalize=False,\n title=None,\n cmap= cm.Blues,\n save:bool = False):\n if not title:\n if normalize:\n title = 'Normalized confusion matrix'\n else:\n title = 'Confusion matrix, without normalization'\n\n # Compute confusion matrix\n cmat = confusion_matrix(y_true, y_pred,labels = classes)\n # Only use the labels that appear in the data\n if normalize:\n cmat = cmat.astype('float') / cmat.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cmat)\n\n fig, ax = plt.subplots()\n im = ax.imshow(cmat, interpolation='nearest', cmap=cmap)\n ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...\n ax.set(xticks=np.arange(cmat.shape[1]),\n yticks=np.arange(cmat.shape[0]),\n # ... 
and label them with the respective list entries\n xticklabels=classes, yticklabels=classes,\n title=title,\n ylabel='True label',\n xlabel='Predicted label')\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Loop over data dimensions and create text annotations.\n fmt = '.2f' if normalize else 'd'\n thresh = cmat.max() / 2.\n for i in range(cmat.shape[0]):\n for j in range(cmat.shape[1]):\n ax.text(j, i, format(cmat[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"white\" if cmat[i, j] > thresh else \"black\")\n fig.tight_layout()\n plt.show()\n if save:\n cwd=os.getcwd()\n fig.savefig(os.path.join(cwd, 'Keras\\\\Model_images', title +'_CM.png'))\n return ax", "def plot_cnf_matrix(y_pred, y_test):\n\tprint(\"\\t\\tGenerating confusion matrix\")\n\n\tmatrix = confusion_matrix(y_test, y_pred)\n\tclasses = [\"0\", \"1\"]\n\n\tplt.imshow(matrix, interpolation=\"nearest\", cmap=plt.cm.Blues)\n\tplt.title(\"Confusion Matrix\")\n\tplt.colorbar()\n\n\ttick_marks = np.arange(len(classes))\n\n\tplt.xticks(tick_marks, classes, rotation=45)\n\tplt.yticks(tick_marks, classes)\n\n\tthresh = matrix.max() / 2.0\n\tfor i, j in itertools.product(range(matrix.shape[0]), range(matrix.shape[1])):\n\t\tplt.text(j, i, format(matrix[i, j], \"d\"), horizontalalignment=\"center\", color=\"white\" if matrix[i, j] > thresh else \"black\")\n\n\tplt.tight_layout()\n\tplt.ylabel(\"True Label\")\n\tplt.xlabel(\"Predicted Label\")\n\n\tfilename = \"\"\n\n\t#Save the image in the current directory\n\tif COUNTER == 0:\n\t\tfilename = \"/img/log_reg_confusion_matrix.png\"\n\telif COUNTER == 1:\n\t\tfilename = \"/img/rand_forest_confusion_matrix.png\"\n\telse:\n\t\tfilename = \"/img/gbm_confusion_matrix.png\"\n\n\tplt.savefig(PARENT_DIR + filename, bbox_inches='tight')\n\tincrement_counter()", "def plot_confusion_matrix(y_true, y_pred, classes,\n normalize=False,\n title=None,\n cmap=plt.cm.Blues):\n if not title:\n if normalize:\n title = 'Normalized confusion matrix'\n else:\n title = 'Confusion matrix, without normalization'\n\n # Compute confusion matrix\n cm = confusion_matrix(y_true, y_pred)\n # Only use the labels that appear in the data\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n fig, ax = plt.subplots(figsize=(8, 8))\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n # ... 
and label them with the respective list entries\n xticklabels=classes, yticklabels=classes)\n ax.set_title(title,size = 20)\n ax.set_ylabel('True label',size = 20)\n ax.set_xlabel('Predicted label',size = 20)\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\",size = 18)\n plt.setp(ax.get_yticklabels(),size = 18)\n\n # Loop over data dimensions and create text annotations.\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n fig.tight_layout()\n name = OUTFOLDER + \"/confusion_matrix_batch%d_layers%d_epochs%d_f1%d\" % (BATCH_SIZE,LAYERS,EPOCHS,f1_mean_test*100)\n if normalize:\n name = name + \"_norm\"\n plt.savefig(name)\n plt.close()\n return ax", "def plot_confusion_matrix(cm, y_test, y_pred, class_names,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print('\\n')\n print(\"Normalized confusion matrix\")\n else:\n print('\\n')\n print('Confusion matrix, without normalization')\n print_cm(cm, class_names)\n text_labels = [['True Negative', 'False Positive'],\n ['False Negative', 'True Positive']]\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(class_names))\n plt.xticks(tick_marks, class_names, rotation=45)\n plt.yticks(tick_marks, class_names)\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i - 0.1, format(cm[i, j], fmt),\n verticalalignment='bottom',\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n plt.text(j, i + 0.1, text_labels[i][j],\n verticalalignment='top',\n horizontalalignment=\"center\",\n fontsize=12,\n color=\"white\" if cm[i, j] > thresh else \"black\")\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n\n # Print accuracy and precision\n print('Accuracy: ', accuracy_score(y_test, y_pred, normalize=True))\n print('Precision: ', precision_score(y_test, y_pred, average='macro'))\n print('Roc-Auc: ', roc_auc_score(y_test, y_pred))\n # Plot non-normalized confusion matrix", "def conf_matrix(model, X_train, y_train, X_test, y_test, train=True):\n from sklearn.metrics import confusion_matrix\n import itertools\n if train==True: \n ypredTrain = model.predict(X_train)\n cm = confusion_matrix(y_train, ypredTrain)\n def plot_conf_matrix(cm, classes, title='Confusion Matrix', cmap=plt.cm.Reds):\n plt.figure(figsize = (5, 5))\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title, size = 14)\n plt.colorbar(aspect=4)\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=0, size = 10)\n plt.yticks(tick_marks, classes, size = 10)\n fmt = 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt), fontsize = 14,\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n plt.grid(b=None)\n plt.tight_layout()\n plt.ylabel('True label', size = 12)\n plt.xlabel('Predicted label', size = 12)\n plot_conf_matrix(cm, classes = ['Covid-', 'Covid+'], \n title = 'Confusion Matrix\\n\\n(Train)\\n')\n elif train==False:\n ypredTest 
= model.predict(X_test)\n cm = confusion_matrix(y_test, ypredTest)\n def plot_conf_matrix(cm, classes, title='Confusion Matrix', cmap=plt.cm.Blues):\n plt.figure(figsize = (5, 5))\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title, size = 14)\n plt.colorbar(aspect=4)\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=0, size = 10)\n plt.yticks(tick_marks, classes, size = 10)\n fmt = 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt), fontsize = 14,\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n plt.grid(b=None)\n plt.tight_layout()\n plt.ylabel('True label', size = 12)\n plt.xlabel('Predicted label', size = 12)\n plot_conf_matrix(cm, classes = ['Covid-', 'Covid+'], \n title = 'Confusion Matrix\\n\\n(Test)\\n')", "def plot_confusion_matrix(cm, class_names):\n figure = plt.figure(figsize=(10, 10))\n plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)\n plt.title(\"Confusion matrix\")\n plt.colorbar()\n tick_marks = np.arange(len(class_names))\n plt.xticks(tick_marks, class_names, rotation=45)\n plt.yticks(tick_marks, class_names)\n \n\n # Normalize the confusion matrix.\n #cm = np.around(cm.astype('float') / cm.sum(axis=1)[:, np.newaxis], decimals=2)\n\n # Use white text if squares are dark; otherwise black.\n #threshold = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n #print(cm[i, j])\n color = \"black\"\n plt.text(j, i, cm[i, j], horizontalalignment=\"center\", color=color)\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n return figure", "def plot_conf_mat(y_test, y_preds):\n sns.set(font_scale=1.5) # Increase font size\n fig, ax = plt.subplots(figsize=(3, 3))\n ax = sns.heatmap(confusion_matrix(y_test, y_preds),\n annot=True, # Annotate the boxes\n cbar=False)\n plt.xlabel(\"Predicted label\") # predictions go on the x-axis\n plt.ylabel(\"True label\") # true labels go on the y-axis\n return fig, ax", "def plot_confusion_matrix(\n y_true, y_pred, classes, normalize=True, title=\"Confusion matrix\", cmap=plt.cm.Blues\n):\n cm = confusion_matrix(y_true, y_pred)\n\n if normalize:\n cm = cm.astype(\"float\") / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print(\"Confusion matrix, without normalization\")\n\n plt.imshow(cm, interpolation=\"nearest\", cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = \".2f\" if normalize else \"d\"\n thresh = cm.max() / 2.0\n for i, j in product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(\n j,\n i,\n format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\",\n )\n\n plt.tight_layout()\n plt.ylabel(\"True label\")\n plt.xlabel(\"Predicted label\")", "def get_accuracy_and_plot_confusion(y_correct, y_pred, classes, plot=True, title='Confusion matrix'):\n if plot:\n cm = confusion_matrix(y_correct, y_pred)\n plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n 
color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.show()\n return accuracy_score(y_correct, y_pred)", "def plot_confusion_matrix(cm_res):\n fig, ax = plt.subplots(figsize=(5,5))\n ax.matshow(cm_res, cmap=plt.cm.Blues, alpha=0.3)\n for i in range(cm_res.shape[0]):\n for j in range(cm_res.shape[1]):\n ax.text(x=j, y=i,s=cm_res[i, j], va='center', ha='center')\n\n plt.xlabel('predicted label')\n plt.ylabel('true label')", "def plot_confusion_matrix(y_true, y_pred, classes,\n normalize=True,\n title=None,\n cmap=plt.cm.Blues):\n if not title:\n if normalize:\n title = 'Normalized confusion matrix (in %)'\n else:\n title = 'Confusion matrix, without normalization'\n\n # Compute confusion matrix\n cm = confusion_matrix(y_true, y_pred)\n # Only use the labels that appear in the data\n #tmp = unique_labels(y_true, y_pred)\n #classes = classes[tmp]\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n cm = cm * 100 # affichage en % pour une meilleure visibilité\n print(\"Normalized confusion matrix (in %)\")\n else:\n print('Confusion matrix, without normalization')\n\n #print(cm)\n\n fig, ax = plt.subplots()\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n # ... and label them with the respective list entries\n xticklabels=classes, yticklabels=classes,\n title=title,\n ylabel='True label',\n xlabel='Predicted label',\n # xlim = (-0.5,len(classes)-0.5),\n # ylim = (-0.5,len(classes)-0.5)\n )\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=90, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Loop over data dimensions and create text annotations.\n # fmt = '.2f' if normalize else 'd'\n fmt = '.0f' if normalize else 'd' #pour une meilleure visibilité\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n #fig.tight_layout()\n plt.show()\n return ax", "def plotConfusionMatrix(y, pred, title, labels, outfile, cmap=plt.cm.Blues):\n \n cm = confusion_matrix(y, pred);\n ncm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n accuracy = accuracy_score(y, pred)\n \n fig = plt.figure(figsize=(10, 10))\n plt.imshow(ncm, interpolation='nearest', cmap=cmap, vmin=0, vmax=1)\n plt.title(title+\" Acc: \"+str(accuracy)+\")\")\n plt.colorbar()\n for i in range(0,len(labels)):\n for j in range(0,len(labels)):\n plt.text(j,i,cm[i,j],va='center',ha='center')\n tick_marks = np.arange(len(labels))\n plt.xticks(tick_marks, labels, rotation=45)\n plt.yticks(tick_marks, labels)\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n pdfplot = PdfPages(outfile);\n pdfplot.savefig(fig)\n pdfplot.close()", "def plot_confusion_matrix(y_true, predictions, classes,\r\n normalize=False,\r\n title=None,\r\n cmap=plt.cm.Blues):\r\n if not title:\r\n if normalize:\r\n title = 'Normalized confusion matrix'\r\n else:\r\n title = 'Confusion matrix, without normalization'\r\n\r\n # Compute confusion matrix\r\n cm = sklearn.metrics.confusion_matrix(y_true, predictions)\r\n # Only use the labels that appear in the data\r\n classes = classes[unique_labels(y_true, predictions)]\r\n if normalize:\r\n cm = cm.astype('float') / cm.sum(axis=1)[:, 
np.newaxis]\r\n print(\"Normalized confusion matrix\")\r\n else:\r\n print('Confusion matrix, without normalization')\r\n\r\n print(cm)\r\n\r\n fig, ax = plt.subplots()\r\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\r\n ax.figure.colorbar(im, ax=ax)\r\n # We want to show all ticks...\r\n ax.set(xticks=np.arange(cm.shape[1]),\r\n yticks=np.arange(cm.shape[0]),\r\n # ... and label them with the respective list entries\r\n xticklabels=classes, yticklabels=classes,\r\n title=title,\r\n ylabel='True label',\r\n xlabel='Predicted label')\r\n\r\n # Rotate the tick labels and set their alignment.\r\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\r\n rotation_mode=\"anchor\")\r\n\r\n # Loop over data dimensions and create text annotations.\r\n fmt = '.2f' if normalize else 'd'\r\n thresh = cm.max() / 2.\r\n for i in range(cm.shape[0]):\r\n for j in range(cm.shape[1]):\r\n ax.text(j, i, format(cm[i, j], fmt),\r\n ha=\"center\", va=\"center\",\r\n color=\"white\" if cm[i, j] > thresh else \"black\")\r\n fig.tight_layout()\r\n return ax", "def plot_confusion_matrix(y_true, y_pred, classes,\n normalize=False,\n title=None,\n cmap=plt.cm.Blues):\n if not title:\n if normalize:\n title = 'Normalized confusion matrix'\n else:\n title = 'Confusion matrix, without normalization'\n\n # Compute confusion matrix\n cm = confusion_matrix(y_true, y_pred)\n\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n fig, ax = plt.subplots()\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n # ... 
and label them with the respective list entries\n xticklabels=classes, yticklabels=classes,\n title=title,\n ylabel='True label',\n xlabel='Predicted label')\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Loop over data dimensions and create text annotations.\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n fig.tight_layout()\n plt.show()", "def plot_confusion_matrices(val_labels, val_predictions, model_path=None):\n matrices = []\n class_names = [\n 'toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate'\n ]\n # transform possibilities to labels of classes\n val_preds = np.zeros(val_predictions.shape, dtype=int)\n val_preds[val_predictions > 0.5] = 1\n # build set of confusion matrices\n for cl in range(val_labels.shape[1]):\n y_true = val_labels[:, cl].tolist()\n y_pred = val_preds[:, cl].tolist()\n matrices.append(confusion_matrix(y_true, y_pred))\n for idx in range(len(matrices)):\n cur_matr = matrices[idx]\n _, ax = plt.subplots(figsize=(8, 8))\n ax.matshow(cur_matr, cmap=plt.cm.Blues, alpha=0.2)\n plt.title(class_names[idx])\n plt.ylabel('real class', fontsize=16)\n plt.xlabel('predicted class', fontsize=16)\n # fill the plot of data from confusion matrix\n for i in range(cur_matr.shape[0]):\n for j in range(cur_matr.shape[1]):\n ax.text(\n x=j,\n y=i,\n s=cur_matr[i, j],\n va='center',\n ha='center',\n fontsize=24)\n file_name = '{}_confusion_matrix.png'.format(str(class_names[idx]))\n plt.savefig(path.join(model_path, file_name))\n plt.clf()", "def plot_confusion_matrix(y_true, y_pred, classes,\n normalize=False,\n title=None,\n cmap=plt.cm.Blues):\n if not title:\n if normalize:\n title = 'Normalized confusion matrix'\n else:\n title = 'Confusion matrix, without normalization'\n\n # Compute confusion matrix\n cm = confusion_matrix(y_true, y_pred)\n # Only use the labels that appear in the data\n #classes = classes[unique_labels(y_true, y_pred)]\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n fig, ax = plt.subplots()\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n # ... 
and label them with the respective list entries\n xticklabels=classes, yticklabels=classes,\n title=title,\n ylabel='True label',\n xlabel='Predicted label')\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=90, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Loop over data dimensions and create text annotations.\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n fig.tight_layout()\n fig.set_size_inches(18.5, 10.5)\n return ax", "def plot_confusion_matrix(y_true, y_pred, classes,\n normalize=False,\n title=None,\n cmap=plt.cm.Blues):\n if not title:\n if normalize:\n title = 'Normalized confusion matrix'\n else:\n title = 'Confusion matrix, without normalization'\n\n # Compute confusion matrix\n cm = confusion_matrix(y_true, y_pred)\n # Only use the labels that appear in the data\n classes = classes[unique_labels(y_true, y_pred)]\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n fig, ax = plt.subplots()\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n # ... and label them with the respective list entries\n xticklabels=classes, yticklabels=classes,\n title=title,\n ylabel='True label',\n xlabel='Predicted label')\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Loop over data dimensions and create text annotations.\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n fig.tight_layout()\n return fig, ax", "def plot_cnf_matrix(self):\n\t\tplt.figure()\n\t\tanalysis.plot_cnf_matrix(self.y_pred, self.y_test)" ]
[ "0.7954949", "0.79269266", "0.7894503", "0.7827766", "0.78108096", "0.7801344", "0.7790977", "0.77880204", "0.7729956", "0.7723762", "0.77151185", "0.76916933", "0.76525074", "0.7649402", "0.7639235", "0.7628763", "0.75990075", "0.7597474", "0.7595721", "0.7591373", "0.7587811", "0.7586193", "0.7573638", "0.75609666", "0.7545295", "0.75328606", "0.7528659", "0.7514266", "0.7509096", "0.75004107" ]
0.8134829
0
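The positive document in the row above relies on fastai's ClassificationInterpretation to draw the confusion matrix from a trained learner. As a standalone illustration of the same idea without a fastai learner, here is a minimal sketch using scikit-learn's ConfusionMatrixDisplay (an assumed alternative, not the method used in the row); the labels and predictions are made-up placeholders:

import matplotlib.pyplot as plt
from sklearn.metrics import ConfusionMatrixDisplay, confusion_matrix

# Placeholder ground truth and model predictions for a binary classifier.
y_true = [0, 1, 1, 0, 1, 0, 1, 1]
y_pred = [0, 1, 0, 0, 1, 1, 1, 1]

# Build the confusion matrix and render it as a heatmap with class labels.
cm = confusion_matrix(y_true, y_pred)
ConfusionMatrixDisplay(cm, display_labels=["negative", "positive"]).plot(cmap=plt.cm.Blues)
plt.title("Confusion matrix")
plt.show()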
Classifies the labeled tiles and updates the feature layer with the prediction results with column output_label_field.

====================================    ====================================================================
Argument                                Description
feature_layer                           Required. Feature Layer for classification.
labeled_tiles_directory                 Required. Folder structure containing images and labels folder.
                                        The chips should have been generated using the export training
                                        data tool in the Labeled Tiles format, and the labels should
                                        contain the OBJECTIDs of the features to be classified.
input_label_field                       Required. Value field name which created the labeled tiles.
                                        This field should contain the OBJECTIDs of the features to be
                                        classified.
output_label_field                      Required. Output column name to be added in the layer which
                                        contains predictions.
confidence_field                        Optional. Output column name to be added in the layer which
                                        contains the confidence score.
====================================    ====================================================================
def classify_features(self, feature_layer, labeled_tiles_directory, input_label_field, output_label_field, confidence_field=None):
    ALLOWED_FILE_FORMATS = ['tif', 'jpg', 'png']
    IMAGES_FOLDER = 'images/'
    LABELS_FOLDER = 'labels/'

    files = []
    for ext in ALLOWED_FILE_FORMATS:
        files.extend(glob.glob(os.path.join(labeled_tiles_directory, IMAGES_FOLDER + '*.' + ext)))

    predictions = {}
    for file in files:
        xml_path = os.path.join(os.path.dirname(os.path.dirname(file)),
                                os.path.join(LABELS_FOLDER, os.path.basename(file).split('.')[0] + '.xml'))
        if not os.path.exists(xml_path):
            continue
        tree = ElementTree.parse(xml_path)
        root = tree.getroot()
        name_field = root.findall('object/name')
        if len(name_field) != 1:
            continue
        file_prediction = self.predict(file)
        predictions[name_field[0].text] = {
            'prediction': file_prediction[0].obj,
            'score': str(file_prediction[2].data.max().tolist())
        }

    features = feature_layer.query().features
    features_to_update = []
    for feature in features:
        if predictions.get(str(feature.attributes[input_label_field])):
            feature.attributes[output_label_field] = predictions.get(str(feature.attributes[input_label_field]))['prediction']
            if confidence_field:
                feature.attributes[confidence_field] = predictions.get(str(feature.attributes[input_label_field]))['score']
            features_to_update.append(feature)

    field_template = {
        "name": output_label_field,
        "type": "esriFieldTypeString",
        "alias": output_label_field,
        "sqlType": "sqlTypeOther",
        "length": 256,
        "nullable": True,
        "editable": True,
        "visible": True,
        "domain": None,
        "defaultValue": ''
    }

    confidence_field_template = {
        "name": confidence_field,
        "type": "esriFieldTypeString",
        "alias": confidence_field,
        "sqlType": "sqlTypeOther",
        "length": 256,
        "nullable": True,
        "editable": True,
        "visible": True,
        "domain": None,
        "defaultValue": ''
    }

    feature_layer.manager.add_to_definition({'fields': [field_template]})
    if confidence_field:
        feature_layer.manager.add_to_definition({'fields': [confidence_field_template]})

    try:
        start = 0
        stop = 100
        count = 100
        features_updated = features_to_update[start:stop]
        feature_layer.edit_features(updates=features_updated)
        time.sleep(2)
        while count == len(features_updated):
            start = stop
            stop = stop + 100
            features_updated = features_to_update[start:stop]
            feature_layer.edit_features(updates=features_updated)
            time.sleep(2)
    except Exception as e:
        feature_layer.manager.delete_from_definition({'fields': [field_template]})
        if confidence_field:
            feature_layer.manager.delete_from_definition({'fields': [confidence_field_template]})
        return False

    return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def extract_labels(filename, num_images, starting_id, context_factor):\n gt_imgs = []\n for i in range(starting_id, num_images+starting_id):\n imageid = \"satImage_%.3d\" % i\n image_filename = filename + imageid + \".png\"\n if os.path.isfile(image_filename):\n print ('Loading ' + image_filename)\n img = mpimg.imread(image_filename)\n gt_imgs.append(img)\n else:\n print ('File ' + image_filename + ' does not exist')\n\n num_images = len(gt_imgs)\n # it means that we base our labels only on the core of the patch, not including the contet added\n context_factor = 0\n gt_patches = [img_crop_context(gt_imgs[i], IMG_PATCH_SIZE, IMG_PATCH_SIZE,context_factor) for i in range(num_images)]\n data = np.asarray([gt_patches[i][j] for i in range(len(gt_patches)) for j in range(len(gt_patches[i]))])\n labels = np.asarray([value_to_class(np.mean(data[i])) for i in range(len(data))])\n\n # Convert to dense 1-hot representation.\n return labels.astype(np.float32)", "def extract_labels(f, one_hot=False, num_classes=10):\n\tprint('Extracting', f.name)\n\twith gzip.GzipFile(fileobj=f) as bytestream:\n\t\tmagic = _read32(bytestream)\n\t\tif magic != 2049:\n\t\t\traise ValueError('Invalid magic number %d in MNIST label file: %s' %\n\t\t\t\t\t\t\t\t\t\t\t (magic, f.name))\n\t\tnum_items = _read32(bytestream)\n\t\tbuf = bytestream.read(num_items)\n\t\tlabels = numpy.frombuffer(buf, dtype=numpy.uint8)\n\t\tif one_hot:\n\t\t\treturn dense_to_one_hot(labels, num_classes)\n\t\treturn labels", "def __convert_labeled_featuresets(self, labeled_featuresets, output):\n\n\t\tif isinstance(output, str):\n\t\t\toutput = open(output,'w')\n\t\telif not isinstance(output, file):\n\t\t\traise TypeError('output is a str or a file.')\n\n\t\tfor featureset, label in labeled_featuresets:\n\t\t\tfeat, label = self.__text_converter.toSVM(\" \".join(featureset), label)\n\t\t\tfeat = ''.join(' {0}:{1}'.format(f,feat[f]) for f in sorted(feat))\n\t\t\tif label == None:\n\t\t\t\tlabel = -1\n\t\t\toutput.write(str(label) + ' ' + feat + '\\n')\n\t\toutput.close()", "def extract_labels(f, one_hot=False, num_classes=10):\n print('Extracting', f.name)\n with gzip.GzipFile(fileobj=f) as bytestream:\n magic = _read32(bytestream)\n if magic != 2049:\n raise ValueError('Invalid magic number %d in MNIST label file: %s' %\n (magic, f.name))\n num_items = _read32(bytestream)\n buf = bytestream.read(num_items)\n labels = numpy.frombuffer(buf, dtype=numpy.uint8)\n if one_hot:\n return dense_to_one_hot(labels, num_classes)\n return labels", "def extract_labels(f, one_hot=False, num_classes=10):\n print('Extracting', f.name)\n with gzip.GzipFile(fileobj=f) as bytestream:\n magic = _read32(bytestream)\n if magic != 2049:\n raise ValueError('Invalid magic number %d in MNIST label file: %s' %\n (magic, f.name))\n num_items = _read32(bytestream)\n buf = bytestream.read(num_items)\n labels = np.frombuffer(buf, dtype=np.uint8)\n if one_hot:\n return dense_to_one_hot(labels, num_classes)\n return labels", "def extract_labels(f, one_hot=False, num_classes=10):\n print('Extracting', f.name)\n with gzip.GzipFile(fileobj=f) as bytestream:\n magic = _read32(bytestream)\n if magic != 2049:\n raise ValueError('Invalid magic number %d in MNIST label file: %s' %\n (magic, f.name))\n num_items = _read32(bytestream)\n buf = bytestream.read(num_items)\n labels = np.frombuffer(buf, dtype=np.uint8)\n if one_hot:\n return dense_to_one_hot(labels, num_classes)\n return labels", "def extract_labels(f, one_hot=False, num_classes=10):\n print('Extracting', f.name)\n with 
gzip.GzipFile(fileobj=f) as bytestream:\n magic = _read32(bytestream)\n if magic != 2049:\n raise ValueError('Invalid magic number %d in MNIST label file: %s' %\n (magic, f.name))\n num_items = _read32(bytestream)\n buf = bytestream.read(num_items)\n labels = np.frombuffer(buf, dtype=np.uint8)\n if one_hot:\n return dense_to_one_hot(labels, num_classes)\n return labels", "def label_file(input_file):\n file_name, file_ext = os.path.splitext(input_file)\n output_file = file_name + \".label\" + file_ext\n\n # read input file and save them in dict\n features = load_protobuf(input_file)\n\n # for each obstacle ID, sort dict by their timestamp\n fea_trajs = build_trajectory(features)\n\n # for each obstacle ID, label them, remove record cannot be labeled\n for fea_key, fea_traj in fea_trajs.items():\n fea_traj = fea_trajs[fea_key]\n fea_traj = TrajectoryToSample.clean(fea_traj)\n fea_traj = TrajectoryToSample.label(fea_traj)\n for i, fea in enumerate(fea_traj):\n if not fea.HasField('label_update_time_delta'):\n del fea_traj[i]\n continue\n if fea.label_update_time_delta < parameters['feature']['threshold_label_time_delta']:\n del fea_traj[i]\n fea_trajs[fea_key] = fea_traj\n # save them in the output file with the same format as the input file\n save_protobuf(output_file, fea_trajs.values())", "def extract_labels(f, one_hot=False, num_classes=10):\n with gzip.GzipFile(fileobj=f) as bytestream:\n magic = _read32(bytestream)\n if magic != 2049:\n raise ValueError('Invalid magic number %d in MNIST label file: %s' %\n (magic, f.name))\n num_items = _read32(bytestream)\n buf = bytestream.read(num_items)\n labels = numpy.frombuffer(buf, dtype=numpy.uint8)\n if one_hot:\n return dense_to_one_hot(labels, num_classes)\n return labels", "def mapping_image_to_label (self, labels_df, polygons, fpath_tiff): \n \n unread_tiff = rasterio.open(fpath_tiff)\n\n #Projecting the coordinates to that CRS \n proj = Proj(init='epsg:32618')\n data = []\n labels = []\n failed = []\n \n src = rasterio.open(fpath_tiff, 'r')\n outfolder = '/train/batch'\n \n print (\"Hold on tight! 
Mapping each image to its respective label...\")\n \n \n for num, row in labels_df.iterrows():\n try:\n \n \n roof_material_num = 0\n polygon0 = polygons [num]\n polygon0['coordinates'] = self.transforming_coordinates(polygon0['coordinates'], proj)\n masked_image, out_transform = rasterio.mask.mask(src,[polygon0], filled = True, crop=True, nodata = 0)\n img_image = reshape_as_image (masked_image)\n \n #Defining the name of the image file as \"buildingID+roofMaterial+png\" and its path \n img_path = os.path.join (outfolder, str (row['id'])+'-'+ str (row['roof_material'])+'.png')\n \n #swapping the color channels from RGB2BGR\n img_image = cv2.cvtColor (img_image, cv2.COLOR_RGB2BGR) #img_image is a numpy array\n \n #resizing the image dimensions to 128x128 to match ImageNet dimensions\n img_image = cv2.resize(img_image, (128, 128))\n \n #writing the image in the file\n #cv2.imwrite (img_path, img_image)\n # update the data and labels lists, respectively\n data.append(img_image) #data is a list\n labels.append(row['roof_material'])\n \n except Exception as e:\n print (e)\n failed.append (num)\n \n \n #print number of images we failed to crop and write \n print (\"Bad News First: Failed to write\", len(failed), \"Images.\")\n print (\"Good News: Successfully mapped\", len (data), \"Images.\")\n data = np.array(data)\n labels = np.array(labels)\n #batch = data.sample(frac=0.5, replace=False, random_state=1)\n #print(\"Size and shape of validY: {}\\n\".format(batch.shape))\n return data, labels", "def _run_labelled_extraction(self, dataset_type: DatasetType, device: str) -> None:\n dataset = self.image_datasets.get_dataset(dataset_type)\n self.extractor_model = self.extractor_model.to(device)\n\n filenames = []\n labels = []\n for i in tqdm(\n range(len(dataset)), desc=\"Extracting features - \" + dataset_type.name\n ):\n image, image_label, filename = dataset.getitem_filename(i)\n # Extract tensor and save\n feature_tensor = self.extractor_model(image.unsqueeze(0).to(device))\n self._save_tensor(dataset_type, feature_tensor, filename)\n filenames.append(filename)\n labels.append(image_label)\n\n # Save labels file\n labels_filepath = self.get_labels_filepath(dataset_type)\n with open(labels_filepath, \"w+\") as file:\n csv_writer = csv.writer(file)\n for filename, label in zip(filenames, labels):\n csv_writer.writerow([filename, label])", "def __get_labels(self):\n\n uncertain_pairs_index = self.__query_pairs()\n\n to_label_raw = self.all_raw_data.loc[uncertain_pairs_index]\n to_label_features = self.all_features.loc[uncertain_pairs_index]\n\n # Remove uncertain pairs from the candidate pool\n self.all_features.drop(uncertain_pairs_index, axis=0, inplace=True)\n\n labels_list = []\n for index, row in to_label_raw.iterrows():\n\n print(\"\\n{0:30}\\t{1}\\n{2:30}\\t{3}\\n{4:30}\\t{5}\\n{6:30}\\t{7}\\n\".format(row.name_a, row.name_b,\n row.address_a, row.address_b,\n row.zip_a, row.zip_b,\n row.city_a, row.city_b))\n\n\n label = self.__user_input(\"Is this a match? 
(0/1)\")\n labels_list.append((index, label))\n\n labels_index = [index for index, label in labels_list]\n labels_values = [label for index, label in labels_list]\n\n # Create dataframe with index and labels\n add_labels = pd.Series(labels_values, index=labels_index, name='label')\n\n # Union the new training set to the full training set\n self.labeled_features = pd.concat([self.labeled_features, to_label_features], axis = 0, ignore_index=False)\n self.labeled_labels = pd.concat([self.labeled_labels, add_labels], axis = 0, ignore_index=False)\n\n return self", "def write_feature_labels(output, feature_labels):\n with open(os.path.join(output, 'features.list'), 'w') as out_file:\n out_file.write('\\n'.join(feature_labels))", "def extract_labels(filename, num_images):\n gt_imgs = []\n for i in range(1, num_images+1):\n imageid = \"satImage_%.3d\" % i\n image_filename = filename + imageid + \".png\"\n if os.path.isfile(image_filename):\n print ('Loading ' + image_filename)\n img = mpimg.imread(image_filename)\n gt_imgs.append(img)\n else:\n print ('File ' + image_filename + ' does not exist')\n\n num_images = len(gt_imgs)\n gt_patches = [img_crop(gt_imgs[i], IMG_PATCH_SIZE, IMG_PATCH_SIZE, 0, False) for i in range(num_images)]\n data = numpy.asarray([gt_patches[i][j] for i in range(len(gt_patches)) for j in range(len(gt_patches[i]))])\n labels = numpy.asarray([value_to_class(numpy.mean(data[i])) for i in range(len(data))])\n\n # Convert to dense 1-hot representation.\n return labels.astype(numpy.float32)", "def _process_features(self, limit):\n\n if self.testMode:\n g = self.testgraph\n else:\n g = self.graph\n model = Model(g)\n raw = '/'.join((self.rawdir, 'feature'))\n logger.info(\"building labels for features\")\n\n line_counter = 0\n with open(raw, 'r') as f:\n filereader = csv.reader(f, delimiter='\\t', quotechar='\\\"')\n f.readline() # read the header row; skip\n for line in filereader:\n (feature_id, dbxref_id, organism_id, name, uniquename,\n residues, seqlen, md5checksum, type_id, is_analysis,\n timeaccessioned, timelastmodified) = line\n\n feature_key = feature_id\n if re.search(r'[\\|\\s\\[\\]\\{\\}\\\\<\\>]', uniquename):\n # some uniquenames have pipes or other nasty chars!\n # for example: FB||||FBrf0133242|Hugh-u1\n feature_id = self._makeInternalIdentifier(\n 'feature', feature_key)\n else:\n feature_id = 'FlyBase:'+uniquename\n self.idhash['feature'][feature_key] = feature_id\n self.feature_types[feature_key] = type_id\n self.label_hash[feature_id] = name\n\n if feature_key not in self.feature_to_organism_hash:\n self.feature_to_organism_hash[feature_key] = set()\n self.feature_to_organism_hash[feature_key].add(organism_id)\n\n # HACK - FBgn are genes, and therefore classes,\n # all else be individuals\n is_gene = False\n if re.search(r'(FBgn|FBog)', feature_id):\n self.idhash['gene'][feature_key] = feature_id\n is_gene = True\n elif re.search(r'FBa[lb]', feature_id):\n self.idhash['allele'][feature_key] = feature_id\n elif re.search(r'FBt[ip]', feature_id):\n self.idhash['feature'][feature_key] = feature_id\n\n if self.testMode and \\\n int(feature_key) not in self.test_keys['gene'] + \\\n self.test_keys['allele'] + self.test_keys['feature']:\n continue\n\n # now do something with it!\n # switch on type_id\n if name.strip() == '':\n name = uniquename\n\n type_key = type_id\n type_id = self.idhash['cvterm'][type_key]\n\n # skip some features by type\n types_to_skip = [\n 'SO:0000316', # CDS\n 'SO:0000696', # oligos\n 'SO:0000358', # polypeptide\n 'SO:0000234', # 
transcripts\n ]\n\n type_keys_to_skip = [\n 596, # pcr_product\n 57096, # mature peptide\n 57097, # signal_peptide\n 57270, # repeat masker\n 58210, # alignment\n 59643, # cDNA_clone\n 60006, # uncharacterized_change_in_nucleotide_sequence\n 61351, # oligo\n 61467, # polypeptide_domain\n 257, # exon\n 286, # intron\n ]\n\n organisms_to_skip = [\n 2 # computational result\n ]\n\n if type_id in types_to_skip \\\n or int(type_key) in type_keys_to_skip\\\n or int(organism_id) in organisms_to_skip:\n continue\n\n line_counter += 1\n\n if int(type_key) == 604: # RNAi_reagent\n # TODO add other reagents?\n self.idhash['reagent'][feature_key] = feature_id\n\n # deal with the taxonomy\n # only get taxa for features that are actually used in our set\n tax_internal_id = self._makeInternalIdentifier(\n 'organism', organism_id)\n if organism_id not in self.checked_organisms:\n # will get the NCBITax if necessary\n tax_id = self._get_organism_id(organism_id)\n self.checked_organisms.add(organism_id)\n else:\n tax_id = self.idhash['organism'][organism_id]\n\n tax_label = self.label_hash.get(tax_id)\n if not re.search(r'FBog', feature_id) \\\n and re.search(r'Drosophila', tax_label):\n # make only fly things leaders\n model.makeLeader(feature_id)\n\n if not self.testMode \\\n and limit is not None and line_counter > limit:\n pass\n else:\n if is_gene:\n model.addClassToGraph(\n feature_id, name, type_id)\n g.addTriple(\n feature_id, model.object_properties['in_taxon'],\n tax_id)\n else:\n if re.search('FBa[lb]', feature_id):\n type_id = Genotype.genoparts['allele']\n model.addIndividualToGraph(feature_id, name, type_id)\n\n # stop adding what we do not appreciate\n # if is_obsolete == 't':\n # if is_gene:\n # model.addDeprecatedClass(feature_id)\n # else:\n # model.addDeprecatedIndividual(feature_id)\n # self.deprecated_features.add(feature_key)\n\n model.addClassToGraph(tax_id)\n if tax_id != tax_internal_id:\n model.addEquivalentClass(tax_id, tax_internal_id)\n\n model.addComment(\n feature_id,\n self._makeInternalIdentifier('feature', feature_key))\n\n # TODO save checked_organisms fbid to ncbitax mapping to\n # a local file to speed up subsequent searches\n\n return", "def extract_labels(filename,tag,one_hot):\n print('Extracting labels',filename)\n return extractdb_labels(filename,tag,one_hot=one_hot)", "def feature_layer_op(self, inputs: Dict[str, Input]):\n train_features, metadata_features = define_feature_layer(\n feature_config=self.feature_config,\n tfrecord_type=self.tfrecord_type,\n feature_layer_map=self.feature_layer_map,\n file_io=self.file_io,\n )(inputs)\n\n return train_features, metadata_features", "def main():\n feature_extraction_model = \"HOG\"\n # feature_extraction_models = [\"CM\", \"HOG\"]\n feature_extraction_model_1 = \"CM\"\n dimension_reduction_model = \"PCA\"\n k_value = 10\n dim_k_value = 40\n # K_value = 20\n # lab_folder = \"Dataset3/Labelled/Set1\"\n # unlab_folder = \"Dataset3/Unlabelled/Set 2\"\n lab_folder = get_input_folder(\"Labelled Folder\")\n unlab_folder = get_input_folder(\"Classify\")\n start = time.time()\n # ================================================================================================================\n # labelled Images\n dim_red = DimensionReduction(feature_extraction_model, dimension_reduction_model, dim_k_value,\n folder_metadata=lab_folder,\n metadata_collection=\"labelled\")\n obj_feat_lab = dim_red.get_object_feature_matrix()\n features_list_lab = np.array(obj_feat_lab['featureVector'].tolist())\n images_list_lab = 
np.array(obj_feat_lab['imageId'])\n # filtering the labelled set\n dorsal_list, palmar_list = filter_images_by_label(images_list_lab)\n\n # unlabelled images\n dim_red = DimensionReduction(feature_extraction_model, dimension_reduction_model, dim_k_value,\n folder_metadata=unlab_folder,\n metadata_collection=\"unlabelled\")\n obj_feat_unlab = dim_red.get_object_feature_matrix()\n features_list_unlab = np.array(obj_feat_unlab['featureVector'].tolist())\n images_list_unlab = np.array(obj_feat_unlab['imageId'])\n\n # ================================================================================================================\n # labelled Images\n dim_red = DimensionReduction(feature_extraction_model_1, dimension_reduction_model, dim_k_value,\n folder_metadata=lab_folder,\n metadata_collection=\"labelled\")\n obj_feat_lab_1 = dim_red.get_object_feature_matrix()\n features_list_lab_1 = np.array(obj_feat_lab_1['featureVector'].tolist())\n # images_list_lab = np.array(obj_feat_lab_1['imageId'])\n # filtering the labelled set\n\n\n # unlabelled images\n dim_red = DimensionReduction(feature_extraction_model_1, dimension_reduction_model, dim_k_value,\n folder_metadata=unlab_folder,\n metadata_collection=\"unlabelled\")\n obj_feat_unlab_1 = dim_red.get_object_feature_matrix()\n features_list_unlab_1 = np.array(obj_feat_unlab_1['featureVector'].tolist())\n # images_list_unlab = np.array(obj_feat_unlab['imageId'])\n features_list_lab = np.concatenate((features_list_lab, features_list_lab_1), axis=1)\n features_list_unlab = np.concatenate((features_list_unlab, features_list_unlab_1), axis=1)\n\n # ================================================================================================================\n\n dorsal_list, palmar_list = filter_images_by_label(images_list_lab)\n features_list = np.concatenate((features_list_lab, features_list_unlab))\n images_list = np.concatenate((images_list_lab, images_list_unlab))\n images_list = list(images_list)\n # Finding Similarity Matrix\n cos_sim = cosine_similarity(features_list)\n sim_graph = np.empty((0, len(cos_sim)))\n for row in cos_sim:\n k_largest = np.argsort(-np.array(row))[1:k_value + 1]\n sim_graph_row = [d if i in k_largest else 0 for i, d in enumerate(row)]\n sim_graph = np.append(sim_graph, np.array([sim_graph_row]), axis=0)\n\n row_sums = sim_graph.sum(axis=1)\n sim_graph = sim_graph / row_sums[:, np.newaxis]\n idx = 0\n results_dorsal = ppr(sim_graph, images_list, dorsal_list)\n results_palmar = ppr(sim_graph, images_list, palmar_list)\n final_results = {}\n\n for img in images_list_unlab:\n if results_dorsal[img] < results_palmar[img]:\n final_results[img] = \"dorsal\"\n else:\n final_results[img] = \"palmar\"\n\n actual_labels = fetch_actual_labels(images_list_unlab)\n print(\"Classification\")\n no_correct = 0\n correctly_classified = []\n incorrectly_classified = []\n print(\"| ImageId | Prediction | Actual |\")\n for r in final_results:\n print(\"| {} | {} | {} |\".format(r, final_results[r], actual_labels[r]))\n if final_results[r] == actual_labels[r]:\n correctly_classified.append(r)\n no_correct += 1\n else:\n incorrectly_classified.append(r)\n\n print(\"Correctly classified: {}\\n\".format(correctly_classified))\n print(\"InCorrectly classified: {}\\n\".format(incorrectly_classified))\n\n print(\"Classification Accuracy: {}%\".format(no_correct / len(images_list_unlab) * 100))\n print(\"Execution time: {} seconds\".format(time.time() - start))", "def __multilabel_processing(self):\n # read the raw dataset\n self.data['image_name'] 
= self.data['image_name'].map(lambda x: '{}.{}'.format(x, img_format))\n self.data['tags'] = self.data['tags'].map(lambda x: x.split())\n\n # create a df with the same number of rows as the dataset filled with the name of the unique values in tags\n label_names = self.data['tags'].explode().unique().tolist()\n label_df = pd.DataFrame([label_names] * self.data.shape[0], columns=label_names)\n\n # binarize the labels according to if they exist for each image or not\n self.data = pd.concat([self.data, label_df], axis=1)\n self.data[['image_name'] + label_names] = self.data.apply(lambda x: pd.Series([x[0]] + [1 if label in x[1] else 0 for label in x[2:]]), axis=1)", "def extract_features(image_path: str, features: [DefinitionFeature], labeled_pixels: [LabeledPixel] = None,\n nb_random_pixel_to_add: int = 0) -> FeatureTable:\n image = Image(image_path)\n # If there are not labeled pixel, we are predicting the image,\n # so we will extract all the pixels, so we retrieve them\n if labeled_pixels is None:\n labeled_pixels = set()\n for x in list(range(image.width)):\n for y in list(range(image.height)):\n labeled_pixels.add(LabeledPixel((x, y), 'Unknown'))\n\n if nb_random_pixel_to_add > image.width * image.height - len(labeled_pixels):\n raise AttributeError(\n \"There are not enough pixels to add \" + str(nb_random_pixel_to_add) + \" C0 pixels into the image.\")\n\n # We retrieve the list of features to use\n feature_to_compute = set()\n features = sorted(list(features))\n for feature in features:\n feature_to_compute.add((feature.required_feature, feature.mask_size))\n\n # Add some random pixels\n i = 0\n while i < nb_random_pixel_to_add:\n x, y = random.randint(0, image.width - 1), random.randint(0, image.height - 1)\n old_size = len(labeled_pixels)\n labeled_pixels.add(LabeledPixel((x, y), 'C0'))\n i = i + len(labeled_pixels) - old_size\n result = FeatureTable()\n labeled_pixels = sorted(labeled_pixels)\n\n # For each labeled pixel, we extract the features and add an individual to the result\n for labeled_pixel in labeled_pixels:\n dictionary = dict()\n for feature in feature_to_compute:\n dictionary.update(feature[0].compute(labeled_pixel.pixel, image, feature[1]))\n sample = list()\n for feature in features:\n sample.append(dictionary[feature.full_name])\n result.add_individual(\n Individual(labeled_pixel.label, sample, (labeled_pixel.pixel[0], labeled_pixel.pixel[1])))\n return result", "def __init__(self, load_instance_masks=False, label_map_proto_file=None, use_display_name=False):\n self.keys_to_features = {\n 'image/encoded':\n tf.FixedLenFeature((), tf.string, default_value=''),\n 'image/format':\n tf.FixedLenFeature((), tf.string, default_value='jpeg'),\n 'image/filename':\n tf.FixedLenFeature((), tf.string, default_value=''),\n 'image/key/sha256':\n tf.FixedLenFeature((), tf.string, default_value=''),\n 'image/source_id':\n tf.FixedLenFeature((), tf.string, default_value=''),\n 'image/height':\n tf.FixedLenFeature((), tf.int64, 1),\n 'image/width':\n tf.FixedLenFeature((), tf.int64, 1),\n # Object boxes and classes.\n 'image/object/bbox/xmin':\n tf.VarLenFeature(tf.float32),\n 'image/object/bbox/xmax':\n tf.VarLenFeature(tf.float32),\n 'image/object/bbox/ymin':\n tf.VarLenFeature(tf.float32),\n 'image/object/bbox/ymax':\n tf.VarLenFeature(tf.float32),\n 'image/object/class/label':\n tf.VarLenFeature(tf.int64),\n 'image/object/class/text':\n tf.VarLenFeature(tf.string),\n 'image/object/area':\n tf.VarLenFeature(tf.float32),\n 'image/object/is_crowd':\n tf.VarLenFeature(tf.int64),\n 
'image/object/difficult':\n tf.VarLenFeature(tf.int64),\n 'image/object/group_of':\n tf.VarLenFeature(tf.int64),\n }\n self.items_to_handlers = {\n fields.InputDataFields.image: slim_example_decoder.Image(\n image_key='image/encoded', format_key='image/format', channels=3),\n fields.InputDataFields.source_id: (\n slim_example_decoder.Tensor('image/source_id')),\n fields.InputDataFields.key: (\n slim_example_decoder.Tensor('image/key/sha256')),\n fields.InputDataFields.filename: (\n slim_example_decoder.Tensor('image/filename')),\n # Object boxes and classes.\n fields.InputDataFields.groundtruth_boxes: (\n slim_example_decoder.BoundingBox(\n ['ymin', 'xmin', 'ymax', 'xmax'], 'image/object/bbox/')),\n fields.InputDataFields.groundtruth_area: slim_example_decoder.Tensor(\n 'image/object/area'),\n fields.InputDataFields.groundtruth_is_crowd: (\n slim_example_decoder.Tensor('image/object/is_crowd')),\n fields.InputDataFields.groundtruth_difficult: (\n slim_example_decoder.Tensor('image/object/difficult')),\n fields.InputDataFields.groundtruth_group_of: (\n slim_example_decoder.Tensor('image/object/group_of'))\n }\n if load_instance_masks:\n self.keys_to_features['image/object/mask'] = tf.VarLenFeature(tf.float32)\n self.items_to_handlers[\n fields.InputDataFields.groundtruth_instance_masks] = (\n slim_example_decoder.ItemHandlerCallback(\n ['image/object/mask', 'image/height', 'image/width'],\n self._reshape_instance_masks))\n # TODO: Add label_handler that decodes from 'image/object/class/text'\n # primarily after the recent tf.contrib.slim changes make into a release\n # supported by cloudml.\n label_handler = slim_example_decoder.Tensor('image/object/class/label')\n self.items_to_handlers[\n fields.InputDataFields.groundtruth_classes] = label_handler", "def output_layer(self, features, **kwargs):\n raise NotImplementedError", "def output_layer(self, features, **kwargs):\n raise NotImplementedError", "def dict_to_tf_example(data,\n label_map_dict,\n image_subdirectory,\n ignore_difficult_instances=False):\n img_path = os.path.join(image_subdirectory, data['filename'])\n with tf.gfile.GFile(img_path, 'rb') as fid:\n encoded_jpg = fid.read()\n encoded_jpg_io = io.BytesIO(encoded_jpg)\n image = PIL.Image.open(encoded_jpg_io)\n if image.format != 'JPEG':\n raise ValueError('Image format not JPEG')\n key = hashlib.sha256(encoded_jpg).hexdigest()\n\n width = int(data['size']['width'])\n height = int(data['size']['height'])\n\n xmin = []\n ymin = []\n xmax = []\n ymax = []\n classes = []\n classes_text = []\n truncated = []\n poses = []\n difficult_obj = []\n if data.get('object') != None:\n for obj in data.get('object'):\n difficult_obj.append(int(0))\n\n xmin.append(float(obj['bndbox']['xmin']) / width)\n ymin.append(float(obj['bndbox']['ymin']) / height)\n xmax.append(float(obj['bndbox']['xmax']) / width)\n ymax.append(float(obj['bndbox']['ymax']) / height)\n\n class_name = obj['name']\n classes_text.append(class_name.encode('utf8'))\n classes.append(label_map_dict[class_name])\n truncated.append(int(0))\n poses.append('Unspecified'.encode('utf8'))\n\n example = tf.train.Example(features=tf.train.Features(feature={\n 'image/height': dataset_util.int64_feature(height),\n 'image/width': dataset_util.int64_feature(width),\n 'image/filename': dataset_util.bytes_feature(\n data['filename'].encode('utf8')),\n 'image/source_id': dataset_util.bytes_feature(\n data['filename'].encode('utf8')),\n 'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')),\n 'image/encoded': 
dataset_util.bytes_feature(encoded_jpg),\n 'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')),\n 'image/object/bbox/xmin': dataset_util.float_list_feature(xmin),\n 'image/object/bbox/xmax': dataset_util.float_list_feature(xmax),\n 'image/object/bbox/ymin': dataset_util.float_list_feature(ymin),\n 'image/object/bbox/ymax': dataset_util.float_list_feature(ymax),\n 'image/object/class/text': dataset_util.bytes_list_feature(classes_text),\n 'image/object/class/label': dataset_util.int64_list_feature(classes),\n 'image/object/difficult': dataset_util.int64_list_feature(difficult_obj),\n 'image/object/truncated': dataset_util.int64_list_feature(truncated),\n 'image/object/view': dataset_util.bytes_list_feature(poses),\n }))\n return example", "def tfds_map(self, example):\r\n if len(self.get_labels()) > 1:\r\n example.label = self.get_labels()[int(example.label)]\r\n return example", "def main():\n\n #Initiate argument parser\n parser = argparse.ArgumentParser(\n description=\"LabelMe TensorFlow XML-to-CSV converter\"\n )\n parser.add_argument(\n \"-c\",\n \"--csvInput\",\n help=\"Path to the labels.csv file\",\n type=str,\n )\n\n parser.add_argument(\n \"-l\",\n \"--labelMap\",\n help=\"Path to the label_map.pbtxt file\",\n type=str,\n )\n\n parser.add_argument(\n \"-i\",\n \"--images\",\n help=\"Path to image folder\",\n type=str,\n )\n\n parser.add_argument(\n \"-o\",\n \"--outputFile\",\n help=\"Path to output TFRecord file\",\n type=str\n )\n\n args = parser.parse_args()\n\n #If no input args are given use current working directory\n if args.csvInput is None:\n args.csvInput = os.getcwd() + \"/labels.csv\"\n if args.labelMap is None:\n args.labelMap = os.getcwd() + \"/label_map.pbtxt\"\n if args.images is None:\n args.images = os.getcwd()\n if args.outputFile is None:\n args.outputFile = os.getcwd() + \"/train.record\"\n\n #check if input paths exists\n assert os.path.isdir(args.images)\n assert os.path.isfile(args.csvInput)\n assert os.path.isfile(args.labelMap)\n\n #Initiate TFRecordWriter\n writer = tf.io.TFRecordWriter(args.outputFile)\n \n #Read labels from .csv into pd dataframe\n labels = pd.read_csv(args.csvInput)\n\n #Load the `label_map` from pbtxt file.\n label_map = label_map_util.load_labelmap(args.labelMap)\n categories = label_map_util.convert_label_map_to_categories(\n label_map, max_num_classes=90, use_display_name=True\n )\n category_index = label_map_util.create_category_index(categories)\n label_map = {} #Dict resolving class name to class id\n for k, v in category_index.items():\n label_map[v.get(\"name\")] = v.get(\"id\")\n\n #Group labels dataframe by filename\n grouped = split(labels, \"filename\")\n\n #for each filename\n for group in grouped:\n #create a tf_example for each image including all labels\n tf_example = create_tf_example(group, args.images, label_map)\n writer.write(tf_example.SerializeToString())\n\n #Close TFRecordWriter and save to file\n writer.close()\n output_path = os.path.join(os.getcwd(), args.outputFile)\n print(\"Successfully created the TFRecords: {}\".format(args.outputFile))", "def _preprocess(self, features, labels):\n with tf.variable_scope('preprocess'):\n with tf.variable_scope('image'):\n features['image_orig'] = features['image']\n image = tf.image.convert_image_dtype(features['image_orig'],\n dtype=tf.float32)\n if self.mode == ModeKeys.TRAIN:\n images = tf.unstack(image)\n images = [augment_image(img) for img in images]\n image = tf.stack(images)\n image = tf.subtract(image, 0.5)\n image = tf.multiply(image, 2.0)\n 
features['image'] = image\n\n if labels is None:\n return features, None\n\n with tf.variable_scope('label'):\n # TODO(Shancheng): use start token and end token rather constant 0\n # labels for decoder input\n labels['label_input'] = tf.concat([labels['label'][:, -1:],\n labels['label'][:, 0:-1]], axis=1)\n # from text length to training label length\n labels['length'] = tf.reshape(labels['length'], [-1])\n labels['length'] = labels['length'] + 1\n\n return features, labels", "def floortype_classifier_model_fn(features, labels, mode):\n # Input Layer\n # Reshape X to 4-D tensor: [batch_size, width, height, channels]\n input_layer = tensorflow.reshape(features[\"input\"], [-1, IMAGE_SIZE, IMAGE_SIZE, 3], name=\"input\")\n input_layer = tensorflow.cast(input_layer, tensorflow.float32)\n\n # Convolutional Layer #1\n # Computes 32 features using a 5x5 filter with ReLU activation.\n # Padding is added to preserve width and height.\n # Input Tensor Shape: [batch_size, 80, 80, 3]\n # Output Tensor Shape: [batch_size, 80, 80, 16]\n\n # # Pre tensorflow 1.0 per https://www.tensorflow.org/versions/r1.3/install/migration\n\n # Tensorflow 1.2.1\n conv1 = tensorflow.layers.conv2d(\n inputs=input_layer,\n filters=32,\n kernel_size=[5, 5],\n padding=\"same\",\n activation=tensorflow.nn.relu)\n # print(\"IN floortype_classifier_model_fn AFTER conv1\")\n\n # Pooling Layer #1\n # First max pooling layer with a 2x2 filter and stride of 2\n # Input Tensor Shape: [batch_size, 80, 80, 32]\n # Output Tensor Shape: [batch_size, 40, 40, 32]\n pool1 = tensorflow.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)\n\n # Convolutional Layer #2 and Pooling Layer #2\n # Computes 64 features using a 5x5 filter.\n # Padding is added to preserve width and height.\n # Input Tensor Shape: [batch_size, 40, 40, 32]\n # Output Tensor Shape: [batch_size, 40, 40, 64]\n conv2 = tensorflow.layers.conv2d(\n inputs=pool1,\n filters=64,\n kernel_size=[5, 5],\n padding=\"same\",\n activation=tensorflow.nn.relu)\n\n # Pooling Layer #2\n # Second max pooling layer with a 2x2 filter and stride of 2\n # Input Tensor Shape: [batch_size, 40, 40, 64]\n # Output Tensor Shape: [batch_size, 20, 20, 64]\n pool2 = tensorflow.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)\n\n # Flatten tensor into a batch of vectors\n # Input Tensor Shape: [batch_size, 20, 20, 64]\n # Output Tensor Shape: [batch_size, 20 * 20 * 64]\n pool2_flat = tensorflow.reshape(pool2, [-1, 20 * 20 * 64])\n\n # Dense Layer\n # Densely connected layer with 514 neurons\n # Input Tensor Shape: [batch_size, 20 * 20 * 64]\n # Output Tensor Shape: [batch_size, 1024]\n dense = tensorflow.layers.dense(inputs=pool2_flat, units=1024, activation=tensorflow.nn.relu)\n\n # Add dropout operation; 0.4 probability that element will be kept\n dropout = tensorflow.layers.dropout(\n inputs=dense, rate=0.6, training=mode == tensorflow.estimator.ModeKeys.TRAIN)\n\n # Logits layer (unit==2 since there are two classes - TILE, CARPET)\n # Input Tensor Shape: [batch_size, 1024]\n # Output Tensor Shape: [batch_size, 2]\n logits = tensorflow.layers.dense(inputs=dropout, units=2)\n\n\n predictions = {\n # Generate predictions (for PREDICT and EVAL mode)\n \"classes\": tensorflow.argmax(input=logits, axis=1),\n # Add `softmax_tensor` to the graph. 
It is used for PREDICT and by the\n # `logging_hook`.\n \"probabilities\": tensorflow.nn.softmax(logits, name=\"softmax_tensor\")\n }\n if mode == tensorflow.estimator.ModeKeys.PREDICT:\n return tensorflow.estimator.EstimatorSpec(mode=mode, predictions=predictions)\n\n # Calculate Loss (for both TRAIN and EVAL modes)\n loss = tensorflow.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)\n\n # Configure the Training Op (for TRAIN mode)\n if mode == tensorflow.estimator.ModeKeys.TRAIN:\n optimizer = tensorflow.train.GradientDescentOptimizer(learning_rate=LEARNING_RATE)\n train_op = optimizer.minimize(\n loss=loss,\n global_step=tensorflow.train.get_global_step())\n return tensorflow.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)\n\n # Add evaluation metrics (for EVAL mode)\n eval_metric_ops = {\n \"accuracy\": tensorflow.metrics.accuracy(\n labels=labels, predictions=predictions[\"classes\"])}\n return tensorflow.estimator.EstimatorSpec(\n mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)", "def space_to_batch(images, labels, tiles, n_tiles, paddings_image, paddings_tiles, shape_padded_image, shape_padded_label, shape_input, shape_output, b_with_labels=False, b_verbose=False):\n\n # map parse function to each zipped element\n print(paddings_tiles, shape_padded_label, shape_output)\n assert any([a % b <= 0 for a, b in zip(shape_padded_label, shape_output)])\n\n paddings_both = [a + b for a, b in zip(paddings_image, paddings_tiles)]\n shape_padded_both = [a + 2 * b for a, b in zip(shape_padded_image, paddings_tiles)]\n scale_factor = [float(a/b) for a, b in zip(shape_padded_both, shape_padded_image)]\n\n paddings_labels = [(x, x) for x in paddings_tiles] + [(0, 0)]\n paddings_both = [(x, x) for x in paddings_both] + [(0, 0)]\n\n if b_verbose:\n print('Padding/ padding_img: ', paddings_labels, paddings_both, scale_factor)\n logging.info('Using %d patches to predict a whole image', n_tiles)\n\n # process labels into patches\n if b_with_labels:\n # print('labels prior: ', labels)\n labels = tf.pad(labels, paddings_labels)\n labels = tf.expand_dims(labels, axis=0)\n batch_shape = tf.stack([n_tiles, *shape_output, tf.shape(labels)[-1]])\n labels = tf.reshape(labels, batch_shape)\n # print('labels post: ', labels)\n\n # process images into patches\n # Note: a simple reshape is not possible due to the overlapping of inputs\n # map_fn or tf while_loops or sth similar might help\n images = tf.pad(images, paddings_both)\n if b_verbose:\n images = tf.Print(images, [tf.shape(images), tiles], 'Temporary patch shape - before: ', summarize=5)\n\n patches = [None for _ in range(n_tiles)]\n # patch_indices = list(range(n_tiles))\n positions = [None for _ in range(n_tiles)]\n offset_image = [int(x / 2) for x in shape_input]\n idx_tile = 0\n for idx_0 in range(tiles[0]):\n for idx_1 in range(tiles[1]):\n for idx_2 in range(tiles[2]):\n start_pos = [shape_output[0] * idx_0, shape_output[1] * idx_1, shape_output[2] * idx_2, 0]\n positions[idx_tile] = [float(a + b) for a, b in zip(start_pos[0:3], offset_image)]\n patches[idx_tile] = tf.slice(images, start_pos, shape_input + [tf.shape(images)[-1]])\n idx_tile += 1\n # images = tf.Print(images, [tf.shape(images), idx_0, idx_1, idx_2, start_pos], 'performed crop at: ')\n\n if b_verbose:\n patches[0] = tf.Print(patches[0], [tf.shape(patches[0])], 'Temporary patch shape - within: ', summarize=5)\n images = tf.stack(patches, axis=0)\n\n positions_t = tf.stack(positions, axis=0)\n positions_t = 
tf.cast(tf.multiply((tf.divide(positions_t, shape_padded_both) - 0.5) * 2, scale_factor), dtype=tf.float32) # rescale it | account for larger padded size\n if b_verbose:\n images = tf.Print(images, [tf.shape(images)], 'Temporary patch shape - after: ', summarize=5)\n\n return images, labels, positions_t", "def tfds_map(self, example):\n if len(self.get_labels()) > 1:\n example.label = self.get_labels()[int(example.label)]\n return example" ]
[ "0.5979642", "0.56935304", "0.56794995", "0.563562", "0.56328654", "0.56254506", "0.56227946", "0.5580164", "0.5480045", "0.54260916", "0.53929067", "0.537351", "0.5349937", "0.5343901", "0.5273858", "0.5248865", "0.523017", "0.5227214", "0.52092975", "0.5198775", "0.517194", "0.51489407", "0.51489407", "0.5125796", "0.5095138", "0.50807035", "0.5080217", "0.50743496", "0.5074148", "0.50526464" ]
0.8608024
0
Testing with an ideal file containing different numbers in the input and with a file containing equal numbers
def test_ideal_file_and_file_with_zeros(file_name, result): assert find_maximum_and_minimum(file_name) == result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fail_check(version, num):\n f1 = open(\"replace/outputs/t\" + str(num), 'r')\n f2 = open(\"replace/outputs/v\" + str(version) + \"/t\" + str(num), 'r')\n ret = f1.readlines() != f2.readlines()\n f1.close()\n f2.close()\n return ret", "def test_big_file_inversions_num():\n\n with open('tests/test_data/IntegerArray.txt') as f:\n content_nums = [int(num) for num in f]\n inversions_num = get_inversions_num(content_nums, 0, len(content_nums))\n expected_res = 2407905288\n assert inversions_num == expected_res", "def test_read_input_file(self):\n\n test_max_digit = 2\n tuple1 = self.test_raw_tuple\n tuple2, max_digit = read_input_file(self.test_drug_info_file)\n self.assertEqual(tuple1, tuple2)\n self.assertAlmostEqual(max_digit,test_max_digit)", "def testInput(filename, expected):\n print(\"\\n------- Test \" + filename + \" -------\\n\")\n found = 0\n graphList = Graph.fromFile(filename)\n for graph in graphList:\n found += testOnGraph(graph)\n print(\"\\nEuler circuits expected: \" + str(expected) + \", found: \" + str(found))\n return found", "def compare_values(filename1, filename2, decimal_places):\r\n roots_we_found = obtained_roots(filename1, decimal_places)\r\n print('Number of our implementation Homotopy Roots : {}'.format(len(roots_we_found)))\r\n roots_by_other_implementation = checking_roots(filename2, decimal_places)\r\n print('Number of roots by Julia Implementation : {}'.format(len(roots_by_other_implementation)))\r\n number_of_different_roots=0\r\n numer_of_similar_roots=0\r\n same_result = []\r\n for i in range(len(roots_we_found)): \r\n for j in range(len(roots_by_other_implementation)):\r\n if len(set(roots_we_found[i]) & set(roots_by_other_implementation[j])) != len(roots_we_found[i]):\r\n number_of_different_roots +=1\r\n else:\r\n numer_of_similar_roots += 1\r\n same_result.append(set(roots_we_found[i]) & set(roots_by_other_implementation[j]))\r\n print('Number of similar roots found : {}'.format(numer_of_similar_roots))\r\n return same_result", "def test(self, filename):\n hit = 0\n total = 0\n n = self.n\n for sent in open(filename):\n samp = sent.rstrip('\\n')\n# samp = '~' + samp + '~' \n for i in range(len(samp) - n):\n total = total + 1\n prev = samp[i:i + n - 1]\n pred = self.pred(prev)\n if pred == samp[i + n - 1]:\n hit = hit + 1\n \n return hit/total", "def test_two_files():\n\n out_file = ''.join(\n random.choices(string.ascii_uppercase + string.digits, k=5))\n try:\n if os.path.isfile(out_file):\n os.remove(out_file)\n\n rv, out = getstatusoutput(f'{prg} -f {tair} {amigo} -o {out_file}')\n assert rv == 0\n assert re.search('1: tair_heat.txt', out)\n assert re.search('2: amigo_heat.txt', out)\n assert re.search(\n f'Wrote 20 gene IDs from 2 files to file \"{out_file}\"', out)\n assert os.path.isfile(out_file)\n exp_two = '\\n'.join(\n sorted(\"\"\"\n AT5G12020 AT3G06400 AT2G33590 AT1G54050 AT5G67030 AT4G14690 AT1G16030 AT5G03720 AT3G10800 \n AT5G12140 AT1G64280 AT3G24500 AT3G09440 AT3G04120 AT4G19630 AT1G16540 AT2G22360 AT1G13930 \n AT5G41340 AT3G24520\n \"\"\".split()))\n assert open(out_file).read().strip() == exp_two.strip()\n\n finally:\n if os.path.isfile(out_file):\n os.remove(out_file)", "def equal_file_sum(file1_paht, file2_paht):\n md5_sum1 = generate_sum(file1_path)\n md5_sum2 = generate_sum(file2_path)\n return (md5_sum1 == md5_sum2)", "def __compare_files(self, filename1, filename2):\n self.assertTrue(os.path.isfile(filename1))\n self.assertTrue(os.path.isfile(filename2))\n self.assertEqual(os.path.getsize(filename1), 
os.path.getsize(filename2))\n with open(filename1, \"rb\") as f1:\n with open(filename2, \"rb\") as f2:\n n_blocks = int(self.args.size) // self.max_block_size\n for i in range(n_blocks):\n self.assertEqual(f1.read(self.max_block_size), \\\n f2.read(self.max_block_size))\n remaining = int(self.args.size) % self.max_block_size\n if remaining > 0:\n self.assertEqual(f1.read(remaining), \\\n f2.read(remaining))", "def cmp(f1, f2):\n with open(f1) as f1, open(f2) as f2:\n return f1.read() == f2.read()", "def checkdifferences(oldfile, changelist, num):\n if num == 1: #combining the unique values of a list & file into 1 list\n newcontent = changelist\n currentcontent = csv_read('{}.csv'.format(oldfile)) # ReadyForAck\n combined = combinelists(currentcontent, newcontent)\n return combined\n if num == 2: # combine the unique values of 2 files into 1 list\n currentcontent = csv_read('{}.csv'.format(changelist)) #clientlist\n combined = []\n for each in currentcontent:\n # for elk in each:\n combined + each\n newlst = combinelists(currentcontent, combined)\n return newlst\n if num == 3: # removing the doubles from each list\n currentcontent = csv_read('{}.csv'.format(oldfile)) # ReadyForAck\n changecontent = changelist\n newlist = dividelists(currentcontent, changecontent)\n return newlist", "def are_files_equal(file1, file2):\n input_file_1 = open(file1, \"r\")\n input_file_2 = open(file2, \"r\")\n\n file1 = input_file_1.read()\n file2 = input_file_2.read()\n print(type(file1), file1, type(file2), file2)\n\n result =False\n if file1 == file1:\n result = True\n\n input_file_1.close()\n input_file_2.close()\n return result", "def same_file(wavecar1, wavecar2, wavecar3):\n same = False\n if (filecmp.cmp(wavecar1, wavecar2, shallow=False)):\n print(\"Serious problem:: {} and {} are the same\".format(wavecar1, wavecar2))\n same = True\n if (filecmp.cmp(wavecar1, wavecar3, shallow=False)):\n print(\"Serious problem:: {} and {} are the same\".format(wavecar1, wavecar3))\n same = True\n if (filecmp.cmp(wavecar2, wavecar3, shallow=False)):\n print(\"Serious problem:: {} and {} are the same\".format(wavecar2, wavecar3))\n same = True\n\n if same:\n print(\"It seems that you are using same files to do finite difference, exit\")\n print(\"\\tComment the 'same_file' checker if you know what you are doing\")\n raise SystemExit", "def test_unequal(self):\n\n qs = FBO(path=TEST_FILES_ROOT, glob='*.md').order_by('name')\n # There are four of these.\n for a, b in combinations(qs.all(), 2):\n self.assertNotEqual(a, b)", "def test_run(self):\n files = [\n (\"AS1-1.phy_r8s.txt\", \"AS1-1.phy_r8s.txt_2.5.txt\"),\n (\"AS1-3.phy_r8s.txt\", \"AS1-3.phy_r8s.txt_2.5.txt\"),\n (\"AS1-4.phy_r8s.txt\", \"AS1-4.phy_r8s.txt_2.5.txt\"),\n ]\n for file_pair in files:\n input_file = file_pair[0]\n expected_file = file_pair[1]\n infile = self.test_data_path + input_file\n outfile = self.test_data_path + expected_file\n divnum = 2.5\n result = run(infile, divnum)\n\n with open(outfile) as handle:\n expected_result = handle.read()\n self.assertEqual(expected_result, result)", "def do_comparison(found_file, created_file):\n\n fh_f, fh_c, data_f, data_c = get_data(found_file, created_file)\n\n print('Initial found data shape ', data_f.shape)\n print(' and created data shape= ', data_c.shape)\n\n # Compare slice i of created to slice i+1 in found\n if (data_f.shape[0] == 1): # NIRCAM\n data_f = data_f[0, :, :, :]\n if (data_c.shape[0] == 1): # to accept output of mc_4d\n data_c = data_c[0, :, :, :]\n data_c_start = data_c[:-1, :, :]\n 
data_f_end = data_f[1:, :, :]\n elif (fh_f['SCI'].header['NAXIS'] == 3): # NIRSPEC\n data_c_start = data_c[:-1, :, :]\n data_f_end = data_f[1:, :, :]\n elif (data_f.shape[0] > 1 and fh_f['SCI'].header['NAXIS'] == 4): # MIRI\n # concatenate copies of created data (except for the last frame)\n num_ints = int(fh_f[1].data.shape[0]) # number of integrations\n data_c_start = (np.repeat(data_c[:-1, :, :], num_ints, axis=0))\n data_f_end = data_f[:, 1:, :, :]\n data_c_start = data_c_start.reshape(data_f_end.shape)\n else:\n print(' FATAL ERROR - unsupported instrument')\n\n print('Truncated found data shape ', data_f_end.shape)\n print(' and truncated created data shape= ', data_c_start.shape)\n try:\n assert(data_f_end.shape == data_c_start.shape)\n except AssertionError:\n print(' FATAL ERROR: adjusted found data shape ', data_f.shape, \\\n ' is not the same as adjusted created data shape= ', data_c.shape)\n\n neither = (data_c_start == 0.) & (data_f_end == 0.)\n both = (data_c_start != 0.) & (data_f_end != 0.) # created CR was found\n c_only = (data_c_start != 0.) & (data_f_end == 0.) # created CR not found\n f_only = (data_c_start == 0.) & (data_f_end != 0.) # found CR was not created\n\n try:\n assert(neither.sum() + both.sum() + c_only.sum() + f_only.sum() \\\n == data_c_start.size)\n except AssertionError:\n print('FATAL ERROR: sum of components must equal total number of pixels ')\n\n print(' Within the input dataset cubes:')\n print(' Number of created but not found pixels: ', c_only.sum())\n print(' Number of found but not created pixels: ', f_only.sum())\n print(' Number of pixels that are both found and created: ', both.sum())\n print(' Number of pixels that are neither found nor created: ', neither.sum())\n print(' ')\n print(' The fraction of all pixels that were found only: ', \\\n float(f_only.sum()) / float(data_c_start.size))\n print(' The fraction of all pixels that were created only: ', \\\n float(c_only.sum()) / float(data_c_start.size))\n print(' The fraction of pixels in the created file having cosmic rays:', \\\n float(c_only.sum()) / (data_c_start.shape[-2] * data_c_start.shape[-1]))\n print(' ')\n\n write_files(neither, both, c_only, f_only, fh_c, data_c_start)", "def assert_files_equal(file1, file2, error_msg='file mismatch'):\n\n bufsize = 0x1000\n block_offset = 0\n with open(file1, 'rb') as fp1, open(file2, 'rb') as fp2:\n while True:\n block1 = bytearray(fp1.read(bufsize))\n block2 = bytearray(fp2.read(bufsize))\n if len(block1) < len(block2):\n raise TestException(error_msg + ': file1 shorter than file2')\n elif len(block1) > len(block2):\n raise TestException(error_msg + ': file1 longer than file2')\n\n if block1 != block2:\n for offset, (val1, val2) in enumerate(zip(block1, block2)):\n if val1 != val2:\n # Show the difference\n exception_text = error_msg + ':\\n'\n rounded_offset = offset & ~15\n exception_text += '{:08x} '.format(block_offset +\n rounded_offset)\n for lineoffs in range(16):\n exception_text += '{:02x}'.format(\n block1[rounded_offset + lineoffs])\n\n exception_text += '\\n{:08x} '.format(\n block_offset + rounded_offset)\n for lineoffs in range(16):\n exception_text += '{:02x}'.format(\n block2[rounded_offset + lineoffs])\n\n exception_text += '\\n '\n for lineoffs in range(16):\n if block1[rounded_offset + lineoffs] \\\n != block2[rounded_offset + lineoffs]:\n exception_text += '^^'\n else:\n exception_text += ' '\n\n raise TestException(exception_text)\n\n if not block1:\n return\n\n block_offset += len(block1)", "def 
test_true_false_cases(file_with_true_and_false_value, result, inp):\n assert read_magic_number(file_with_true_and_false_value) == result", "def check_correctness(self):\n\n with open(self.output_file, 'r') as output_file, open(self.gt_file, 'r') as gt_file:\n\n out_lines = output_file.readlines()\n gt_lines = gt_file.readlines()\n\n # Check for inequality\n if len(out_lines) != len(gt_lines):\n return 0\n\n # Check for inequality\n for i in range(len(out_lines)):\n out_split = out_lines[i].split()\n gt_split = gt_lines[i].split()\n\n if len(out_split) != len(gt_split):\n return 0\n\n for j in range(len(out_split)):\n # Treat slur and tie as equivalent\n if out_split[j] != gt_split[j] and\\\n ('slur' not in out_split[j] and 'tie' not in out_split[j]) and\\\n ('slur' not in gt_split[j] and 'tie' not in gt_split[j]):\n return 0\n\n return 1", "def _generate_mismatches_pairs(self):\n tmptmp = ['00061', '10285', '00074', '10156', '10041', '20344', '10041', '20344', '10041', '20344', '10217', '20345', '20324', '20345', '20344',\n '10268', '20345', '20481', '20394', '00074', '20412', '10014', '20436', '20412', '30604', '10218']\n for i, name in enumerate(self.remaining):\n self.update_remaining()\n del self.remaining[i] # deletes the file from the list, so that it is not chosen again\n other_dir = random.choice(self.remaining)\n with open(self.pairs_filepath, \"a\") as f:\n for _ in range(self.num_random_images_per_folder):\n\n if name in tmptmp:\n print()\n if other_dir in tmptmp:\n print()\n\n temps_file_1 = os.listdir(os.path.join(self.data_dir, name))\n if temps_file_1:\n file1 = random.choice(temps_file_1)\n\n temps_file_2 = os.listdir(os.path.join(self.data_dir, other_dir))\n if temps_file_2:\n file2 = random.choice(temps_file_2)\n\n if temps_file_1 and temps_file_2:\n if self.img_ext in file1 and self.img_ext in file2:\n print(\"For '\" + self.data_dir + \"' and counter: \", self.counter, ', MisMatch Pair:',\n name + \" \" + file1.split(self.separator)[-1] + ' ' +\n other_dir + ' ' + file2.split(self.separator)[-1])\n\n f.write(name + \"\\t\" + file1.split(self.separator)[-1] + \"\\t\" + other_dir + \"\\t\" +\n file2.split(self.separator)[-1] + \"\\n\")\n\n self.counter += 1", "def test_process_fastq_single_end_read_file_w_defaults_v180(self):\r\n actual = process_fastq_single_end_read_file(self.fastq2,\r\n self.barcode_fastq2,\r\n self.barcode_map1,\r\n min_per_read_length_fraction=0.45)\r\n actual = list(actual)\r\n expected = self.fastq2_expected_default\r\n self.assertEqual(len(actual), len(expected))\r\n for i in range(len(expected)):\r\n np.testing.assert_equal(actual[i], expected[i])", "def _assert_file_count_equal(self, expected_count):\n assert len(BlockStructureModel._get_all_files(self.usage_key)) == expected_count", "def check_bit_exactness(input_raw_file):\n (t1, f1) = interpolate(input_raw_file, 'cpu_nn_lena.dat', 'cpu', 1, 'nn', 8000, 4000)\n (t2, f2) = interpolate(input_raw_file, 'gpu_nn_lena.dat', 'gpu', 1, 'nn', 8000, 4000)\n (t3, f3) = interpolate(input_raw_file, 'cpu_bl_lena.dat', 'cpu', 1, 'bl', 8000, 4000)\n (t4, f4) = interpolate(input_raw_file, 'gpu_bl_lena.dat', 'gpu', 1, 'bl', 8000, 4000)\n\n if filecmp.cmp(f1, f2, shallow=True):\n print(\"NN interpolation on GPU is bit exact with CPU\")\n if filecmp.cmp(f3, f4, shallow=True):\n print(\"Bilinear interpolation on GPU is bit exact with CPU\")", "def test_process_fastq_single_end_read_file_no_barcode(self):\r\n actual = process_fastq_single_end_read_file_no_barcode(\r\n self.fastq1,\r\n 's1',\r\n 
min_per_read_length_fraction=0.45)\r\n actual = list(actual)\r\n expected = self.fastq1_expected_single_barcode\r\n self.assertEqual(len(actual), len(expected))\r\n for i in range(len(expected)):\r\n np.testing.assert_equal(actual[i], expected[i])", "def check_same_objects(self, directory_name: str, temp_file: File) -> int:\n number = 0\n if directory_name in self.possibilities:\n files = self.possibilities[directory_name].files\n for file in files:\n if not isinstance(file, PlaceHolderFile) and file == temp_file:\n number = file.get_next_number()\n return number", "def test_process_fastq_single_end_read_file_w_defaults(self):\r\n actual = process_fastq_single_end_read_file(self.fastq1,\r\n self.barcode_fastq1,\r\n self.barcode_map1,\r\n min_per_read_length_fraction=0.45)\r\n actual = list(actual)\r\n expected = self.fastq1_expected_default\r\n self.assertEqual(len(actual), len(expected))\r\n for i in range(len(expected)):\r\n np.testing.assert_equal(actual[i], expected[i])", "def test_printdiff(self):\n\n # Testing different string input options\n assert printdiff(self.data('arange.fits'),\n self.data('blank.fits')) is None\n assert printdiff(self.data('arange.fits'),\n self.data('blank.fits'), ext=0) is None\n assert printdiff(self.data('o4sp040b0_raw.fits'),\n self.data('o4sp040b0_raw.fits'),\n extname='sci') is None\n\n # This may seem weird, but check printdiff to see, need to test\n # incorrect second file\n with pytest.raises(IOError):\n printdiff('o4sp040b0_raw.fits', 'fakefile.fits', extname='sci')\n\n # Test HDU object inputs\n with fits.open(self.data('stddata.fits'), mode='readonly') as in1:\n with fits.open(self.data('checksum.fits'), mode='readonly') as in2:\n\n assert printdiff(in1[0], in2[0]) is None\n\n with pytest.raises(ValueError):\n printdiff(in1[0], in2[0], ext=0)\n\n assert printdiff(in1, in2) is None\n\n with pytest.raises(NotImplementedError):\n printdiff(in1, in2, 0)", "def test_printdiff(self):\n\n # Testing different string input options\n assert printdiff(self.data(\"arange.fits\"), self.data(\"blank.fits\")) is None\n assert (\n printdiff(self.data(\"arange.fits\"), self.data(\"blank.fits\"), ext=0) is None\n )\n assert (\n printdiff(\n self.data(\"o4sp040b0_raw.fits\"),\n self.data(\"o4sp040b0_raw.fits\"),\n extname=\"sci\",\n )\n is None\n )\n\n # This may seem weird, but check printdiff to see, need to test\n # incorrect second file\n with pytest.raises(OSError):\n printdiff(\"o4sp040b0_raw.fits\", \"fakefile.fits\", extname=\"sci\")\n\n # Test HDU object inputs\n with fits.open(self.data(\"stddata.fits\"), mode=\"readonly\") as in1:\n with fits.open(self.data(\"checksum.fits\"), mode=\"readonly\") as in2:\n assert printdiff(in1[0], in2[0]) is None\n\n with pytest.raises(ValueError):\n printdiff(in1[0], in2[0], ext=0)\n\n assert printdiff(in1, in2) is None\n\n with pytest.raises(NotImplementedError):\n printdiff(in1, in2, 0)", "def compare_readbacks(golden_path,\n readback_path):\n\n errors_cram = 0\n seu_01 = 0\n seu_10 = 0\n mbu_pos = 0\n mbu_neg = 0\n mbu_delta = []\n\n golden = open(golden_path, \"rb\")\n readback = open(readback_path, \"rb\")\n\n golden_array = golden.read()\n readback_array = readback.read()\n print(len(golden_array))\n print(len(readback_array))\n\n for i in range(0, len(golden_array)):\n if golden_array[i] != readback_array[i]:\n gold_byte, = struct.unpack(\"B\", golden_array[i])\n gold_byte_ones = bin(gold_byte).count(\"1\")\n readback_byte, = struct.unpack(\"B\", readback_array[i])\n readback_byte_ones = 
bin(readback_byte).count(\"1\")\n\n delta = gold_byte_ones - readback_byte_ones\n\n if delta == -1:\n seu_01 += 1\n elif delta == 1:\n seu_10 += 1\n elif delta > 1:\n mbu_pos += 1\n mbu_delta.append(delta)\n print(\"\\n\\n\\n\\n\\n DUPA \\n\\n\\n\\n\\n\")\n elif delta < -1:\n mbu_neg += 1\n mbu_delta.append(delta)\n print(\"\\n\\n\\n\\n\\n DUPA \\n\\n\\n\\n\\n\")\n\n print(gold_byte,\n readback_byte,\n delta)\n\n errors_cram += 1\n\n print(\"\\n\\nseu_01: {0}\\nseu_10: {1}\\nmbu_01: {2}\\nmbu_10: {3}\".format(seu_01, seu_10, mbu_neg, mbu_pos))\n print(mbu_delta)\n golden.close()\n readback.close()\n\n return errors_cram", "def test_split_fasta_diff_num_seqs_per_file(self):\r\n fd, filename_prefix = mkstemp(dir=get_qiime_temp_dir(),\r\n prefix='split_fasta_tests',\r\n suffix='')\r\n close(fd)\r\n infile = ['>seq1', 'AACCTTAA', '>seq2', 'TTAACC', 'AATTAA',\r\n '>seq3', 'CCTT--AA']\r\n\r\n actual = split_fasta(infile, 2, filename_prefix)\r\n\r\n actual_seqs = []\r\n for fp in actual:\r\n actual_seqs += list(open(fp))\r\n remove_files(actual)\r\n\r\n expected = ['%s.%d.fasta' % (filename_prefix, i) for i in range(2)]\r\n # list of file paths is as expected\r\n self.assertEqual(actual, expected)\r\n # building seq collections from infile and the split files result in\r\n # equivalent seq collections\r\n self.assertEqual(\r\n SequenceCollection.from_fasta_records(parse_fasta(infile), DNA),\r\n SequenceCollection.from_fasta_records(parse_fasta(actual_seqs), DNA))" ]
[ "0.6338274", "0.630728", "0.62845033", "0.62252957", "0.61770785", "0.6109089", "0.605709", "0.5963395", "0.58854437", "0.58828866", "0.5870365", "0.58685476", "0.5863105", "0.58447826", "0.5840695", "0.5825992", "0.5807981", "0.57987446", "0.5746747", "0.5708142", "0.56613475", "0.566075", "0.5653494", "0.56487626", "0.56369674", "0.5632716", "0.56319314", "0.56030554", "0.5602449", "0.5591017" ]
0.65065473
0
Add app messages to context.
def messages(request): ctx = {} messages = get_messages(request) if messages: ctx['mesgs'] = messages return ctx
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_app(self):\n \n pass", "def get_app_message(self):\n return self.messages[\"app\"].get()", "def init_app(self, app):\n\n if not hasattr(app, 'extensions'):\n app.extensions = {}\n app.extensions['mixconn'] = self\n app.context_processor(lambda: {'mixconn': self})\n self.pool = Pool()", "def add_message(self, request, level, message_template,\n message_context=None, extra_tags=''):\n if 'django.contrib.messages' in settings.INSTALLED_APPS:\n try:\n if message_context is None:\n message_context = {}\n message = render_to_string(message_template,\n message_context).strip()\n if message:\n messages.add_message(request, level, message,\n extra_tags=extra_tags)\n except TemplateDoesNotExist:\n pass", "def propeller_messages(context, *args, **kwargs):\n # Force Django 1.8+ style, so dicts and not Context\n # TODO: This may be due to a bug in Django 1.8/1.9+\n if Context and isinstance(context, Context):\n context = context.flatten()\n context.update({'message_constants': message_constants})\n return render_template_file('propeller/messages.html', context=context)", "def add_message(self, msg):\n self.messages.append(msg)", "def patchMessages():\n import OnlineEnv as Online\n app=Gaudi.ApplicationMgr()\n Configs.AuditorSvc().Auditors = []\n app.MessageSvcType = 'LHCb::FmcMessageSvc'\n if Gaudi.allConfigurables.has_key('MessageSvc'):\n del Gaudi.allConfigurables['MessageSvc']\n msg = Configs.LHCb__FmcMessageSvc('MessageSvc')\n msg.fifoPath = os.environ['LOGFIFO']\n msg.LoggerOnly = True\n msg.doPrintAlways = False\n# msg.OutputLevel = MSG_WARNING\n# msg.OutputLevel = Online.OutputLevel\n msg.OutputLevel = MSG_INFO", "async def create(app=None, config=None, update=None, backend=None):\n\n ctx = Context(\n app=app,\n config=config,\n update=update,\n backend=backend,\n )\n\n if update.type == UpdateType.MSG:\n if update.receiver_type == ReceiverType.MULTI:\n ctx.default_target_id = update.receiver_id\n else:\n ctx.default_target_id = update.sender_id\n\n ctx.sender_key = ctx.get_key_for(sender_id=update.sender_id)\n ctx.receiver_key = ctx.get_key_for(receiver_id=update.receiver_id)\n ctx.sender_here_key = ctx.get_key_for(sender_id=update.sender_id, receiver_id=update.receiver_id)\n\n return ctx", "def messages(self, messages):\r\n\r\n self._messages = messages", "def get_messages_for_app(app):\n\tmessages = []\n\tmodules = \", \".join(['\"{}\"'.format(m.title().replace(\"_\", \" \")) \\\n\t\tfor m in frappe.local.app_modules[app]])\n\n\t# doctypes\n\tif modules:\n\t\tfor name in frappe.db.sql_list(\"\"\"select name from tabDocType\n\t\t\twhere module in ({})\"\"\".format(modules)):\n\t\t\tmessages.extend(get_messages_from_doctype(name))\n\n\t\t# pages\n\t\tfor name, title in frappe.db.sql(\"\"\"select name, title from tabPage\n\t\t\twhere module in ({})\"\"\".format(modules)):\n\t\t\tmessages.append(('Page: ' + title or name, title or name))\n\t\t\tmessages.extend(get_messages_from_page(name))\n\n\n\t\t# reports\n\t\tfor name in frappe.db.sql_list(\"\"\"select tabReport.name from tabDocType, tabReport\n\t\t\twhere tabReport.ref_doctype = tabDocType.name\n\t\t\t\tand tabDocType.module in ({})\"\"\".format(modules)):\n\t\t\tmessages.append(('Report: ' + name, name))\n\t\t\tmessages.extend(get_messages_from_report(name))\n\t\t\tfor i in messages:\n\t\t\t\tif not isinstance(i, tuple):\n\t\t\t\t\traise Exception\n\n\t# workflow based on app.hooks.fixtures\n\tmessages.extend(get_messages_from_workflow(app_name=app))\n\n\t# custom fields based on 
app.hooks.fixtures\n\tmessages.extend(get_messages_from_custom_fields(app_name=app))\n\n\t# app_include_files\n\tmessages.extend(get_all_messages_from_js_files(app))\n\tmessages.extend(get_messages_from_include_files(app))\n\n\t# server_messages\n\tmessages.extend(get_server_messages(app))\n\treturn messages", "def add_context(self):\n return {}", "def collect(self, app):\n pass", "def messages(self, messages):\n self._messages = messages", "def fetch_background_messages(context):\n try:\n request = context['request']\n except KeyError:\n # It is possible in some cases that the request is not available\n return ''\n\n add_background_messages_to_contrib_messages(request)\n return ''", "def get_messages_for_app(app, deduplicate=True):\n\tmessages = []\n\tmodules = [frappe.unscrub(m) for m in frappe.local.app_modules[app]]\n\n\t# doctypes\n\tif modules:\n\t\tif isinstance(modules, str):\n\t\t\tmodules = [modules]\n\t\tfiltered_doctypes = (\n\t\t\tfrappe.qb.from_(\"DocType\").where(Field(\"module\").isin(modules)).select(\"name\").run(pluck=True)\n\t\t)\n\t\tfor name in filtered_doctypes:\n\t\t\tmessages.extend(get_messages_from_doctype(name))\n\n\t\t# pages\n\t\tfiltered_pages = (\n\t\t\tfrappe.qb.from_(\"Page\").where(Field(\"module\").isin(modules)).select(\"name\", \"title\").run()\n\t\t)\n\t\tfor name, title in filtered_pages:\n\t\t\tmessages.append((None, title or name))\n\t\t\tmessages.extend(get_messages_from_page(name))\n\n\t\t# reports\n\t\treport = DocType(\"Report\")\n\t\tdoctype = DocType(\"DocType\")\n\t\tnames = (\n\t\t\tfrappe.qb.from_(doctype)\n\t\t\t.from_(report)\n\t\t\t.where((report.ref_doctype == doctype.name) & doctype.module.isin(modules))\n\t\t\t.select(report.name)\n\t\t\t.run(pluck=True)\n\t\t)\n\t\tfor name in names:\n\t\t\tmessages.append((None, name))\n\t\t\tmessages.extend(get_messages_from_report(name))\n\t\t\tfor i in messages:\n\t\t\t\tif not isinstance(i, tuple):\n\t\t\t\t\traise Exception\n\n\t# workflow based on app.hooks.fixtures\n\tmessages.extend(get_messages_from_workflow(app_name=app))\n\n\t# custom fields based on app.hooks.fixtures\n\tmessages.extend(get_messages_from_custom_fields(app_name=app))\n\n\t# app_include_files\n\tmessages.extend(get_all_messages_from_js_files(app))\n\n\t# server_messages\n\tmessages.extend(get_server_messages(app))\n\n\t# messages from navbar settings\n\tmessages.extend(get_messages_from_navbar())\n\n\tif deduplicate:\n\t\tmessages = deduplicate_messages(messages)\n\n\treturn messages", "def msg_handler(self, msg):\n self.view.frame.log.append(msg)", "def messages(self, messages):\n\n self._messages = messages", "def messages(self, messages):\n\n self._messages = messages", "def messages(self, messages):\n\n self._messages = messages", "def handle_app_message(self, message):\n to_print = f'\\nMessage recieved from: {message.get_sender()}...\\n'\n to_print += message.data\n to_print += '\\nStar-node command:'\n print(to_print)\n self._log.write_to_log(\n \"Message\", f'Message received from {message.get_sender()} ')", "def add(self, context):\n self._contexts.add(context)", "def push_context(self):\n raise NotImplementedError()", "def app(self, app):\n\n self._app = app", "def handle_app(self, app, **options):\n raise NotImplementedError()", "def push_context(self, ctx):\n self._tpl_context = ctx", "def perform_transfig_for_each_app(self, context, app_name):\n logger.info('Spliting the partial configuraiton for [%s]' % app_name)\n\n context[CTX_KEY_COMMON_COLLECTD_JMX_APP_PREFIX] = app_name\n #initialize contenxt['mbean']\n 
context[CTX_KEY_COMMON_COLLECTD_JMX_MBEANS_SET] = {}\n inner_chain = ApplicationChainedTransfiguration()\n inner_chain.execute(context)", "async def messages(self, ctx):\n\n\t\tawait self.message_leaderboard(ctx, \"messages\")", "def bind_message_ctx(message: Message, **additional_ctx) -> Iterator[None]:\n ctx = dict(\n kafka_topic=message.topic(),\n kafka_partition=message.partition(),\n kafka_offset=message.offset(),\n )\n ctx.update(additional_ctx)\n muselog.context.bind(**ctx)\n try:\n yield\n finally:\n # Clear only our context.\n # This will clear the `ctx` keys even if their values have been overwritten\n # by our yieldee.\n muselog.context.unbind(*list(ctx.keys()))", "def register_shellcontext(app):\n\n def shell_context():\n \"\"\"Shell context objects.\"\"\"\n return {\"db\": db, \"User\": user.models.User, \"AnalysisRequest\": engine.models.AnalysisRequest}\n\n app.shell_context_processor(shell_context)", "def init_context(self, app: Flask):\n # copy the dict so we can freely ``pop`` things\n config = app.config.get('BACKENDS_CONFIG', {}).get(self.name, {}).copy()\n\n try:\n self.support_mail = config.pop('support_mail')\n except KeyError:\n pass\n\n for key in config.keys():\n logger.warning(\"Ignoring unknown key '%s'\", key,\n extra={'data': {'config': config}})\n\n if self._init_context:\n return self._init_context(app)" ]
[ "0.6293802", "0.59380424", "0.5907387", "0.5889862", "0.5763125", "0.5606945", "0.56038916", "0.5568187", "0.556246", "0.55386215", "0.55042225", "0.54550314", "0.54425555", "0.5426857", "0.54005486", "0.53776485", "0.5357521", "0.5357521", "0.5357521", "0.53525347", "0.53438675", "0.53397727", "0.53334874", "0.5303901", "0.5276267", "0.5237733", "0.52262694", "0.5204322", "0.5181866", "0.51738364" ]
0.6333382
0
Combines Excel spreadsheets of quality control data into a single file called a pickle file. This file format is specific to Python, and it is very fast to load once created. This function expects a path to a folder of raw data containing .xlsx files. For example, the path
def pickle_data(path=PATH_TO_RAW_DATA): files = os.listdir(path) xlsx_files = [path+"./"+f for f in files if f[-4:] == 'xlsx'] print("Beginning to read excel sheets...will take a few minutes") df_list = [pd.read_excel(f) for f in xlsx_files] master_df = pd.concat(df_list) master_df.to_pickle(path+"./qc_data.pkl")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_workbook(path):\n wb = openpyxl.load_workbook(path, read_only=True)\n return wb", "def save(self):\n\n date = datetime.utcnow().strftime(\"%Y-%m-%d\")\n directory = '%s/xls/%s/' % (PROJECT_DIR, date)\n _file = directory + '/' + self.xls.name\n if not os.path.exists(directory):\n os.makedirs(directory)\n with open(_file, 'wb+') as destination:\n [destination.write(chunk) for chunk in self.xls.chunks()]\n self.batch(_file)", "def save_xls(self,basepath=''): \n self.generate_xls()\n self.wb.save(basepath+self.filename+'.xls')", "def combine_excel_files(end_producer, step_producer, spec):\n glob.glob(\"excel/*.xlsx\")\n timestr = get_time()\n start_producer = spec['num_of_producers']\n try:\n if not os.listdir('merged-excel-docs'):\n print('Folder empty no need to remove files')\n os.mkdir('merged-excel-docs')\n except FileNotFoundError:\n os.mkdir('merged-excel-docs')\n\n writer = pd.ExcelWriter('merged-excel-docs/combined-result' + timestr + '.xlsx', engine='xlsxwriter')\n for ind_p in range(start_producer, end_producer, step_producer):\n all_data = pd.DataFrame()\n sheetID = str(ind_p)\n for f in glob.glob(\"excel/*.xlsx\"):\n df = pd.read_excel(f, \"P_\" + sheetID)\n all_data = all_data.append(df, ignore_index=True)\n all_data.to_excel(writer, sheet_name=\"P_\" + sheetID)\n writer.save()", "def _pickle_path(self) -> Path:\r\n return self.output_path / \"pickles\"", "def run(xlsx_path, output_dir):\n logger = loggerHandler.Logger(__file__)\n xlsx_dict = bp.build_strings_dict(xlsx_path, logger)\n bp.xlsx_dict_to_strings_file(xlsx_dict, output_dir, logger)", "def export_excel(header, data):\n tmp = NamedTemporaryFile()\n wb = Workbook()\n ws = wb.active\n\n ws.append(header)\n for row in export_data(data, header):\n ws.append(row)\n\n wb.save(tmp.name)\n tmp.seek(0)\n\n return tmp", "def export_all_to_excel(input_hdf5, out_directory_path):\n data_store = pd.HDFStore(input_hdf5) # Opening the HDF5 file\n for each_key in data_store.keys():\n data_store[each_key].to_excel(out_directory_path + each_key + \".xlsx\")\n # '/' missing between folder name and\n # file name because file name already includes it.\n data_store.close()\n\n print(\"-- Dataframes written to Excel files (.xlsx) --\")", "def saveAll(self):\r\n path = saveFile(ftype='xlsx')\r\n writer = pd.ExcelWriter(path)\r\n df = pd.DataFrame(self.saveAll)\r\n df.to_excel(writer, header=False, index=False)\r\n writer.save()\r\n \r\n #Format the excel file\r\n try:\r\n import openpyxl\r\n from openpyxl.styles import Alignment, Font, Border, Side\r\n #Load the workbook and worksheet\r\n wb = openpyxl.load_workbook(filename=path)\r\n ws = wb.get_sheet_by_name(\"Sheet1\")\r\n cells = ['E1','H1','K1','N1','Q1','T1','W1','Z1']\r\n ws.merge_cells('E1:G1')\r\n ws.merge_cells('H1:J1')\r\n ws.merge_cells('K1:M1')\r\n ws.merge_cells('N1:P1')\r\n ws.merge_cells('Q1:S1')\r\n ws.merge_cells('T1:V1')\r\n ws.merge_cells('W1:Y1')\r\n ws.merge_cells('Z1:AB1')\r\n #Bold and center the headers\r\n ft = Font(bold=True)\r\n for cell in cells:\r\n ws[cell].alignment = Alignment(horizontal=\"center\")\r\n ws[cell].font = ft\r\n #Add borders\r\n rows,_ = self.saveAll.shape\r\n for i in range(rows):\r\n for cell in cells:\r\n c = cell[0]+str(i+1)\r\n ws[c].border = Border(left=Side(style='thin'))\r\n\r\n \r\n \r\n wb.save(path)\r\n \r\n except ImportError:\r\n pass", "def test_read_excel_big(test_mp, tmp_path):\n tmp_path /= \"output.xlsx\"\n\n # Write a 25-element parameter with max_row=10 → split across 3 sheets\n scen = ixmp.Scenario(test_mp, 
**models[\"dantzig\"], version=\"new\")\n add_random_model_data(scen, 25)\n scen.to_excel(tmp_path, items=ixmp.ItemType.MODEL, max_row=10)\n\n # Initialize target scenario for reading\n scen_empty = ixmp.Scenario(test_mp, \"foo\", \"bar\", version=\"new\")\n scen_empty.init_set(\"random_set\")\n scen_empty.init_par(\n \"random_par\", scen.idx_sets(\"random_par\"), scen.idx_names(\"random_par\")\n )\n\n # File can be read\n scen_empty.read_excel(tmp_path)\n\n assert len(scen_empty.par(\"random_par\")) == 25", "def generate_postprocessed_files():\n get_excel_file = pd.ExcelFile('global_output.xlsx')\n get_sheet_names = get_excel_file.sheet_names\n\n writer = pd.ExcelWriter('master_ouput.xlsx', engine='xlsxwriter')\n for sheet in get_sheet_names:\n try:\n all_data = pd.DataFrame()\n sheetID = str(sheet)\n data = pd.read_excel('global_output.xlsx', sheet, dtype={'id': str})\n grouped_data = data.groupby(['Total Producers', 'Correct Producers Ratio', 'Collected Updates Ratio',\n 'Collected Votes Ratio', 'Collected Final Votes Ratio'], as_index=False)[\n 'Total Correct Ln(prod)',\n 'runs', 'Total Correct Ln(vote)',\n 'Runs With All Ln(prod)',\n 'Runs With All Ln(vote)',\n 'Runs With > 50% Correct', 'Runs With = Cn'].sum()\n\n grouped_data['num_correct_producers_Ln_prod'] = grouped_data['Total Correct Ln(prod)'] / grouped_data[\n 'runs']\n grouped_data['num_correct_producers_Ln_vote'] = grouped_data['Total Correct Ln(vote)'] / grouped_data[\n 'runs']\n grouped_data['percentage_for_50_%'] = (grouped_data['Runs With > 50% Correct'] / grouped_data['runs']) * 100\n grouped_data['Percentage Runs With = Cn'] = (grouped_data['Runs With = Cn'] / grouped_data['runs']) * 100\n\n all_data = all_data.append(grouped_data, ignore_index=True)\n\n all_data.to_excel(writer, sheet_name=sheet)\n except KeyError:\n continue\n writer.save()\n print(\"Merged File\")", "def convert_xls_to_xlsx(inp_dict):\n if inp_dict[\"out\"]:\n for fname in inp_dict[\"out\"]:\n excel = EnsureDispatch('Excel.Application')\n fname = os.path.abspath(fname.encode(\"utf-8\"))\n fname = os.path.abspath(fname.decode(\"utf-8\"))\n wb = excel.Workbooks.Open(fname)\n excel.DisplayAlerts = False\n wb.SaveAs(fname+\"x\", FileFormat=51) #FileFormat = 51 is for .xlsx extension\n wb.Close() #FileFormat = 56 is for .xls extension\n excel.Application.Quit()\n excel.DisplayAlerts = True\n inp_dict[\".xlsx\"].append(fname+\"x\")\n inp_dict[\"out\"] = []\n return inp_dict", "def create_workbook(self):\n try:\n if '.xlsm' in self.file_name or '.xltm' in self.file_name:\n self.wb = load_workbook(self.file_path, keep_vba=True)\n else:\n if '.xlsx' not in self.file_name:\n self.file_name = self.file_name + '.xlsx'\n self.wb = load_workbook(os.path.join(self.file_path, self.file_name))\n except Exception as e:\n self.wb = Workbook()\n\n sheet_names = self.wb.sheetnames\n if self.sheet_name in sheet_names:\n self.ws = self.wb[self.sheet_name]\n else:\n self.ws = self.wb.create_sheet(title=self.sheet_name)\n\n if self.data_len < self.ws.max_row:\n self.clear_sheet()\n self.wb.save(os.path.join(self.file_path, self.file_name))\n\n self.write_to_sheet()\n\n self.wb.save(os.path.join(self.file_path, self.file_name))", "def to_xlsx(self, filename):\n # create path if it does not exist\n suffix = filename.split(\".\")[-1]\n if not suffix == \"xlsx\":\n filename = filename + \".xlsx\"\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n writer = pd.ExcelWriter(filename)\n for name, df in sorted(self.input_data.items()):\n df.to_excel(writer, name)\n 
writer.save()\n logging.info(\"Scenario saved as excel file to %s\", filename)", "def stage_output(stage_file, excel_filepath):\n # Creates a raw dataframe for the raw excel sheet and a dataframe\n # to be used in the aggregation.\n df_raw, df, site, site_code = _data_reader(stage_file)\n df_raw = df_raw.rename(columns={'Unnamed: 0': ''})\n\n # Seperate data into years and creates an excel file for each year.\n start_year = df.index[0].to_pydatetime()\n start_year = start_year.year\n years = np.arange(start_year, 2017)\n years_str = np.array([str(x) for x in years])\n for year in years_str:\n df_year = df.loc[year]\n df_15_reindex, df_30_reindex, df_1h_reindex, df_1d_reindex = _resampler(\n df_year, year)\n del df_year\n # Saving data to a excel file if to_excel is True.\n save_path = (excel_filepath + site + '_'\n + str(year) + '_stage_data.xlsx')\n\n # Takes raw and each time interval of data and creates a sheet for each.\n writer = pd.ExcelWriter(save_path, engine='xlsxwriter',\n datetime_format='m/d/yyyy h:mm',\n date_format='m/d/yyyy')\n df_raw.to_excel(writer, 'raw_stationID_' + site_code, index=False)\n df_15_reindex.to_excel(writer, '15min', index=False, na_rep='#N/A')\n df_30_reindex.to_excel(writer, '30min', index=False, na_rep='#N/A')\n df_1h_reindex.to_excel(writer, 'hourly', index=False, na_rep='#N/A')\n df_1d_reindex.to_excel(writer, 'daily', index=False, na_rep='#N/A')\n\n # Formatting of the excel sheets. Without format1 the time is saved\n # in decimal form in the excel sheet.\n workbook = writer.book\n format1 = workbook.add_format({'num_format': 'h:mm'})\n worksheet_raw = writer.sheets['raw_stationID_' + site_code]\n worksheet_15 = writer.sheets['15min']\n worksheet_30 = writer.sheets['30min']\n worksheet_1h = writer.sheets['hourly']\n worksheet_1d = writer.sheets['daily']\n worksheets = [worksheet_15, worksheet_30, worksheet_1h, worksheet_1d]\n for worksheet in worksheets:\n worksheet.set_column('A:L', 22)\n worksheet.set_column('D:E', 22, format1)\n worksheet_raw.set_column('A:F', 20)\n writer.save()\n workbook.close()\n\n # Deletes dataframes after each year loop to save memory.\n del df_15_reindex, df_30_reindex, df_1h_reindex, df_1d_reindex\n # Deletes dataframes after the year loop is completed to save memory.\n del df_raw, df\n return", "def mono_sheet(self):\n xls = pandas.read_excel(str(self.source))\n xls.to_csv(str(self.dest), **self.kwargs)", "def __call__(self):\n # Create path if not exists #\n self.path.directory.create_if_not_exists()\n # Create a writer #\n self.writer = pandas.ExcelWriter(str(self.path), engine='xlsxwriter')\n # Create a sheet per every key #\n for key in self.sheet_to_dfs:\n worksheet = self.writer.book.add_worksheet(key)\n self.writer.sheets[key] = worksheet\n # Write each sheet #\n for key in self.sheet_to_dfs: self.write_one_sheet(key)\n # Save #\n self.writer.save()\n # Return #\n return self.path", "def create_output_file(self):\r\n self.output_file = openpyxl.Workbook()", "def add_excel(path=None, names=None):\r\n walker = WalkUserData()\r\n fnamelist = walker.dir_process(1, path, style=\"fnamelist\")", "def preload_data_from_excel(path_to_file):\n with open(path_to_file, \"rb\") as f:\n df = pd.read_excel(f)\n\n return df", "def csv_from_excel(path=os.getcwd()):\n path = path + '/*.xls*'\n files = glob.glob(path)\n\n for i in files:\n file = os.path.basename(i)\n filename = os.path.splitext(file)[0]\n xls_file = pd.ExcelFile(i, index_col=None, dtype=object)\n if len(xls_file.sheet_names) > 1:\n try:\n os.mkdir(filename)\n except 
OSError:\n print('Could not create directory to output to.')\n for x in xls_file.sheet_names:\n file = pd.read_excel(xls_file, x, index_col=None, dtype=object)\n file.to_csv(filename + '/' + x + '.csv', quoting=1, index=False)\n\n else:\n file = xls_file.parse()\n file.to_csv(filename + '.csv', quoting=1, index=False)", "def split_excel_files(self):\n for x in self.files:\n if x[-4:] not in [\".xls\", \"xlsx\"]:\n continue\n else:\n files = pd.read_excel(x, sheet_name=None)\n for k, v in files.items():\n #get name with the extension stripped\n name = k.split(\".\")[0]\n out_path = x.split(\".\")[0]\n try:\n os.mkdir(out_path)\n except:\n print(\"directory exists\")\n v.to_csv(f\"{out_path}/{name}.csv\", index=False)\n os.remove(x)\n self.files = [os.path.join(dp, f) for dp, dn, fn in os.walk(self.path) for f in fn]\n self.csv_files = [x for x in self.files if x[-3:] == \"csv\"]", "def create_file(employee_list, path):\n employees_dict = create_employee_structure(employee_list)\n return excel_out(employees_dict, path)", "def process_files(geodata_name, inp_dict):\n input_paths = inp_dict[\".xls\"][:]\n try:\n data = geodata(geodata_name)\n except UnicodeDecodeError:\n showerror(\"Ошибка кодирования\", \"Файл данных должен быть закодирован в utf-8\")\n data = geodata(askopenfilenames(initialdir=os.path.abspath(os.getcwd()), filetypes=[(\"Файл данных txt\", \".txt\")], title=\"Выберите файл данных txt\")[0])\n\n\n for book in input_paths:\n book_flag = False\n with open_workbook(book, formatting_info=True) as rb:\n header = False\n wb = copy(rb)\n for numb, sheet in enumerate(rb.sheets()):\n column = \"False\"\n for row in range(sheet.nrows):\n if column != \"False\":\n for data_row in data:\n if sheet.cell(row, column).value == data_row[0]:\n sheet_wb = wb.get_sheet(numb)\n sheet_wb.write(row, sheet.ncols, data_row[1])\n sheet_wb.write(row, sheet.ncols+1, data_row[2])\n break\n else:\n for col in range(sheet.ncols):\n for data_row in data:\n if sheet.cell(row, col).value == data_row[0]:\n column = col\n book_flag = True\n sheet_wb = wb.get_sheet(numb)\n sheet_wb.write(row, sheet.ncols, data_row[1])\n sheet_wb.write(row, sheet.ncols+1, data_row[2])\n if not header:\n header = True\n style_list = get_xlwt_style_list(rb)\n wb.get_sheet(numb).write(0, sheet.ncols, u\"Широта\", style=style_list[sheet.cell_xf_index(0, 0)])\n wb.get_sheet(numb).write(0, sheet.ncols+1, u\"Долгота\", style=style_list[sheet.cell_xf_index(0, 0)])\n break\n if book_flag:\n if not os.path.isdir(\"out\"):\n os.mkdir(\"out\")\n f_out = get_output_name(book)\n wb.save(f_out)\n inp_dict[\"del\"].append(f_out)\n inp_dict[\"out\"].append(f_out)\n return inp_dict", "def save_feedback_xlsx(\n df_summary: pd.DataFrame,\n df_city_no_zip: pd.DataFrame,\n df_zip_no_city: pd.DataFrame,\n df_zipCity_no_address: pd.DataFrame,\n df_address_no_zipCity: pd.DataFrame,\n df_no_address_at_all: pd.DataFrame,\n df_invalid_matrices: pd.DataFrame,\n df_employees: pd.DataFrame,\n path: str,\n):\n full_path = os.path.join(\n path,\n f\"feedback_{dt.datetime.strftime(dt.datetime.now(), '%Y-%m-%d-%H-%M-%S')}.xlsx\",\n )\n writer = pd.ExcelWriter(full_path, engine=\"xlsxwriter\")\n df_summary.to_excel(writer, sheet_name=\"SUMMARY\", index=False)\n df_invalid_matrices.to_excel(writer, sheet_name=\"invalid_matrices\", index=False)\n df_address_no_zipCity.to_excel(writer, sheet_name=\"address_no_zipCity\", index=False)\n df_no_address_at_all.to_excel(writer, sheet_name=\"no_address_at_all\", index=False)\n df_zipCity_no_address.to_excel(writer, 
sheet_name=\"zipCity_no_address\", index=False)\n df_zip_no_city.to_excel(writer, sheet_name=\"zip_no_city\", index=False)\n df_city_no_zip.to_excel(writer, sheet_name=\"city_no_zip\", index=False)\n df_employees.to_excel(writer, sheet_name=\"employees\", index=False)\n\n for sheet in writer.sheets.values():\n sheet.set_column(\"A:E\", 35)\n\n writer.save()", "def to_pickle(self, path: Union[str, Path]) -> None:\n with open(path, 'wb') as handle:\n pickle.dump(self, handle)", "def driver():\n\n directory = r\"C:/Users/Aftab Alam/Documents/GitHub\"\n directory = directory + r\"/SRM-placement-analyser/data/\"\n fileList = [directory+\"InfosysResult.xlsx\",directory+\"TCSResult.xlsx\",directory+\"CognizantResult.xlsx\",directory+\"WiproResult.xlsx\"]\n \n listOfPlaced = extractCommonData.extractCommonData(fileList)\n createNewExcelSheet(directory,listOfPlaced)", "def write_pickle(data, path):\n with open(path, 'wb') as handle:\n pickle.dump(data, handle)", "def excel_output(df):\n output = io.BytesIO()\n #time = str(date.today())\n #filename = \"output \"+time+\".xlsx\"\n writer = pd.ExcelWriter(output, engine='xlsxwriter', options={'remove_timezone': True})\n #writer.book.filename = io\n df.to_excel(writer,'Sheet1', index=False, header=True)\n writer.save()\n xlsx_data = output.getvalue()\n return xlsx_data", "def write_dataframe_to_excel(d, name, path=''):\n name = name if len(name.split()) == 1 else name\n filepath = join(path, name)\n d.to_excel(filepath)" ]
[ "0.5912495", "0.59047276", "0.58891195", "0.5707316", "0.56871253", "0.5675712", "0.567314", "0.5660493", "0.5642101", "0.55804497", "0.55674464", "0.5544291", "0.55422443", "0.55234957", "0.550397", "0.54869187", "0.54769266", "0.5476461", "0.5475492", "0.5408885", "0.5401098", "0.5389909", "0.5387088", "0.538008", "0.5374509", "0.5362024", "0.536152", "0.53501457", "0.53492755", "0.53443784" ]
0.7810054
0
Checks rotation matrix for d = 2.
def test_d_2(): rs = 10 d = 2 np.random.seed(rs) num = 3 theta = np.random.uniform(0, 2 * math.pi) rotation = np.identity(d) rotation[0, 0] = math.cos(theta) rotation[0, 1] = - math.sin(theta) rotation[1, 0] = math.sin(theta) rotation[1, 1] = math.cos(theta) np.random.seed(rs) rotation_function = mt_obj.calculate_rotation_matrix(d, num) assert(np.all(rotation == rotation_function))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _is_rotation_matrix(self, R):\n Rt = np.transpose(R)\n shouldBeIdentity = np.dot(Rt, R)\n I = np.identity(3, dtype=R.dtype)\n n = np.linalg.norm(I - shouldBeIdentity)\n return n < 1e-6", "def isRotationMatrix(self, R):\n Rt = np.transpose(R)\n shouldBeIdentity = np.dot(Rt, R)\n I = np.identity(3, dtype=R.dtype)\n n = np.linalg.norm(I - shouldBeIdentity)\n return n < 1e-6", "def verify_legal_rotation(self, direction):\n test_figure = None\n if direction == \"CW\":\n test_figure = self.get_block_positions(self.active_piece.get_cw_rotation())\n elif direction == \"CCW\":\n test_figure = self.get_block_positions(self.active_piece.get_ccw_rotation())\n\n for b_x, b_y in test_figure:\n if b_x < 0 or b_x >= self.WIDTH:\n return False\n\n if b_y < 0 or b_y >= self.HEIGHT:\n return False\n\n if self.board[b_y][b_x] != 0:\n return False\n return True", "def test_asssert_rotation_matrix_behaves_like_check_matrix():\n random_state = np.random.RandomState(2345)\n for _ in range(5):\n a = pr.random_axis_angle(random_state)\n R = pr.matrix_from_axis_angle(a)\n original_value = R[2, 2]\n for error in [0, 1e-8, 1e-7, 1e-5, 1e-4, 1]:\n R[2, 2] = original_value + error\n try:\n pr.assert_rotation_matrix(R)\n pr.check_matrix(R)\n except AssertionError:\n assert_raises_regexp(\n ValueError, \"Expected rotation matrix\", pr.check_matrix, R)", "def check_matrix(self, matrix):\n for i in range(self.size):\n if (matrix[0][i] + matrix[-1][i] == i % 2 or matrix[0][i] + matrix[-1][i] == (i + 1) % 2) and (\n matrix[i][0] + matrix[i][-1] == i % 2 or matrix[i][0] + matrix[i][-1] == (i + 1) % 2):\n pass\n else:\n logging.debug(\"Matrix detection failed. Matrix passed to function \" + str(matrix))\n return False\n for i in range(self.size):\n for j in range(self.size):\n if matrix[i][j] > 1:\n logging.debug(\"Matrix detection failed. Matrix passed to function \" + str(matrix))\n return False\n logging.info(\"Matrix detected : \" + str(matrix))\n return True", "def _rotations_guard_clauses(R1: Union[list, np.ndarray], R2: Union[list, np.ndarray]) -> None:\n for label, rotation_matrix in zip(['R1', 'R2'], [R1, R2]):\n if not isinstance(rotation_matrix, (list, np.ndarray)):\n raise TypeError(f\"{label} must be an array. Got {type(rotation_matrix)}\")\n r1, r2 = np.copy(R1), np.copy(R2)\n for rotation_matrix in [r1, r2]:\n if rotation_matrix.shape[-2:] != (3, 3):\n raise ValueError(f\"Rotation matrices must be of shape (N, 3, 3) or (3, 3). 
Got {rotation_matrix.shape}.\")\n r1_shape, r2_shape = r1.shape, r2.shape\n if r1_shape != r2_shape:\n raise ValueError(f\"Cannot compare R1 of shape {r1_shape} and R2 of shape {r2_shape}.\")", "def test_check_matrix_threshold():\n R = np.array([\n [-9.15361835e-01, 4.01808328e-01, 2.57475872e-02],\n [5.15480570e-02, 1.80374088e-01, -9.82246499e-01],\n [-3.99318925e-01, -8.97783496e-01, -1.85819250e-01]])\n pr.assert_rotation_matrix(R)\n pr.check_matrix(R)", "def test_skel_rotation_fail(self):\n cmds.file(f=1, new=1)\n cmds.mayaUSDImport(file=self.skel_file, ani=1)\n\n values = cmds.keyframe('joint1.rx', q=1, vc=1)\n self.assertNotAlmostEqual(0.0, values[-1])", "def goodDLK_2(d,l,k) :\n if (d == 0) and ((l != 0) or (k != 3)) :\n return False\n return ((2*l + k - (3*d -1)) % 4 in [0,3])", "def checkDiag(mat, i, j):\n res = mat[i][j]\n i += 1\n j += 1\n while i < col and j < row:\n if mat[i][j] != res:\n return False\n i += 1\n j += 1\n return True", "def symmetric2dTest(matrix2d):\n \n # is the matrix 2-d?\n if len(np.shape(matrix2d)) != 2:\n raise ValueError(\"Matrix dimensions are not equal to 2.\")\n matrix2d = np.array(matrix2d)\n\n # create boolean for whether 2-d matrix = its transpose\n symmBool = (matrix2d == matrix2d.T).all()\n \n\n if symmBool == False:\n print(\"Matrix not symmetric.\")\n print(\"Max assymetry = \",np.max(matrix2d-matrix2d.T))\n\n return symmBool", "def test_rotation_isometry(self):\n import numpy\n\n # test for all kinds of curvatures K\n for k in (0, 1, -1, 1/11, -1/11, 11, -2):\n \n s = space(curvature=k)\n\n # use a small enough magnitude to not break math for very negative K\n magic = 0.33377777373737737777\n # 1/sqrt(2)\n s2_ref = 0.707106781186547524400844362104785\n\n o = s.make_origin(2)\n p = s.make_point((1, 0), magic)\n q = s.make_point((s2_ref, s2_ref), magic)\n\n rot = space_point_transform(\n numpy.array([[1,0,0],[0,s2_ref,-s2_ref],[0,s2_ref,s2_ref]]),\n curvature=k,\n math = common_math\n )\n\n f, g, i = map(space_point_transform, (p, q, o))\n\n def check_transform_eq(t1, t2, invert=False):\n for ref in (\n s.make_point((5/13, 12/13), magic),\n s.make_point((-3/5, 4/5), magic)\n ):\n self.assertTrue(invert ^ point_isclose(\n t1(ref),\n t2(ref),\n abs_tol = 1e-12\n ))\n\n # 1/8 turn, times 8\n check_transform_eq(rot*8, i)\n\n # rotate, shift, rotate\n check_transform_eq(g, rot + f + rot * -1)\n\n # the other way\n check_transform_eq(f, rot * -1 + g + rot)", "def assertDiagonal(curRunMatrix):\n\tnRow = len(curRunMatrix)\n\n\t# Ignore if only two runs as a 2x2 matrix does not have a diagonal\n\tif nRow == 2:\n\t\treturn\n\tfor i in range(0,nRow):\n\t\tassert(curRunMatrix[i][i] != 0), \"Metadata file contains file that was not present in score file. Remove from metadata file. 
%s line\" %(i)\n\treturn", "def test_rotate_down(self):\n # Testing 'down' rotation clockwise\n side = 'D'\n rotation = list(self.cube.rotate_cube(side))\n result = [np.array([['g', 'g'], ['r', 'r']], dtype='<U1'),\n np.array([['y', 'y'], ['y', 'y']], dtype='<U1'),\n np.array([['o', 'o'], ['g', 'g']], dtype='<U1'),\n np.array([['w', 'w'], ['w', 'w']], dtype='<U1'),\n np.array([['b', 'b'], ['o', 'o']], dtype='<U1'),\n np.array([['r', 'r'], ['b', 'b']], dtype='<U1')]\n\n np.testing.assert_array_equal(rotation, result)", "def _findrotationmatrix(ccdata1, ccdata2):\n natoms = ccdata1.natom\n J = np.zeros((3, 3), dtype=np.float)\n\n for i in range(natoms):\n J += np.outer(ccdata1.atomcoords[0][i], ccdata2.atomcoords[0][i])\n\n U, s, V = np.linalg.svd(J)\n\n R = np.transpose(np.dot(V, np.transpose(U)))\n\n return R", "def square2_checker(self, x, y, row2, col2):\n \n self.x = x\n self.y = y\n self.row2 = row2\n self.col2 = col2\n\n return abs(self.x - self.row2) == 1 and self.col2 == self.y \\\n or abs(self.y - self.col2) == 1 and self.row2 == self.x", "def rotate_2d_matrix(matrix):\n n = len(matrix)\n\n def rotate(i, j, step):\n if step < 4:\n rotate(j, n - 1 - i, step + 1)\n matrix[i][j] = matrix[n - 1 - j][i]\n if step == 0:\n matrix[i][j] = val\n\n for i in range(n // 2):\n for j in range(i, n - 1 - i):\n val = matrix[n - 1 - j][i]\n rotate(i, j, 0)", "def rotate(matrix):\n n, = np.shape(matrix)\n x = np.zeros(n, ) # Column vector of unknown\n\n \"\"\"\n Reduction of the matrix to\n a triangular form\n \"\"\"\n for i in range(0, n):\n for j in range(i + 1, n):\n a = matrix[i, i]\n b = matrix[j, i]\n c = a / m.sqrt(a * a + b * b)\n s = b / m.sqrt(a * a + b * b)\n for k in range(i, n + 1):\n t = matrix[i, k]\n matrix[i, k] = (c * matrix[i, k]) + (s * matrix[j, k])\n matrix[j, k] = (-s * t) + (c * matrix[j, k])\n\n \"\"\"\n Back stroke from the Gauss method\n \"\"\"\n for i in range(n - 1, -1, -1):\n summ = 0\n for j in range(i + 1, n):\n summ += matrix[i, j] * x[j]\n summ = matrix[i, n] - summ\n if matrix[i, i] == 0:\n return False\n x[i] = summ / matrix[i, i]\n\n i = 0\n while i < len(x):\n x[i] = int((x[i] * 10000) + 0.5) / 10000\n i += 1\n\n \"\"\"\n Vector of discrepancy (Ax - B)\n \"\"\"\n a, b = create_matrix_txt(form='normal')\n discrep = np.dot(a, x)\n discrep = discrep - b\n\n print(\"Method of rotation:\\n\")\n print(\"Vector discrepancy: \", discrep)\n print(\"Vector x: \", x, \"\\n\")\n\n return x", "def test_revolute_from_dh(self):\n x_offset = 1\n z_offset = 2\n # Rotate around the z axis\n r = Joint.revolute_from_dh(0, 0, x_offset, z_offset)\n t_mat = r(np.array([np.pi / 2]))\n rot_vec = np.dot(t_mat[:3, :3], np.array([1, 0, 0]))\n self.assertTrue(np.allclose(\n rot_vec, np.array([0, 1, 0]), rtol=1e-5, atol=1e-5))\n self.assertTrue(np.allclose(t_mat[2, 3], z_offset))\n # x was rotated 90 degrees, and is now y\n self.assertTrue(np.allclose(t_mat[1, 3], x_offset))", "def is_orientation_ok(image,k=2,is_first=True):\n\n mid_x, mid_y = int(0.5*image.shape[1]), int(0.5*image.shape[0])\n\n # Get moment for first body half \n image_0 = np.array(image)\n image_0[:,:int(mid_x)] = 0\n image_0 = image_0[:,int(mid_x):]\n moment_0 = get_moment(image_0,k)\n\n # Get moment for second body half\n image_1 = np.array(image)\n image_1[:,int(mid_x):] = 0\n image_1 = np.fliplr(image_1)\n image_1 = image_1[:,int(mid_x):]\n moment_1 = get_moment(image_1,k)\n\n # Compute descriminant and flip flag\n discrim = (moment_0 - moment_1)/(moment_0 + moment_1)\n if discrim < 0:\n ok = False\n else:\n ok = True 
\n return ok, discrim", "def isDiagonal(self):\n raise Exception('Deprecated')\n return self.direction % 2 == 1", "def _check_rotation(spec_nest, path):\n spec = _get_from_nest(spec_nest, path)\n if spec is not None and not isinstance(spec, primitives_pb2.RotationType):\n raise InvalidSpecError(\n f'{\"/\".join(path)} was expected to be of type Rotation, but is instead '\n f'{type(spec)}')", "def check_diag(self):\r\n if self.grid[4][-1] != ' ':\r\n if self.grid[0][-1] == self.grid[4][-1] and self.grid[4][-1] == self.grid[8][-1]:\r\n return (4, (self.grid[0], self.grid[8]))\r\n elif self.grid[2][-1] == self.grid[4][-1] and self.grid[4][-1] == self.grid[6][-1]:\r\n return (4, (self.grid[2], self.grid[6]))\r\n return (-1, None)", "def match(cube):\n \n #M1'\n M1 = (cube[1,1,0] & cube[1,1,1] & \n (not cube[0,0,2]) & (not cube[1,0,2]) & (not cube[2,0,2]) &\n (not cube[0,1,2]) & (not cube[1,1,2]) & (not cube[2,1,2]) &\n (not cube[0,2,2]) & (not cube[1,2,2]) & (not cube[2,2,2]));\n if M1:\n return True;\n \n # gerate rotations around z/vertical axis\n cuberots = [rotate(cube, axis = 2, steps = rot) for rot in range(4)];\n #print('Cube rotations:');\n #[printCube(c) for c in cuberots] \n \n # M2' and all rotations\n for curo in cuberots:\n M2 = (curo[1,1,0] & curo[1,1,1] & curo[1,2,1] &\n (not curo[0,0,2]) & (not curo[1,0,2]) & (not curo[2,0,2]) &\n (not curo[0,1,2]) & (not curo[1,1,2]) & (not curo[2,1,2]));\n if M2:\n return True;\n \n # M3' and all rotations\n for curo in cuberots:\n M3 = (curo[1,1,0] & curo[1,1,1] & curo[1,2,1] & curo[2,1,1] &\n (not curo[0,0,2]) & (not curo[1,0,2]) &\n (not curo[0,1,2]) & (not curo[1,1,2]));\n if M3:\n return True;\n \n # M4' and all rotations\n for curo in cuberots:\n M4 = (curo[1,1,0] & curo[1,1,1] & curo[2,2,1] & curo[2,2,2] &\n (not curo[0,0,2]) & (not curo[1,0,2]) & (not curo[2,0,2]) &\n (not curo[0,1,2]) & (not curo[1,1,2]) & (not curo[2,1,2]) &\n (not curo[0,2,2]) & (not curo[1,2,2]));\n if M4:\n return True;\n \n # M5' and all rotations\n for curo in cuberots:\n M5 = (curo[1,2,0] & curo[1,1,1] & \n (not curo[0,0,0]) & (not curo[1,0,0]) & (not curo[2,0,0]) &\n (not curo[1,1,0]) &\n (not curo[0,0,1]) & (not curo[1,0,1]) & (not curo[2,0,1]) &\n (not curo[0,0,2]) & (not curo[1,0,2]) & (not curo[2,0,2]) &\n (not curo[0,1,2]) & (not curo[1,1,2]) & (not curo[2,1,2]) &\n (not curo[0,2,2]) & (not curo[1,2,2]) & (not curo[2,2,2]));\n if M5:\n return True;\n \n # M6' and all rotations\n for curo in cuberots:\n M6 = (curo[2,1,0] & curo[1,2,0] & curo[1,1,1] &\n (not curo[0,0,0]) & (not curo[1,0,0]) &\n (not curo[0,1,0]) & (not curo[1,1,0]) &\n (not curo[0,0,1]) & (not curo[1,0,1]) &\n (not curo[0,1,1]) &\n (not curo[0,0,2]) & (not curo[1,0,2]) & (not curo[2,0,2]) &\n (not curo[0,1,2]) & (not curo[1,1,2]) & (not curo[2,1,2]) &\n (not curo[0,2,2]) & (not curo[1,2,2]) & (not curo[2,2,2]));\n if M6:\n return True;\n \n # M7' and all rotations\n for curo in cuberots:\n M7 = (curo[2,2,0] & curo[1,1,1] &\n (not curo[0,0,0]) & (not curo[1,0,0]) & (not curo[2,0,0]) &\n (not curo[0,1,0]) & (not curo[1,1,0]) & (not curo[2,1,0]) &\n (not curo[0,2,0]) & (not curo[1,2,0]) &\n (not curo[0,0,1]) & (not curo[1,0,1]) & (not curo[2,0,1]) &\n (not curo[0,1,1]) & (not curo[2,1,1]) &\n (not curo[0,2,1]) & (not curo[1,2,1]) & (not curo[2,2,1]) &\n (not curo[0,0,2]) & (not curo[1,0,2]) & (not curo[2,0,2]) &\n (not curo[0,1,2]) & (not curo[1,1,2]) & (not curo[2,1,2]) &\n (not curo[0,2,2]) & (not curo[1,2,2]) & (not curo[2,2,2]));\n if M7:\n return True;\n \n return False;", "def 
test_rotate_down_counter(self):\n # Testing 'down' rotation counter-clockwise\n side = 'Dr'\n rotation = list(self.cube.rotate_cube(side))\n result = [np.array([['g', 'g'], ['g', 'g']], dtype='<U1'),\n np.array([['y', 'y'], ['y', 'y']], dtype='<U1'),\n np.array([['o', 'o'], ['o', 'o']], dtype='<U1'),\n np.array([['w', 'w'], ['w', 'w']], dtype='<U1'),\n np.array([['b', 'b'], ['b', 'b']], dtype='<U1'),\n np.array([['r', 'r'], ['r', 'r']], dtype='<U1')]\n\n np.testing.assert_array_equal(rotation, result)", "def is_rotation(s1, s2):\n\tif s1 is None or s2 is None:\n\t\traise ValueError('Invalid input')\n\n\t# Check if:\n\t# a. their lengths match\n\t# b. s1 is a substring of s2 appended to s2\n\treturn len(s1) == len(s2) and _is_substring(s2+s2, s1)", "def _areDifferent_Mat44(self, mat1, mat2, thresholdLoc = 1.0, thresholdRot = 1.0):\r\n areDifferent = False\r\n jnd_vect = mathutils.Vector((thresholdLoc,thresholdLoc,thresholdRot))\r\n t1, t2 = mat1.to_translation(), mat2.to_translation()\r\n r1, r2 = mat1.to_euler(), mat2.to_euler()\r\n for n in range(3):\r\n if (abs(t1[n]-t2[n]) > thresholdLoc) or (abs(math.degrees(r1[n]-r2[n])) > thresholdRot): areDifferent = True\r\n return areDifferent", "def checkMatrix(i,j):\n # print(i,j)\n counter = Counter([grid[di][dj] for di in range(i,i+3) for dj in range(j,j+3)])\n for k in range(1,10):\n if counter[k] != 1:\n return False\n\n rows_sum = [sum(grid[k][j:j+3]) for k in range(i,i+3)]\n # print(rows_sum)\n if not all(m == 15 for m in rows_sum):\n return False\n cols_sum = [sum(grid[q][k] for q in range(i,i+3)) for k in range(j,j+3)]\n # print(cols_sum)\n if not all(m == 15 for m in cols_sum):\n return False\n dgl_sum = sum(grid[i+k][j+k] for k in range(3))\n anti_dgl_sum = sum(grid[i+k][j+2-k] for k in range(3))\n # print(dgl_sum, anti_dgl_sum)\n if dgl_sum != 15 or anti_dgl_sum != 15:\n return False\n return True", "def check_angle(self):\n self.find_pixels()\n alpha_theta=np.deg2rad(70)\n alpha_phi=np.deg2rad(70)\n extreme_values=self.compute_extreme_values(alpha_phi, alpha_theta)\n x=np.linspace(extreme_values[0], extreme_values[1], self.number_of_pix[1])\n y=np.linspace(extreme_values[2], extreme_values[3], self.number_of_pix[0])\n phi_0=20\n phi_0=np.deg2rad(phi_0)\n j, diff=self.compute_phi(\"find_orient.png\")\n print \"j=\", j\n print \"diff=\", diff", "def determine_rotation(arm, d, tip_data, rot_data):\n n_t = np.zeros(3)\n for this_n_t in tip_data['pos_ntip_wrt_r']:\n n_t += this_n_t\n n_t /= len(tip_data['pos_ntip_wrt_r'])\n print(\"Our n_t to use in this stage: {}\".format(n_t))\n\n K = len(rot_data['pos_ntip_wrt_s'])\n errors_zyz = []\n errors_zyx = []\n\n for k in range(K):\n lhs = rot_data['pos_ntip_wrt_s'][k]\n t_st = rot_data['pos_tool_wrt_s_code'][k]\n ypr = rot_data['rot_tool_wrt_s_code'][k]\n yaw, pitch, roll = ypr[0], ypr[1], ypr[2]\n\n # R_zyz\n R_z1 = U.rotation_matrix_3x3_axis(angle=roll, axis='z')\n R_y = U.rotation_matrix_3x3_axis(angle=pitch, axis='y')\n R_z2 = U.rotation_matrix_3x3_axis(angle=yaw, axis='z')\n R_zyz = R_z2.dot(R_y).dot(R_z1)\n\n # R_zyx\n R_x = U.rotation_matrix_3x3_axis(angle=roll, axis='x')\n R_y = U.rotation_matrix_3x3_axis(angle=pitch, axis='y')\n R_z = U.rotation_matrix_3x3_axis(angle=yaw, axis='z')\n R_zyx = R_z.dot(R_y).dot(R_x)\n\n # Evaluate!\n rhs_zyz = t_st + R_zyz.dot( n_t )\n rhs_zyx = t_st + R_zyx.dot( n_t )\n err_zyz = np.linalg.norm(lhs - rhs_zyz)\n err_zyx = np.linalg.norm(lhs - rhs_zyx)\n errors_zyz.append( err_zyz )\n errors_zyx.append( err_zyx )\n print(\"\\nerr_zyz: {:.3f} for {}-th 
sample\".format(err_zyz, k))\n print(\"err_zyx: {:.3f} for {}-th sample\".format(err_zyx, k))\n print(\"R_zyz:\\n{}\".format(R_zyz))\n print(\"R_zyx:\\n{}\".format(R_zyx))\n\n print(\"\\nDone with evaluation!\")\n print(\"zyz has avg error {:.5f}\".format(np.mean(errors_zyz)))\n print(\"zyx has avg error {:.5f}\".format(np.mean(errors_zyx)))" ]
[ "0.61965746", "0.6008818", "0.5870018", "0.57924783", "0.566713", "0.5649514", "0.5516805", "0.543337", "0.5430252", "0.5384225", "0.5358685", "0.53534156", "0.53217334", "0.5318572", "0.53076136", "0.5304661", "0.5281336", "0.5250107", "0.5246997", "0.5241706", "0.52340746", "0.5233067", "0.5207793", "0.5186208", "0.5175118", "0.51602346", "0.51593316", "0.51483667", "0.5147833", "0.5126919" ]
0.7322308
0
The decorator for the tag class
def decorator(tag_class): name = tag_class.__name__ if name.startswith('Tag'): name = name[3:] # keep all-uppercase names, they are special tags # like LITERAL, COMMENT, OUTPUT if not name.isupper(): name = name.lower() name = [name] if tag_class_or_alias and tag_class is not tag_class_or_alias: names = tag_class_or_alias if isinstance(names, str): names = (alias.strip() for alias in names.split(',')) name = names for nam in name: self.__class__.tags[nam] = tag_class return tag_class
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tag(cls):\n pass", "def register(self, tag_class_or_alias=None, mode='standard'):\n # type: (Union[Type[Tag], str], bool) -> Callable\n # if mode == 'jekyll':\n # from .jekyll.tags import tag_manager as tmgr\n # return tmgr.register(tag_class_or_alias)\n if mode == 'python':\n from ..python.tags import tag_manager as tmgr\n return tmgr.register(tag_class_or_alias)\n\n def decorator(tag_class):\n \"\"\"The decorator for the tag class\"\"\"\n name = tag_class.__name__\n if name.startswith('Tag'):\n name = name[3:]\n # keep all-uppercase names, they are special tags\n # like LITERAL, COMMENT, OUTPUT\n if not name.isupper():\n name = name.lower()\n name = [name]\n\n if tag_class_or_alias and tag_class is not tag_class_or_alias:\n names = tag_class_or_alias\n if isinstance(names, str):\n names = (alias.strip() for alias in names.split(','))\n name = names\n\n for nam in name:\n self.__class__.tags[nam] = tag_class\n return tag_class\n\n if callable(tag_class_or_alias):\n return decorator(tag_class_or_alias)\n\n return decorator", "def tagger():", "def tag(*args, **kwargs):\n def desc(func):\n assert not hasattr(func, 'tags')\n func.tags = Tags(*args, **kwargs)\n return func\n return desc", "def decorate(self):\n\n if self.action == Tag.Action.ASSIGN:\n className = Tag.DecoratorClass.ASSIGN\n elif self.action == Tag.Action.NOTIFY:\n className = Tag.DecoratorClass.NOTIFY\n else:\n className = Tag.DecoratorClass.CATEGORIZE\n\n if self.is_completed:\n className += \" \" + Tag.DecoratorClass.COMPLETED\n\n return \"<span class=\\\"\" + className + \"\\\">\" + self.action + self.target + \"</span>\"", "def __init__(self, decorated):\n self.decorated = decorated", "def __init__(self, decorated):\n self.decorated = decorated", "def __init__(self, decorated):\n self.decorated = decorated", "def __init__(self, tag):\n self.tag = tag", "def djcat_attr():\n def decorate(cls):\n for b in cls.__bases__:\n if getattr(b, '_is_djcat_attr', None) and getattr(b, 'attr_key', None):\n setattr(cls, '_attr_class', b)\n return cls\n return decorate", "def method(cls):\n def decorator(func):\n @functools.wraps(func)\n def wrapper(self, *args, **kwargs):\n return func(*args, **kwargs)\n setattr(cls, func.__name__, wrapper)\n return func\n return decorator", "def register(cls, class_):\n cls._registered[class_.tag()] = class_", "def get_decorated_function(self):", "def add_tag(*, tag='tag!'):\n def _apply_on(f):\n setattr(f, 'my_tag', tag)\n return f\n return _apply_on", "def tag(*args, **kwargs):\n def wrap_obj(obj):\n \"\"\"wrap function\"\"\"\n for name in args:\n setattr(obj, TAG_PREFIX + name, True)\n for name, value in kwargs.iteritems():\n setattr(obj, TAG_PREFIX + name, value)\n return obj\n return wrap_obj", "def __init__(self):\n self.tag = None", "def __init__(self, tag):\n self.tag = tag.lower()\n self.attrs = {}\n self.contents = ()", "def target_tag(self):\n raise NotImplementedError", "def add_tag(*, tag='tag!', f=DECORATED):\n setattr(f, 'my_tag', tag)\n return f", "def __call__(self, *contents, **attrs):\n if len(contents) == 0:\n contents = self.contents\n if len(attrs) == 0:\n attrs = self.attrs\n\n ret = type(self)(self.tag)\n ret.contents = contents\n ret.attrs = attrs\n return ret", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n 
pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass" ]
[ "0.70298386", "0.67040557", "0.6418024", "0.64037263", "0.6252756", "0.6243304", "0.6243304", "0.6243304", "0.61472267", "0.5950456", "0.59204096", "0.58158684", "0.57951266", "0.5689228", "0.5688503", "0.5664444", "0.5627366", "0.5609594", "0.560396", "0.56025034", "0.55954075", "0.55954075", "0.55954075", "0.55954075", "0.55954075", "0.55954075", "0.55954075", "0.55954075", "0.55954075", "0.55954075" ]
0.78976655
0
Given some date like MONTH/DAY/YEAR, make a filename like base_path/YEAR/MONTH/DAY.md
def date_to_filename(base_path, raw_date_string): raw_date_string = raw_date_string[:-1] month, day, year = raw_date_string.split("/") relative_path = "{}/{}/{}.md".format(year, month, day) return base_path / relative_path
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gen_filename_from_date(path,date,autoincrement = True):\n \n fname = date.isoformat().replace(':','.')\n \n if autoincrement:\n\n onlyfiles = [f for f in listdir(path) if isfile(join(path, f)) and f!='.DS_Store']\n \n found_numbers = [int(f.strip('.html').split('_')[1]) for f in onlyfiles if fname == f[0:len(fname)] ]\n \n highest = -1 \n if len(found_numbers)>0:\n highest = max(found_numbers)\n \n return \"{}/{}_{}.html\".format(path,fname,highest+1)", "def get_filename(self, path, prefix, suffix, date, period):\n return os.path.join(path,\n '%s%s%s' % (\n prefix,\n self.get_filename_date(date,\n params=dict(period=period)),\n suffix))", "def get_filename(out_dir, file_date, extension):\n return path.join(out_dir, f'CrossrefCitations_{file_date}.{extension}')", "def generate_filename(\r\n filepath,\r\n filestartwith,\r\n fileendwith,\r\n run_date,\r\n filemask):\r\n\r\n filedate = generate_dateformat(run_date, filemask)\r\n if not filedate:\r\n filename = filestartwith\r\n else:\r\n filename = filestartwith + filedate\r\n\r\n if fileendwith:\r\n filename = filename + fileendwith\r\n\r\n if filepath and len(filepath.strip()) > 0:\r\n filename = filepath.strip() + '/' + filename\r\n\r\n return filename", "def _create_dir_name(date, dir_structure='ymd', is_exif=True):\n if is_exif:\n date_split = date.split(' ')[0].split(':')\n else:\n date_split = date.split(' ')[0].split('-')\n dir_name = '\\\\'\n if 'y' in dir_structure:\n dir_name += date_split[0] + '\\\\'\n if 'm' in dir_structure:\n dir_name += '_'.join(d for d in date_split[:2]) + '\\\\'\n if 'd' in dir_structure:\n dir_name += '_'.join(d for d in date_split[:3]) + '\\\\'\n return dir_name", "def normalizeFilenameToCommonDateFormat(filename):\n rgx_date = re.search(r'(\\d+)-(\\d+)-(\\d+)', filename)\n\n if (rgx_date == None):\n raise ValueError(\"Not interested in this file!\")\n \n year = rgx_date.group(1)\n month = rgx_date.group(2)\n day = rgx_date.group(3)\n\n return \"%s%s%s.pdf\" % (year, month, day)", "def date_string(date):\n day = date.day\n month = date.month\n year = date.year\n formatted_string = str(month) + \"/\"\n formatted_string += str(day) + \"/\"\n formatted_string += str(year)\n return formatted_string", "def create_filenames(date):\n \n filelist = list()\n for t in [\"0000\", \"0600\", \"1200\", \"1800\"]:\n for t2 in [\"000\", \"003\"]:\n filelist.append(\"gfsanl_4_\"+d.strftime('%Y%m%d')+\"_\"+t+\"_\"+t2+\".grb2\")\n return filelist", "def normalized_export_filename(title, extension):\n filename = timezone.localtime().strftime('%Y-%m-%d_%H-%M-%S__') + slugify(title)\n if extension.startswith(os.path.extsep):\n filename += extension\n else:\n filename += os.path.extsep + extension\n return filename", "def gen_ymd(t,d) -> str:\n ymd = ( str(t.year) + d + str(t.month) + d + str(t.day) )\n return ymd", "def get_filename_by_date(root_path: str, prefix_path: str, dates_set: set) -> str:\n for date in dates_set:\n full_path = f\"{prefix_path}_{date}.txt\"\n full_path = os.path.join(root_path, full_path)\n if os.path.exists(full_path):\n return full_path\n return \"\"", "def to_filetag(self) -> str:\n return self.strftime(f\"{self.FormatCode.YEAR.WITH_CENTURY}{self.FormatCode.MONTH.NUM}{self.FormatCode.DAY.NUM}\")", "def reformat_date(mdy_date_string):\n month, day, year = mdy_date_string.split('/')\n return f\"{year}-{month}-{day}\"", "def generate_file_name(entry):\n return str_for_file(u'{name}, {year}, {title}'.format(\n year=entry['year'],\n name=get_last_name(entry['author'][0]),\n title=entry['title']\n 
))", "def create_filename (self):\n\t\tassert self.__patient_name and self.__location_name, \"New filename could not be determined, one or more needed arguments is empty!\"\n\t\t_patient_name = self.__patient_name.split(' ')\n\t\t_patient_name.reverse()\n\t\t\n\t\treturn os.path.join(os.path.dirname(self.file._path), \"%s MR %s%s\" % (self.__location_name, ', '.join(_patient_name).upper(), self._file.extension))", "def _generate_filename(doc_type, login, *args):\n filename = []\n filename.append(doc_type)\n filename.append(login)\n for item in args:\n filename.append(item)\n filename.append(datetime.datetime.now().isoformat(timespec='microseconds'))\n filename = '_'.join(filename)\n return filename", "def find_reddit_filename(wildcards):\n yearmonth = wildcards.year + '-' + wildcards.month\n if yearmonth <= '2017-11':\n ext = '.bz2'\n elif yearmonth <= '2018-10':\n ext = '.xz'\n else:\n ext = '.zst'\n return DATA + \"/downloaded/reddit/\" + yearmonth + ext", "def get_archive_filename():\r\n today = datetime.date.today()\r\n return str(today)", "def convert_name(path):\n local_timezone = get_localzone()\n aware_dt = datetime.fromtimestamp(os.path.getctime(path), local_timezone)\n date = aware_dt.strftime(\"%Y-%m-%d\")\n regex = re.compile(\"^\\d+-?(.+)\")\n match = regex.search(path.name)\n if match: stem = match.group(1)\n else: stem = \"\"\n\n return f\"{date}-{stem}\"", "def outputFilename(name=\"\", ext=\"\", time=True):\n # get the date in the format specifed\n dateTime = datetime.now()\n dateTimeFormat = \"%Y-%m-%d__%H-%M-%S\" if time else \"%Y-%m-%d\"\n fileName = dateTime.strftime(dateTimeFormat)\n\n # construct the filename\n fileName = fileName + \"_\" + name if fileName != \"\" else fileName\n ext = \".\" + ext if ext != \"\" else \"\"\n\n return fileName + ext", "def filepath(day, ind):\n if ind!=\"TradeReport\" and ind!=\"OrderDetail\" and ind!=\"OrderHistory\":\n raise NameError(' ind must be either TradeReport or OrderDetail')\n \n elif day<1 or day>31 or type(day)!=int:\n raise TypeError('day must be an integer between 1 and 31')\n \n if day<10:\n day=\"0\"+str(day)\n else:\n day=str(day)\n \n path=\"/data/LSE_DATA/raw/T_\" + ind + \"_\"+ day +\"012008.csv/\" + \"t_\" + ind +\".csv\"\n\n return path", "def give_filename( url_rel ):\n filename = basename( url_rel )\n\t# Add time information\n now_datetime = datetime.datetime.now( )\n now_string = now_datetime.strftime( \"%Y-%m-%d-%H-%M-%S\" )\n if filename.endswith( '.pdf' ):\n\t\tfileno, ext_pdf = splitext( filename )\n\t\tpdf_filename = fileno + '-' + now_string + ext_pdf\n\t\treturn pdf_filename", "def _safe_filename(filename):\n date = datetime.datetime.utcnow().strftime(\"%Y-%m-%d-%H%M%S\")\n basename, extension = filename.rsplit('.', 1)\n return \"{0}-{1}.{2}\".format(basename, date, extension)", "def reformat_date(mdy_date_string):\n date = mdy_date_string.split('/')\n return f\"{date[2]}-{date[0]}-{date[1]}\" # difficult to read", "def reformatdate(self, date):\n# print('DATE', self.__str__())\n if 'dummy' in date:\n return '1970_01_01'\n# datesplit = date.split('/')\n datesplit = date.split('-') # Really? 
This had to be changed?!\n# print('DATE', date, datesplit)\n\n # dates used to be as follows\n# month = datesplit[0]\n# day = datesplit[1]\n# year = datesplit[2]\n\n # dates as of 12 June 2018 now done this way\n year = datesplit[0]\n month = datesplit[1]\n day = datesplit[2]\n\n return year + '_' + month + '_' + day", "def makefilename(self):\n fp= (pathlib.Path(self.vr_folder).expanduser()/(time.strftime(self.vr_filename))).with_suffix('')\n fp.parent.mkdir(parents=True, exist_ok=True)\n print('files setup', str(fp))\n return fp", "def format_filename(prefix, suffix, seq_len, uncased):\n seq_str = \"seq-{}\".format(seq_len)\n if uncased:\n case_str = \"uncased\"\n else:\n case_str = \"cased\"\n\n file_name = \"{}.{}.{}.{}\".format(prefix, seq_str, case_str, suffix)\n\n return file_name", "def write_file(date, num_days):\n month = '{0:02d}'.format(date.month)\n day = '{0:02d}'.format(date.day)\n\n with open(file_name, 'a+') as out_file:\n out_file.write('{}-{},{}\\n'.format(month, day, num_days))", "def out_filename(self, filetype, dir, format='old'):\n filename = self.filename(filetype=filetype, format=format)\n return Path(dir) / filename", "def calc_filename(name, split='.', include_time=False, dbg=False):\n dt_str, time_str = calc_date_time()\n strt = str(name).split(split)\n res = ''\n\n if len(strt) == 1:\n res = strt[0]\n elif len(strt) > 1:\n append = split.join(strt[1:]) if len(strt) > 2 else strt[1]\n if include_time:\n res = \"_\".join([strt[0], dt_str, time_str])\n else:\n res = \"_\".join([strt[0], dt_str])\n res = res + split + append\n else:\n res = 'FAILED'\n\n dbc.print_helper((\"calc_filename: \" + res), dbg=dbg)\n\n return res" ]
[ "0.7158393", "0.6733796", "0.6622038", "0.6564581", "0.6493209", "0.64796656", "0.64215696", "0.6268492", "0.62443155", "0.62019366", "0.6199126", "0.6181504", "0.6176599", "0.6139581", "0.611637", "0.6111604", "0.609463", "0.60822713", "0.6059886", "0.6059355", "0.60469335", "0.5992173", "0.595217", "0.594209", "0.5937425", "0.5928594", "0.5928555", "0.5900074", "0.5894971", "0.58907974" ]
0.8378213
0
Given chromosome sizes, plot divider lines and labels. Draws black lines between each chromosome, with padding. Labels each chromosome range with the chromosome name, centered in the region, under a tick. Sets the axis limits to the covered range. By default, the dividers are vertical and the labels are on the X axis of the plot. If the `along` parameter is 'y', this is transposed to horizontal dividers and the labels on the Y axis. Returns an OrderedDict: a table of the position offsets of each chromosome along the specified axis.
def plot_chromosome_dividers(axis, chrom_sizes, pad=None, along="x"): assert isinstance(chrom_sizes, collections.OrderedDict) if pad is None: pad = 0.003 * sum(chrom_sizes.values()) dividers = [] centers = [] starts = collections.OrderedDict() curr_offset = pad for label, size in list(chrom_sizes.items()): starts[label] = curr_offset centers.append(curr_offset + 0.5 * size) dividers.append(curr_offset + size + pad) curr_offset += size + 2 * pad if along not in ("x", "y"): raise ValueError( "Direction for plotting chromosome dividers and labels along must be either x or y." ) if along == "x": axis.set_xlim(0, curr_offset) for xposn in dividers[:-1]: axis.axvline(x=xposn, color="k") # Use chromosome names as x-axis labels (instead of base positions) axis.set_xticks(centers) axis.set_xticklabels(list(chrom_sizes.keys()), rotation=90) axis.tick_params(labelsize="small") axis.tick_params(axis="x", length=0) axis.get_yaxis().tick_left() else: axis.set_ylim(0, curr_offset) for yposn in dividers[:-1]: axis.axhline(y=yposn, color="k") # Use chromosome names as y-axis labels (instead of base positions) axis.set_yticks(centers) axis.set_yticklabels(list(chrom_sizes.keys())) axis.tick_params(labelsize="small") axis.tick_params(axis="y", length=0) axis.get_xaxis().tick_bottom() return starts
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw_cells(cells, min_y=0.05, max_y=0.95, label_loc=location_ura_bp,\n cen_loc=location_cen5_bp, chr_size=chrv_size_bp,\n label_colors=None, ax=None):\n def chr_coords(s):\n \"\"\"Map from [0, 1] to locaion on plot.\"\"\"\n return max_y - (max_y - min_y)*s\n # rescale linkages to [0, 1]\n cells = [np.array(links) / chr_size for links in cells]\n n_cells = len(cells)\n # and all relevant locations\n locus_frac = label_loc / chr_size\n centromere_frac = cen_loc / chr_size\n if ax is None:\n # fill entire figure with invisible axes to draw in\n fig = plt.figure(figsize=(col_width, col_width/golden_ratio))\n ax = fig.add_axes([0, 0, 1, 1])\n ax.axis('off')\n # center each of N \"cells\" directly between N+1 fenceposts spanning [0, 1]\n n_fences = n_cells + 1\n fence_posts = np.linspace(0, 1, n_fences)\n width_per_cell = np.diff(fence_posts)[0]\n cell_centers = (fence_posts[1:] + fence_posts[:-1]) / 2\n # (1/2) times the spacing between centers of two chromosomes in each \"cell\"\n width_to_chr_center = width_per_cell / 5\n chr_width = 15\n # only works with mixed backends, where 72\"PX\"/in is always true, otherwise\n # you need to do something like:\n # transAxes.inverted().transform(dpi_scale_trans.transform([1/72, 1/72])\n pt_to_ax = ax.transAxes.inverted().transform(\n ax.get_figure().dpi_scale_trans.transform([1/72, 1/72])\n )\n for i, x in enumerate(cell_centers):\n for dx in [width_to_chr_center, -width_to_chr_center]:\n cap_radius_ax = chr_width/2 * pt_to_ax[1]\n # draw the chromosomes\n ax.plot(\n [[x + dx, x + dx], [x + dx, x + dx]],\n [[chr_coords(0), chr_coords(centromere_frac) - cap_radius_ax],\n [chr_coords(centromere_frac) + cap_radius_ax, chr_coords(1)]],\n transform=ax.transAxes, linewidth=chr_width,\n solid_capstyle='round', color=[50/255, 50/255, 50/255]\n )\n ax.plot(\n [[x + dx, x + dx], [x + dx, x + dx]],\n [[chr_coords(0), chr_coords(centromere_frac) - cap_radius_ax],\n [chr_coords(centromere_frac) + cap_radius_ax, chr_coords(1)]],\n transform=ax.transAxes, linewidth=chr_width-2,\n solid_capstyle='round', color=[197/255, 151/255, 143/255]\n )\n # draw the centromere black dot\n ax.scatter([x + dx], [chr_coords(centromere_frac)],\n zorder=10, transform=ax.transAxes, s=200, color='k')\n # draw the label, green star\n ax.scatter([x + dx], [chr_coords(locus_frac)],\n zorder=15, transform=ax.transAxes, s=500, color='g',\n marker='*', edgecolors='k')\n for linkage in cells[i]:\n ax.plot([x - width_to_chr_center, x + width_to_chr_center],\n 2*[chr_coords(linkage)],\n color=(0, 0, 1), transform=ax.transAxes,\n linewidth=5, solid_capstyle='round')\n num_linkages = len(cells[i])\n j = np.searchsorted(cells[i], locus_frac)\n closest_links = []\n if j != 0:\n closest_links.append(cells[i][j - 1])\n if j != num_linkages:\n closest_links.append(cells[i][j])\n closest_links = np.array(closest_links)\n if len(closest_links) > 0:\n linewidths = 1.2*np.ones_like(closest_links)\n closestest_link = np.argmin(np.abs(closest_links - locus_frac))\n linewidths[closestest_link] = 3.5\n for k, linkage in enumerate(closest_links):\n ax.plot([x - width_to_chr_center, x - width_to_chr_center,\n x + width_to_chr_center, x + width_to_chr_center],\n [chr_coords(locus_frac), chr_coords(linkage),\n chr_coords(linkage), chr_coords(locus_frac)],\n color=(1, 1, 1), transform=ax.transAxes,\n linewidth=linewidths[k], linestyle='--',\n dash_capstyle='butt', zorder=100)\n if label_colors:\n # add extra height above chromosomes to account for rounded end\n # caps and then some extra\n ax.transAxes\n 
ax.text(x, max_y, f'Cell {i}\\n', ha='center',\n va='bottom', color=label_colors[i],\n transform=ax.transAxes,\n fontsize=mpl.rcParams['axes.titlesize'])\n return ax", "def _draw_dividers(chrom_offsets, ax):\n\n positions = np.array(list(chrom_offsets.values()))\n\n # Draw dividers.\n for loc in positions[1:-1]:\n ax.axvline(loc, color='grey', lw=0.5, zorder=5)\n\n # Draw xtick labels.\n ax.set_xticks((positions[:-1] + positions[1:]) / 2)\n ax.set_xticklabels(chrom_offsets.keys())\n\n # Set xlim to boundaries.\n ax.set_xlim(0, chrom_offsets['_END_'])", "def autolabel(rects, xpos='center'):\n\n ha = {'center': 'center', 'right': 'left', 'left': 'right'}\n offset = {'center': 0, 'right': 1, 'left': -1}\n\n for rect in rects:\n height = rect.get_height().round(2)\n ax.annotate('{}'.format(height),\n xy=(rect.get_x() + rect.get_width() / 2, height),\n xytext=(offset[xpos]*3, 3), \n textcoords=\"offset points\", \n ha=ha[xpos], va='bottom', fontsize=14)", "def fig_linear_genome(plotf, genome, chromosomes=('2R', '2L', '3R', '3L', 'X'),\n fig=None, bottom=0, height=1, width_factor=1.08, chrom_pad=0.035,\n clip_patch_kwargs=None, **kwargs):\n\n from matplotlib.path import Path\n\n # compute assembled genome size\n genome_size = sum(len(genome[chrom]) for chrom in chromosomes)\n\n # setup figure\n if fig is None:\n fig = plt.figure(figsize=(8, 1))\n\n # setup clip patch\n if clip_patch_kwargs is None:\n clip_patch_kwargs = dict()\n clip_patch_kwargs.setdefault('edgecolor', 'k')\n clip_patch_kwargs.setdefault('facecolor', 'none')\n clip_patch_kwargs.setdefault('lw', 1)\n\n # setup axes\n left = 0\n axs = dict()\n\n for chrom in chromosomes:\n\n # calculate width needed for this chrom\n width = len(genome[chrom]) / (genome_size * width_factor)\n\n # create axes\n ax = fig.add_axes([left, bottom, width, height])\n ax.set_axis_bgcolor((1, 1, 1, 0))\n axs[chrom] = ax\n\n # construct clip path\n if chrom in {'2R', '3R'}:\n verts = [(0.01, 0.02), (0.9, 0.02), (1.01, 0.3), (1.01, 0.7), (0.9, .98), (0.01, .98), (0.01, 0.02)]\n codes = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY]\n elif chrom == \"X\":\n verts = [(0.01, 0.02), (0.9, 0.02), (0.99, 0.3), (0.99, 0.7), (0.9, .98), (0.01, .98), (0.01, 0.02)]\n codes = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY]\n else:\n verts = [(0.1, 0.02), (.99, 0.02), (.99, .98), (.1, .98), (-0.01, .7), (-0.01, .3), (0.1, 0.02)]\n codes = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY]\n path = Path(verts, codes)\n clip_patch = mpl.patches.PathPatch(path, transform=ax.transAxes, **clip_patch_kwargs)\n\n # do the plotting\n plotf(chrom=chrom, ax=ax, clip_patch=clip_patch, **kwargs)\n\n # increment left coordinate\n left += len(genome[chrom]) / (genome_size * width_factor)\n if chrom in {'2L', '3L'}:\n left += chrom_pad\n\n return axs", "def autolabel(rects):\n for rect in rects:\n height = rect.get_height()\n ax.annotate('{}'.format(np.round(height, 2)),\n xy=(rect.get_x() + rect.get_width() / 2, height),\n xytext=(0, 3), # 3 points vertical offset\n textcoords=\"offset points\",\n ha='center', va='bottom')", "def autolabel(rects):\n #for rect in rects:\n for i in range(len(rects)):\n rect = rects[i]\n height = rect.get_height()\n ax.annotate('{}'.format(('%.2f' % (height)) + '% of\\n' + ('%d' % range_data[i].shape[0]) + ' people' ),\n xy=(rect.get_x() + rect.get_width() / 2, height),\n xytext=(0, 3), # 3 points vertical offset\n 
textcoords=\"offset points\",\n ha='center', va='bottom')", "def autolabel(rects):\r\n for rect in rects:\r\n height = rect.get_height()\r\n ax.annotate('{}'.format(height),\r\n xy=(rect.get_x() + rect.get_width() / 2, height),\r\n xytext=(0, 3), # 3 points vertical offset\r\n textcoords=\"offset points\",\r\n ha='center', va='bottom')", "def autolabel(rects):\r\n for rect in rects:\r\n height = rect.get_height()\r\n ax.annotate('{}'.format(height),\r\n xy=(rect.get_x() + rect.get_width() / 2, height),\r\n xytext=(0, 3), # 3 points vertical offset\r\n textcoords=\"offset points\",\r\n ha='center', va='bottom')", "def autolabel(rects):\r\n for rect in rects:\r\n height = rect.get_height()\r\n ax.annotate('{}'.format(height),\r\n xy=(rect.get_x() + rect.get_width() / 2, height),\r\n xytext=(0, 3), # 3 points vertical offset\r\n textcoords=\"offset points\",\r\n ha='center', va='bottom')", "def autolabel(rects):\n for rect in rects:\n height = rect.get_height()\n ax.annotate('{}'.format(height),\n xy=(rect.get_x() + rect.get_width() / 2, height),\n xytext=(0, 3), # 3 points vertical offset\n textcoords=\"offset points\",\n ha='center', va='bottom')", "def autolabel(rects):\n for rect in rects:\n height = rect.get_height()\n ax.annotate('{}'.format(height),\n xy=(rect.get_x() + rect.get_width() / 2, height),\n xytext=(0, 3), # 3 points vertical offset\n textcoords=\"offset points\",\n ha='center', va='bottom')", "def autolabel(rects):\n for rect in rects:\n height = rect.get_height()\n ax.annotate('{}'.format(height),\n xy=(rect.get_x() + rect.get_width() / 2, height),\n xytext=(0, 3), # 3 points vertical offset\n textcoords=\"offset points\",\n ha='center', va='bottom')", "def autolabel(rects):\n for rect in rects:\n height = rect.get_height()\n ax.annotate('{}'.format(height),\n xy=(rect.get_x() + rect.get_width() / 2, height),\n xytext=(0, 3), # 3 points vertical offset\n textcoords=\"offset points\",\n ha='center', va='bottom')", "def autolabel(rects):\n for rect in rects:\n height = rect.get_height()\n ax.annotate('{}'.format(height),\n xy=(rect.get_x() + rect.get_width() / 2, height),\n xytext=(0, 3), # 3 points vertical offset\n textcoords=\"offset points\",\n ha='center', va='bottom')", "def autolabel(rects):\n for rect in rects:\n height = rect.get_height()\n ax.annotate('{}'.format(height),\n xy=(rect.get_x() + rect.get_width() / 2, height),\n xytext=(0, 3), # 3 points vertical offset\n textcoords=\"offset points\",\n ha='center', va='bottom')", "def autolabel(rects):\n for rect in rects:\n height = rect.get_height()\n ax.annotate('{}'.format(height),\n xy=(rect.get_x() + rect.get_width() / 2, height),\n xytext=(0, 3), # 3 points vertical offset\n textcoords=\"offset points\",\n ha='center', va='bottom')", "def autolabel(rects):\n for rect in rects:\n height = rect.get_height()\n ax.annotate('{}'.format(height),\n xy=(rect.get_x() + rect.get_width() / 3, height),\n xytext=(0, 3), # 3 points vertical offset\n textcoords=\"offset points\",\n ha='center', va='bottom')", "def autolabel(rects):\n for rect in rects:\n height = rect.get_height()\n ax5.annotate('{}'.format(height),\n xy=(rect.get_x() + rect.get_width() / 2, height),\n xytext=(0, 2), # 3 points vertical offset\n textcoords=\"offset points\",\n ha='center', va='bottom')", "def autolabel(rects, ax, offset=0):\n for rect in rects:\n height = rect.get_height()\n ax.annotate('{}'.format(height),\n xy=(rect.get_x() + rect.get_width() / 2+offset, height),\n xytext=(0, 3), # 3 points vertical offset\n textcoords=\"offset points\",\n 
ha='center', va='bottom')", "def autolabel(rects,ax):\n for rect in rects:\n height = rect.get_height()\n ax.annotate('{}'.format(height),\n xy=(rect.get_x() + rect.get_width() / 2, height),\n xytext=(0, 3), # 3 points vertical offset\n textcoords=\"offset points\",\n ha='center', va='bottom')", "def autolabel(rects,ax):\r\n for rect in rects:\r\n height = rect.get_height()\r\n ax.annotate('{}'.format(height),\r\n xy=(rect.get_x() + rect.get_width() / 2, height),\r\n xytext=(0, 2), \r\n textcoords=\"offset points\",\r\n ha='center', va='bottom', rotation=0)", "def autolabel(rects):\n for rect in rects:\n height = rect.get_height()\n ax.annotate('{}'.format(np.around(height,2)),\n xy=(rect.get_x() + rect.get_width() / 2, height),\n xytext=(0, 3), # 3 points vertical offset\n textcoords=\"offset points\",\n ha='center', va='bottom')", "def autolabel(rects):\n for rect in rects:\n height = rect.get_height()\n pyplot.annotate('{}'.format(height),\n xy=(rect.get_x() + rect.get_width() / 2, height),\n xytext=(0, -75), # 3 points vertical offset\n textcoords=\"offset points\",\n ha='center', va='bottom', rotation=90)", "def autolabel(rects):\n for rect in rects:\n height = rect.get_height()\n ax.annotate(\n \"{}\".format(height),\n xy=(rect.get_x() + rect.get_width() / 2, height),\n xytext=(0, 3), # 3 points vertical offset\n textcoords=\"offset points\",\n ha=\"center\",\n va=\"bottom\",\n )", "def autolabel(rects):\n for rect in rects:\n height = rect.get_height()\n ax.annotate(\n \"{}\".format(height),\n xy=(rect.get_x() + rect.get_width() / 2, height),\n xytext=(0, 3), # 3 points vertical offset\n textcoords=\"offset points\",\n ha=\"center\",\n va=\"bottom\",\n )", "def autolabel(rects):\n for rect in rects:\n height = rect.get_height()\n ax.annotate('{}'.format(height),\n xy=(rect.get_x() + rect.get_width() / 2, height),\n xytext=(0, 3),\n textcoords=\"offset points\",\n ha='center', va='bottom')", "def autolabel(rects):\n for rect in rects:\n height = rect.get_height()\n ax.annotate(\"%.2f\"%(height),\n xy=(rect.get_x() + rect.get_width() / 2, height),\n xytext=(0, 3), # 3 points vertical offset\n textcoords=\"offset points\",\n ha='center', va='bottom', fontsize=7)", "def autolabel(rects):\r\n for rect in rects:\r\n height = rect.get_height()\r\n ax.annotate('{}'.format(height),\r\n xy=(rect.get_x() + rect.get_width() / 2, height),\r\n xytext=(0, 3), # 3 points vertical offset\r\n textcoords=\"offset points\",\r\n ha='center', va='bottom')", "def autolabel(rects):\n for rect in rects:\n height = rect.get_height()\n ax.annotate('{0:.2f}'.format(height),\n xy=(rect.get_x() + rect.get_width() / 2, height),\n xytext=(0, 3), # 3 points vertical offset\n textcoords=\"offset points\",\n ha='center', va='bottom')", "def autolabel(rects, xpos='center'):\n\n ha = {'center': 'center', 'right': 'left', 'left': 'right'}\n offset = {'center': 0, 'right': 1, 'left': -1}\n\n for rect in rects:\n height = rect.get_height()\n ax.annotate('{}'.format(height),\n xy=(rect.get_x() + rect.get_width() / 2, height),\n xytext=(offset[xpos]*2, 2), # use 3 points offset\n textcoords=\"offset points\", # in both directions\n ha=ha[xpos], va='bottom')" ]
[ "0.59855926", "0.5667719", "0.53648233", "0.535239", "0.53448987", "0.5336877", "0.53227484", "0.53227484", "0.53227484", "0.53122205", "0.53122205", "0.53122205", "0.53122205", "0.53122205", "0.53122205", "0.53122205", "0.53104365", "0.5308428", "0.5303463", "0.5296885", "0.52874345", "0.52868825", "0.5278056", "0.52742", "0.52742", "0.52733403", "0.5271158", "0.52709764", "0.52706486", "0.5263225" ]
0.7555898
0
Create an ordered mapping of chromosome names to sizes.
def chromosome_sizes(probes, to_mb=False): chrom_sizes = collections.OrderedDict() for chrom, rows in probes.by_chromosome(): chrom_sizes[chrom] = rows["end"].max() if to_mb: chrom_sizes[chrom] *= MB return chrom_sizes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def size(map):\n return map['size']", "def get_chromosome_length(genome):\n \n chr_list = {}\n \n for key in genome:\n chr_list[key] = len(genome[key])\n\n return chr_list", "def sort_mapping_by_size(cluster_mapping):\r\n\r\n return sorted(cluster_mapping.keys(),\r\n cmp=lambda a, b: cmp(len(a), len(b)),\r\n key=lambda k: cluster_mapping[k], reverse=True)", "def sizes(self) -> dict:\n raise NotImplementedError", "def get_size_distribution(self) -> Dict[int, int]:\n size_dist = dict()\n for complex_expression, complex_abundance in self.get_all_complexes_and_abundances():\n current_size = complex_expression.get_size_of_complex()\n if current_size in size_dist:\n size_dist[current_size] += complex_abundance\n else:\n size_dist[current_size] = complex_abundance\n sorted_dist = dict(sorted(size_dist.items(), key=lambda item: item[0]))\n return sorted_dist", "def get_map_size(level):\n if level < 5:\n return 5, 5\n if level < 70:\n return 10, 10\n if level < 150:\n return 25, 25\n return 50, 50", "def _chrom_sizes(fasta_file):\n from pysam import FastaFile\n fa = FastaFile(fasta_file)\n chrom_lens = OrderedDict([(name, l) for name, l in zip(fa.references, fa.lengths)])\n if len(chrom_lens) == 0:\n raise ValueError(f\"no chromosomes found in fasta file: {fasta_file}. \"\n \"Make sure the file path is correct and that the fasta index \"\n \"file {fasta_file}.fai is up to date\")\n fa.close()\n return chrom_lens", "def make_stats(mapping):\r\n stats = [\"Clustersize\\t#\"]\r\n counts = defaultdict(int)\r\n for key in mapping.keys():\r\n counts[len(mapping[key])] += 1\r\n\r\n keys = sorted(counts.keys())\r\n for key in keys:\r\n stats.append(\"%d:\\t\\t%d\" % (key + 1, counts[key]))\r\n return \"\\n\".join(stats)", "def chromsizes(genome):\n try:\n return getattr(genome_registry, genome)\n except AttributeError:\n return get_chromsizes_from_ucsc(genome)", "def size(name):", "def custom_dictionary(nMarkers, markerSize):\n pass", "def summarize_otu_sizes_from_otu_map(otu_map_f):\r\n result = {}\r\n for otu_id, seq_ids in fields_to_dict(otu_map_f).items():\r\n otu_size = len(seq_ids)\r\n try:\r\n result[otu_size] += 1\r\n except KeyError:\r\n result[otu_size] = 1\r\n\r\n result = sorted(result.items())\r\n return result", "def num_species_on_map(self):\n # tot_herbivores = 0\n # tot_carnivores = 0\n # for cells in itertools.chain.from_iterable(self.map):\n # curr_herbivore, curr_carnivore = cells.num_species_per_cell()\n # tot_herbivores += curr_herbivore\n # tot_carnivores += curr_carnivore\n\n return (sum(x) for x in zip(*[cells.num_species_per_cell() for cells in itertools.chain.from_iterable(self.map)]))\n\n # (sum(x) for x in zip(*[cells.num_species_per_cell() for cells in itertools.chain.from_iterable(self.map)]))", "def chromosome_lengths(self):\n chr_lens = {}\n for r in self.regions(lazy=True):\n if chr_lens.get(r.chromosome) is None:\n chr_lens[r.chromosome] = r.end\n continue\n if r.end > chr_lens[r.chromosome]:\n chr_lens[r.chromosome] = r.end\n return chr_lens", "def map_invariant_chroms(self):\n for chrom in self.chrom_sizes:\n if chrom not in self.map:\n self.map[chrom] = [SeqRegion(1, 1, self.chrom_sizes[chrom])]", "def udcall_map_size(*args):\n return _ida_hexrays.udcall_map_size(*args)", "def test_count_genomic_region_sizes(self):\n \n regions = OrderedDict()\n regions[\"exons\"] = \"Exon\"\n regions[\"utr3\"] = \"3' UTR\"\n regions[\"utr5\"] = \"5' UTR\"\n regions[\"proxintron500\"] = \"Proximal\\nIntron\"\n regions[\"distintron500\"] = \"Distal\\nIntron\"\n results = 
count_genomic_region_sizes(os.path.join(clipper.test_dir(), \"regions\"), regions, \"mm9\")", "def eamap_size(*args):\n return _ida_hexrays.eamap_size(*args)", "def map_csv_dimensions(length: str, width: str, height: str):\n return {\n \"length\": length,\n \"width\": width,\n \"height\": height\n }", "def load_sizes(self):\n out = {}\n sizes = self.api.photos_getSizes(photo_id=self.id)\n for xml in sizes.find(\"sizes\").findall(\"size\"):\n size = xml.attrib\n slug = slugify(size[\"label\"], ((\" \", \"\"),))\n out[slug] = size\n return out", "def ngram_sizes(self):\n return self.size_freqs.keys()", "def _assign_sizes(self):", "def test_summarize_otu_sizes_from_otu_map(self):\r\n otu_map_f = \"\"\"O1\tseq1\r\no2\tseq2\tseq3\tseq4\tseq5\r\no3\tseq5\r\no4\tseq6\tseq7\"\"\".split('\\n')\r\n expected = [(1, 2), (2, 1), (4, 1)]\r\n self.assertEqual(summarize_otu_sizes_from_otu_map(otu_map_f), expected)", "def set_chrom_dict():\n chrom_dict = {\n str(i):'chr' + str(i) for i in range(1, MAXCHROM)\n }\n chrom_dict.update({\n 'X':'chr23',\n 'Y':'chr24',\n 'XY':'chr25',\n 'M':'chr26',\n 'MT':'chr26',\n 'chrX':'chr23',\n 'chrY':'chr24',\n 'chrXY':'chr25',\n 'chrM':'chr26',\n 'chrMT':'chr26'\n })\n return chrom_dict, MAXCHROM", "def _create_chrom_dict(chrom_len_fpath: Path) -> Dict[str, int]:\n\n chrom_dict: Dict[str, int] = {}\n with chrom_len_fpath.open() as c_f:\n next(c_f) # skip header\n line: str\n for line in c_f:\n chrom_dict[line.split(\"\\t\")[0]] = int(line.split(\"\\t\")[1])\n\n return chrom_dict", "def add_sizes(strings):\n c = []\n for i in strings:\n c.append((i, len(i)))\n return c", "def Dictionary_create(nMarkers, markerSize):\n pass", "def getContigSizes( self ):\n if not self.mIsLoaded: self.__loadIndex()\n contig_sizes = {}\n for key, val in self.mIndex.items():\n contig_sizes[key] = val[2]\n return contig_sizes", "def CodePagesToReachedSize(reached_symbol_names, page_to_symbols):\n reached_symbol_names = set(reached_symbol_names)\n page_to_reached = {}\n for offset in page_to_symbols:\n total_size = sum(x[1] for x in page_to_symbols[offset])\n reached_size = sum(\n size_in_page for (name, size_in_page) in page_to_symbols[offset]\n if name in reached_symbol_names)\n page_to_reached[offset] = {'total': total_size, 'reached': reached_size}\n return page_to_reached", "def name_distribution_from_dict(d):\n def get_number_chars(los):\n \"returns the number of characters in the given list of strings\"\n res = 0\n for s in los:\n res += len(s)\n return res\n\n dist = dict((k, get_number_chars(v)) for (k, v) in d.items())\n total = 0\n print dist\n for k, v in dist.items():\n total += v\n\n return dict((k, v/float(total)) for (k, v) in dist.items())" ]
[ "0.6174769", "0.6040503", "0.6033447", "0.59314597", "0.5925503", "0.5911833", "0.5901245", "0.5816838", "0.57068896", "0.5703283", "0.56478256", "0.56461716", "0.558924", "0.55857843", "0.54875106", "0.54366624", "0.53771925", "0.5367268", "0.5364834", "0.5347153", "0.53456485", "0.534388", "0.53312284", "0.53289574", "0.52957654", "0.5287888", "0.52774256", "0.5267706", "0.52497876", "0.52482736" ]
0.6786019
0
Convert start/end positions from genomic to binwise coordinates. Instead of chromosomal basepairs, the positions indicate enumerated bins. Revise the start and end values for all GenomicArray instances at once, where the `cnarr` bins are mapped to corresponding `segments`, and `variants` are grouped into `cnarr` bins as well; if multiple `variants` rows fall within a single bin, equally-spaced fractional positions are used. Returns copies of the 3 input objects with revised `start` and `end` arrays.
def update_binwise_positions(cnarr, segments=None, variants=None): cnarr = cnarr.copy() if segments: segments = segments.copy() seg_chroms = set(segments.chromosome.unique()) if variants: variants = variants.copy() var_chroms = set(variants.chromosome.unique()) # ENH: look into pandas groupby innards to get group indices for chrom in cnarr.chromosome.unique(): # Enumerate bins, starting from 0 # NB: plotted points will be at +0.5 offsets c_idx = cnarr.chromosome == chrom c_bins = cnarr[c_idx] # .copy() if segments and chrom in seg_chroms: # Match segment boundaries to enumerated bins c_seg_idx = (segments.chromosome == chrom).values seg_starts = np.searchsorted( c_bins.start.values, segments.start.values[c_seg_idx] ) seg_ends = np.r_[seg_starts[1:], len(c_bins)] segments.data.loc[c_seg_idx, "start"] = seg_starts segments.data.loc[c_seg_idx, "end"] = seg_ends if variants and chrom in var_chroms: # Match variant positions to enumerated bins, and # add fractional increments to multiple variants within 1 bin c_varr_idx = (variants.chromosome == chrom).values c_varr_df = variants.data[c_varr_idx] # Get binwise start indices of the variants v_starts = np.searchsorted(c_bins.start.values, c_varr_df.start.values) # Overwrite runs of repeats with fractional increments, # adding the cumulative fraction to each repeat for idx, size in list(get_repeat_slices(v_starts)): v_starts[idx] += np.arange(size) / size variant_sizes = c_varr_df.end - c_varr_df.start variants.data.loc[c_varr_idx, "start"] = v_starts variants.data.loc[c_varr_idx, "end"] = v_starts + variant_sizes c_starts = np.arange(len(c_bins)) # c_idx.sum()) c_ends = np.arange(1, len(c_bins) + 1) cnarr.data.loc[c_idx, "start"] = c_starts cnarr.data.loc[c_idx, "end"] = c_ends return cnarr, segments, variants
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def grid_to_bins(grid, start_bin_val, end_bin_val):\n bin_centers = (grid[1:] + grid[:-1])/2.0\n bins = np.concatenate([[start_bin_val], bin_centers, [end_bin_val]])\n return bins", "def concat_ranges_nb(a, start_idxs, end_idxs):\n out = np.empty((end_idxs[0] - start_idxs[0], start_idxs.shape[0] * a.shape[1]), dtype=a.dtype)\n for col in range(a.shape[1]):\n out[:, col * start_idxs.shape[0]:(col + 1) * start_idxs.shape[0]] = \\\n concat_ranges_1d_nb(a[:, col], start_idxs, end_idxs)\n return out", "def populate_coordinate_list(start, end):\n # print(\"im am here:\" , coordinates, DNA_start)\n corod_list = []\n # DNA start is the gene start in the gff\n # coord is the up stream as defined by the region of interest.\n # is gene is (+) coding: DNA_start > coordinates\n if start > end: # + coding\n for number in range(end, start):\n # print(\"DNA start greater, should be +\", direction)\n # need to get rid of negative coodinates is there\n # are any \n if number < 1:\n continue\n corod_list.append(int(number))\n corod_list = corod_list[::-1]\n if start < end:\n for number in range(start, end):\n # need to get rid of negative coodinates is there\n # are any \n if number < 1:\n continue\n corod_list.append(int(number))\n # print(corod_list)\n # we return a reversed list. So we can go through the coorinates away\n # from the gene to see to see if it fals into a gene \n return corod_list", "def concat_ranges_1d_nb(a, start_idxs, end_idxs):\n out = np.empty((end_idxs[0] - start_idxs[0], start_idxs.shape[0]), dtype=a.dtype)\n for idx in range(start_idxs.shape[0]):\n out[:, idx] = a[start_idxs[idx]:end_idxs[idx]]\n return out", "def split_ranges_into_sets(start_idxs: tp.ArrayLike, end_idxs: tp.ArrayLike,\n set_lens: tp.MaybeSequence[tp.Sequence[float]] = (),\n left_to_right: tp.MaybeSequence[bool] = True) -> RangesT:\n start_idxs = np.asarray(start_idxs)\n end_idxs = np.asarray(end_idxs)\n checks.assert_len_equal(start_idxs, end_idxs)\n\n for i in range(len(start_idxs)):\n start_idx = start_idxs[i]\n end_idx = end_idxs[i]\n\n range_len = end_idx - start_idx + 1\n new_set_lens = []\n if len(set_lens) == 0:\n yield (np.arange(start_idx, end_idx + 1),)\n else:\n if checks.is_sequence(set_lens[0]):\n _set_lens = set_lens[i]\n else:\n _set_lens = set_lens\n if checks.is_sequence(left_to_right):\n _left_to_right = left_to_right[i]\n else:\n _left_to_right = left_to_right\n for j, set_len in enumerate(_set_lens):\n if 0 < set_len < 1:\n set_len = math.floor(set_len * range_len)\n if set_len == 0:\n raise ValueError(f\"Set {j} in the range {i} is empty\")\n new_set_lens.append(set_len)\n if sum(new_set_lens) < range_len:\n if _left_to_right:\n new_set_lens = new_set_lens + [range_len - sum(new_set_lens)]\n else:\n new_set_lens = [range_len - sum(new_set_lens)] + new_set_lens\n else:\n raise ValueError(f\"Range of length {range_len} too short to split into {len(_set_lens) + 1} sets\")\n\n # Split each range into sets\n idx_offset = 0\n set_ranges = []\n for set_len in new_set_lens:\n new_idx_offset = idx_offset + set_len\n set_ranges.append(np.arange(start_idx + idx_offset, start_idx + new_idx_offset))\n idx_offset = new_idx_offset\n\n yield tuple(set_ranges)", "def gen_ranges(starts, ends):\n if starts.size != ends.size:\n raise ValueError(\"starts and ends must be same size\")\n if not ((ends - starts) > 0).all():\n raise ValueError(\"all ends must be greater than starts\")\n lengths = ends - starts\n segs = ak.cumsum(lengths) - lengths\n totlen = lengths.sum()\n slices = ak.ones(totlen, dtype=ak.int64)\n 
diffs = ak.concatenate((ak.array([starts[0]]), \n starts[1:] - starts[:-1] - lengths[:-1] + 1))\n slices[segs] = diffs\n return segs, ak.cumsum(slices)", "def bin_sizing(self):\n\n self.log.info(\"Begin Re-Binning the Genome Space.\")\n new_list = []\n seg_num = 0\n\n for chrom in natsort.natsorted(self.seg_analyzer.chrom_list):\n self.log.debug(\"Binning Chromosome {0}\".format(chrom))\n\n # Some chromosomes have no segments.\n try:\n chrom_slice = \\\n self.seg_analyzer.seg_copy_array[self.seg_analyzer.seg_copy_array[:, 1] == chrom.encode()]\n seg_count = chrom_slice.shape[0]\n coord_start = int(chrom_slice[0, 2])\n except IndexError:\n continue\n\n for i in range((seg_count-1)):\n if (i+1) < seg_count and (i+1) % int(self.args.Combine_Segments) == 0:\n coord_stop = int(chrom_slice[i, 3])\n new_list.append([seg_num, chrom.encode(), coord_start, coord_stop])\n\n coord_start = int(chrom_slice[i+1, 2])\n seg_num += 1\n\n self.log.info(\"Genome Space Successfully Re-Binned.\")\n\n return numpy.array(new_list, dtype='object')", "def gene_coords_by_range(probes, chrom, start, end, ignore=params.IGNORE_GENE_NAMES):\n ignore += params.ANTITARGET_ALIASES\n # Tabulate the genes in the selected region\n genes = collections.OrderedDict()\n for row in probes.in_range(chrom, start, end):\n name = str(row.gene)\n if name in genes:\n genes[name][1] = row.end\n elif name not in ignore:\n genes[name] = [row.start, row.end]\n # Reorganize the data structure\n return {\n chrom: [(gstart, gend, name) for name, (gstart, gend) in list(genes.items())]\n }", "def translate_region_to_bins(region, bins):\n if region is None:\n return Region(None, None, None)\n chrom, start, end = unpack_range(region)\n if start is None and end is None:\n return Region(chrom, start, end)\n if start is None:\n start = 0\n if end is None:\n end = float(\"inf\")\n # NB: only bin start positions matter here\n c_bin_starts = bins.data.loc[bins.data.chromosome == chrom, \"start\"].values\n r_start, r_end = np.searchsorted(c_bin_starts, [start, end])\n return Region(chrom, r_start, r_end)", "def _make_bins(start, stop, step):\n bin_edges = np.arange(start, stop + step, step)\n\n return bin_edges", "def __call__(self, n_bins, segment, elements):\n\n # n_bins\n assert type(n_bins) is int\n assert n_bins > 0\n\n # segment\n assert type(segment) is list or type(segment) is tuple\n assert len(segment) == 2\n assert np.isscalar(segment[0]) and np.isscalar(segment[1])\n assert segment[0] < segment[1]\n\n # elements\n assert type(elements) is np.ndarray, f\"elements should be an np.ndarray, instead of {type(elements)}\"\n assert elements.dtype == np.number\n\n sorted_elements = np.sort(elements)\n\n bin_card = int(floor(elements.shape[0]/n_bins))\n\n bin_boundaries = [segment[0]]\n\n for i in range(1, n_bins):\n boundary_l = sorted_elements[i*bin_card - 1]\n boundary_r = sorted_elements[i * bin_card]\n boundary = (boundary_l+boundary_r)/2\n\n bin_boundaries.append(boundary)\n\n bin_boundaries.append(segment[1])\n\n return np.array(bin_boundaries)", "def get_inter_cds_regions(annotations):\n # Determine locations of inter-CDS regions for each chromosome\n inter_cds_regions = {}\n\n for chr_id, chromosome in annotations.items():\n # get chromosome dimensions (the first feature represents the\n # chromosome itself)\n ch_end = int(chromosome.features[0].location.end)\n\n # filter out everything except for genes\n genes = [x for x in chromosome.features if x.type == 'gene']\n\n # order by position along chromosome\n genes.sort(key=lambda x: 
x.location.start)\n\n # add range before first gene\n start = 0\n\n # keep track of strand of polycistronic transcriptional unit\n strand = None\n\n inter_cds_regions[chr_id] = {\n -1: [],\n +1: []\n }\n\n # iterate through genes and store the ranges between them;\n # for TriTrypDB files, the gene boundaries are generally the same\n # as the CDS boundaries.\n for gene in genes:\n # Determine location for the region up to start of the gene\n end = int(gene.location.start)\n\n # Skip over snoRNAs, etc. that are nested inside of other genes\n # For example: TcCLB TcChr22-2 179,000:180,000\n if end <= start:\n continue\n\n # Add CDS to relevant list based on strand\n if strand is None:\n # Left-most gene\n inter_cds_regions[chr_id][gene.location.strand].append((start, end))\n elif strand != gene.location.strand:\n # Add ORFs in both directions at transcription switch sites (TSSs)\n inter_cds_regions[chr_id][+1].append((start, end))\n inter_cds_regions[chr_id][-1].append((start, end))\n else:\n # Within PTU; look for ORFs on same strand\n inter_cds_regions[chr_id][strand].append((start, end))\n\n # update start counter and strand\n start = int(gene.location.end)\n strand = gene.location.strand\n\n # add region after last gene\n inter_cds_regions[chr_id][strand].append((start, ch_end))\n\n return inter_cds_regions", "def tnuc_range2gnuc_range(self, tbeg, tend):\n np = self.position_array()\n # print self.\n # print len(np)\n # print tbeg, tend\n return tnuc_range2gnuc_range_(np, tbeg, tend)", "def masktoregions(in_mask):\n regions = []\n for i in [0,1]: # do the thing for the first and second strands\n current_strand = in_mask[i].copy().astype(float)\n current_strand[-1] = np.nan # set final position to np.nan to avoid overlap issues\n transitions = current_strand - np.roll(current_strand,1)\n true_start = np.where(transitions == 1)[0]\n true_end = np.where(transitions == -1)[0] - 1\n if current_strand[0] == 1: # if starts on True, add True start to front end\n true_start = np.r_[0,true_start]\n if in_mask[i][-1] == True: # if ends on True, add True end to back end\n true_end = np.r_[true_end, len(current_strand)-1]\n if in_mask[i][-2] == False: # if the one before is False, it's a single point True\n true_start = np.r_[true_start,len(current_strand)-1]\n if np.all(in_mask[i][-2:] == [True, False]):\n true_end = np.r_[true_end, len(current_strand)-2]\n regions.append(np.asarray([np.zeros(len(true_start))+i,true_start,true_end]).T)\n out_regions = np.concatenate(regions,axis=0).astype(int)\n return out_regions", "def construct_b(self, start, end):\n return np.concatenate((start, end), axis = 0)", "def split(self, X: tp.ArrayLike, n: tp.Optional[int] = None, range_len: tp.Optional[float] = None,\n min_len: int = 1, start_idxs: tp.Optional[tp.ArrayLike] = None,\n end_idxs: tp.Optional[tp.ArrayLike] = None, **kwargs) -> RangesT:\n X = to_any_array(X)\n if isinstance(X, (pd.Series, pd.DataFrame)):\n index = X.index\n else:\n index = pd.Index(np.arange(X.shape[0]))\n\n # Resolve start_idxs and end_idxs\n if start_idxs is None and end_idxs is None:\n if range_len is None and n is None:\n raise ValueError(\"At least n, range_len, or start_idxs and end_idxs must be set\")\n if range_len is None:\n range_len = len(index) // n\n if 0 < range_len < 1:\n range_len = math.floor(range_len * len(index))\n start_idxs = np.arange(len(index) - range_len + 1)\n end_idxs = np.arange(range_len - 1, len(index))\n elif start_idxs is None or end_idxs is None:\n raise ValueError(\"Both start_idxs and end_idxs must be 
set\")\n else:\n if isinstance(start_idxs, pd.Index):\n start_idxs = np.asarray([find_first_occurrence(idx, index) for idx in start_idxs])\n else:\n start_idxs = np.asarray(start_idxs)\n if isinstance(end_idxs, pd.Index):\n end_idxs = np.asarray([find_first_occurrence(idx, index) for idx in end_idxs])\n else:\n end_idxs = np.asarray(end_idxs)\n\n # Filter out short ranges\n start_idxs, end_idxs = np.broadcast_arrays(start_idxs, end_idxs)\n range_lens = end_idxs - start_idxs + 1\n min_len_mask = range_lens >= min_len\n if not np.any(min_len_mask):\n raise ValueError(f\"There are no ranges that meet range_len>={min_len}\")\n start_idxs = start_idxs[min_len_mask]\n end_idxs = end_idxs[min_len_mask]\n\n # Evenly select n ranges\n if n is not None:\n if n > len(start_idxs):\n raise ValueError(f\"n cannot be bigger than the maximum number of ranges {len(start_idxs)}\")\n idxs = np.round(np.linspace(0, len(start_idxs) - 1, n)).astype(int)\n start_idxs = start_idxs[idxs]\n end_idxs = end_idxs[idxs]\n\n return split_ranges_into_sets(start_idxs, end_idxs, **kwargs)", "def __init__(self, start: np.ndarray, end: np.ndarray):\n self.start = np.reshape(start, 2)\n self.end = np.reshape(end, 2)", "def isolate_range(start_addr, end_addr):\n\n split_classification(start_addr)\n split_classification(end_addr)", "def getCoveringRanges( self, left_ranges, right_ranges, parent_ranges ):\n \n child_ranges = map( lambda x: (x[0], x[1], 0), left_ranges)\n child_ranges += map( lambda x: (x[0], x[1], 1), right_ranges)\n \n child_ranges.sort()\n parent_ranges.sort()\n \n new_left_ranges = []\n new_right_ranges = []\n \n parent_index = 0\n last_to = 0\n \n parent_left, parent_right = parent_ranges[parent_index]\n\n self.debug( \"child_ranges=%s\" % str(child_ranges) )\n self.debug( \"parent_ranges=%s\" % str(parent_ranges))\n \n last_left, last_right, last_is_right = child_ranges[0]\n \n for this_left, this_right, this_is_right in child_ranges[1:]:\n \n ## look at previous segment last_left to last_right:\n ## find matching parent_index:\n old_parent_index = parent_index\n while (min(parent_right, last_right) - max(parent_left, last_left)) < 0:\n parent_index += 1\n if parent_index == len(parent_ranges): break\n parent_left, parent_right = parent_ranges[parent_index]\n \n ## skip fragments that do not overlap\n if parent_index == len(parent_ranges):\n parent_index = old_parent_index\n last_left, last_right, last_is_right = this_left, this_right, this_is_right\n continue\n \n ## firstly: make segment covering\n new_left = min(parent_left, last_left)\n new_right = min(max(parent_right, last_right), this_left)\n \n if last_is_right:\n new_right_ranges.append((new_left, new_right))\n else:\n new_left_ranges.append((new_left, new_right))\n \n ## reduce parent on left side\n parent_left=max(new_right, parent_left)\n \n last_left, last_right, last_is_right = this_left, this_right, this_is_right\n \n ## process last segment\n while (min(parent_right, last_right) - max(parent_left, last_left)) < 0:\n parent_index += 1\n if parent_index >= len(parent_ranges): break \n parent_left, parent_right = parent_ranges[parent_index]\n \n new_left = min(parent_left, last_left)\n new_right = max(parent_right, last_right)\n \n if last_is_right:\n new_right_ranges.append((new_left, new_right))\n else:\n new_left_ranges.append((new_left, new_right))\n \n self.debug( \"old left ranges=%s\" % str(left_ranges))\n self.debug( \"new left ranges=%s\" % str(new_left_ranges))\n self.debug( \"old right ranges=%s\" % str(right_ranges))\n self.debug( 
\"new right ranges=%s\" % str(new_right_ranges))\n \n return new_left_ranges, new_right_ranges", "def tnuc_range2gnuc_range_(np, tbeg, tend):\n try:\n return min(np[tbeg-1], np[tend-1]), max(np[tbeg-1], np[tend-1])\n except IndexError:\n raise IncompatibleTranscriptError('invalid_cDNA_range_[%d_%d];expect_[0_%d]' % (tbeg, tend, len(np)))", "def break_segments_at_points( intervals, positions, verbose=False ):\n starts, ends = map(list, zip(*intervals))\n for chrom_end_pos in positions:\n assert chrom_end_pos <= ends[-1], \"segment end does not include end of genome\"\n assert chrom_end_pos >= starts[0], \"start of genome to the left of first segment\"\n spos = bisect.bisect_left(starts, chrom_end_pos)\n\n if verbose:\n print \"-\"*80\n print \"position %d\"%chrom_end_pos\n print \"spos %d\"%spos\n if spos < len(starts) and starts[spos] == chrom_end_pos:\n if verbose:\n print \"already a segment boundary\"\n else:\n epos = bisect.bisect_left(ends, chrom_end_pos)\n if verbose:\n print \"epos %d\"%epos\n assert spos == epos+1\n if chrom_end_pos < ends[epos]:\n if verbose:\n print \"Break [%d, %d) into [%d, %d) and [%d, %d)\"%(starts[spos-1], ends[epos], \n starts[spos-1], chrom_end_pos,\n chrom_end_pos, ends[epos])\n print \"Delete %d,%d\"%(starts[epos], ends[epos])\n\n # update epos\n copy_end = ends[epos]\n ends[epos] = chrom_end_pos\n # update epos+1\n starts.insert(epos+1, chrom_end_pos)\n ends.insert(epos+1, copy_end)\n if verbose:\n print \"epos: \", (starts[epos], ends[epos])\n print \"epos+1: \", (starts[epos+1], ends[epos+1])\n elif chrom_end_pos == ends[epos]:\n if verbose:\n print \"already a segment boundary\"\n return zip(starts, ends)", "def to_arrays(self, xmin=None, xmax=None):\n sidx = 0 if xmin is None else np.searchsorted(self.xvec, [xmin])[0]\n eidx = len(self.xvec) if xmax is None else np.searchsorted(self.xvec, [xmax])[0]\n\n if eidx < len(self.xvec) and self.xvec[eidx] == xmax:\n eidx += 1\n\n xtemp = self.xvec[sidx:eidx]\n if xmin is not None and (len(xtemp) == 0 or xtemp[0] != xmin):\n np.insert(xtemp, 0, [xmin])\n if xmax is not None and (len(xtemp) == 0 or xtemp[-1] != xmax):\n np.append(xtemp, [xmax])\n return xtemp, self(xtemp)", "def extend_binning_for_coszen(self, ext_low=-3., ext_high=+3.):\n logging.trace(\"Preparing binning for flipback of reco kernel at\"\n \" coszen boundaries of physical range.\")\n\n cz_edges_out = self.output_binning['reco_coszen'].bin_edges.magnitude\n coszen_range = self.output_binning['reco_coszen'].range.magnitude\n n_cz_out = self.output_binning['reco_coszen'].size\n coszen_step = coszen_range/n_cz_out\n # we need to check for possible contributions from (-3, -1) and\n # (1, 3) in coszen\n assert ext_high > ext_low\n ext_range = ext_high - ext_low\n extended = np.linspace(ext_low, ext_high, int(ext_range/coszen_step) + 1)\n\n # We cannot flipback if we don't have -1 & +1 as (part of extended)\n # bin edges. This could happen if 1 is a multiple of the output bin\n # size, but the original edges themselves are not a multiple of that\n # size.\n for bound in (-1., +1.):\n comp = [recursiveEquality(bound, e) for e in extended]\n assert np.any(comp)\n\n # Perform one final check: original edges subset of extended ones?\n for coszen in cz_edges_out:\n comp = [recursiveEquality(coszen, e) for e in extended]\n assert np.any(comp)\n\n # Binning seems fine - we can proceed\n ext_cent = (extended[1:] + extended[:-1])/2.\n flipback_mask = ((ext_cent < -1. 
) | (ext_cent > +1.))\n keep = np.where((ext_cent > cz_edges_out[0]) &\n (ext_cent < cz_edges_out[-1]))[0]\n cz_edges_out = extended\n logging.trace(\" -> temporary coszen bin edges:\\n%s\"%cz_edges_out)\n\n return cz_edges_out, flipback_mask, keep", "def merge_ranges():", "def _contiguous_ranges(span_list):\n output = []\n for _, span in itertools.groupby(\n enumerate(span_list), lambda p: p[1] - p[0]):\n span = list(span)\n output.append((span[0][1], span[-1][1]))\n return output", "def bed_to_interval(contig, bed_start, bed_end, name='', score='', strand='',\n block_ids='', superblock_ids=''):\n try:\n # assure positions to be integers\n # convert from 0,1-based to 1,1-based positions\n start = int(bed_start) + 1\n end = int(bed_end)\n except ValueError:\n raise ValueError(\"'start' and 'end' should be integers\")\n\n # perform sanity check to check for incorrect formatting\n assert (end - start) >= 0, (\"Not a valid BED interval.\"\n \"(bedEnd - bedStart) must be >= 0.\")\n\n # fallback to empty list for optional element ids\n ids = [element_ids.split(',') if element_ids else []\n for element_ids in (block_ids, superblock_ids)]\n\n return BaseInterval(contig, start, end, name, score, strand, *ids)", "def binnify(chromsizes, binsize, rel_ids=False):\n\n if not isinstance(binsize, int):\n raise ValueError(\"binsize must be int\")\n\n def _each(chrom):\n clen = chromsizes[chrom]\n n_bins = int(np.ceil(clen / binsize))\n binedges = np.arange(0, (n_bins + 1)) * binsize\n binedges[-1] = clen\n return pd.DataFrame(\n {\"chrom\": [chrom] * n_bins, \"start\": binedges[:-1], \"end\": binedges[1:]},\n columns=[\"chrom\", \"start\", \"end\"],\n )\n\n bintable = pd.concat(map(_each, chromsizes.keys()), axis=0, ignore_index=True)\n\n if rel_ids:\n bintable[\"rel_id\"] = bintable.groupby(\"chrom\").cumcount()\n\n # if as_cat:\n # bintable['chrom'] = pd.Categorical(\n # bintable['chrom'],\n # categories=list(chromsizes.keys()),\n # ordered=True)\n\n return bintable", "def fasta2bases(fastafn, ref, start, end, strands=\"+-\", n=3):\n fasta = pysam.FastaFile(fastafn)\n ref2len = {r: l for r, l in zip(fasta.references, fasta.lengths)}\n if ref not in ref2len: #fasta.references:\n raise StopIteration\n for pos, refbase in enumerate(fasta.fetch(ref, start, end), start+1):\n refbase = refbase.upper()\n # combine before start NNN (if needed) sequence from ref and after start NNN (if needed)\n mer = \"N\"*(n-pos+1) + \"\".join(fasta.fetch(ref, pos-n-1 if pos>n+1 else 0, pos+n)) + \"N\"*(pos-ref2len[ref]+n)\n mer = mer.upper() # need to be upper case\n for si, strand in enumerate(strands):\n if si:\n refbase = base2complement[refbase]\n mer = get_revcomp(mer)\n yield pos, si, strand, refbase, mer", "def bin_center_to_edges(centers):\n edges = bin_edges_to_center(centers)\n edges = np.append(centers[0]-(edges[0]-centers[0]), edges)\n edges = np.append(edges, centers[-1]+(centers[-1]-edges[-1]))\n return edges", "def _makebaselines(self):\n nholes = self.ctrs.shape[0]\n blist = []\n for i in range(nholes):\n for j in range(nholes):\n if i < j:\n blist.append((i, j))\n barray = np.array(blist).astype(np.int)\n #blname = []\n bllist = []\n for basepair in blist:\n # blname.append(\"{0:d}_{1:d}\".format(basepair[0],basepair[1]))\n baseline = self.ctrs[basepair[0]] - self.ctrs[basepair[1]]\n bllist.append(baseline)\n return barray, np.array(bllist)" ]
[ "0.57546645", "0.5614808", "0.53776675", "0.53201085", "0.52665704", "0.5239588", "0.5239492", "0.5221806", "0.519594", "0.5175467", "0.5016556", "0.50004125", "0.4980183", "0.49787596", "0.49721554", "0.49698713", "0.49678442", "0.49237615", "0.49228168", "0.49226806", "0.4920214", "0.49133766", "0.49076632", "0.49011803", "0.4887346", "0.48816252", "0.48768082", "0.4872485", "0.48649803", "0.48629653" ]
0.7329581
0
Find the location and size of each repeat in `values`.
def get_repeat_slices(values): # ENH: look into pandas groupby innards offset = 0 for idx, (_val, rpt) in enumerate(itertools.groupby(values)): size = len(list(rpt)) if size > 1: i = idx + offset slc = slice(i, i + size) yield slc, size offset += size - 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_repeating_frequency(values):\n frequencies = set([0])\n\n index = 0\n frequency = 0\n while True:\n found = False\n for value in values:\n frequency += value\n index += 1\n if frequency in frequencies:\n found = True\n break\n\n frequencies.add(frequency)\n\n if found:\n break\n\n return frequency", "def coordinate_Loc_Val(loc, values):\n assert loc is not None\n assert values is not None\n res = dict()\n for i in range(len(loc)):\n if loc[i] not in res: res[loc[i]] = values[i]\n else: res[loc[i]] += values[i]\n return list(res.keys()), [math.log(i+1) for i in list(res.values())]", "def iter_values_len(self):\n for key, values in self.data.items():\n yield key, len(values)", "def getIndexes(self, val):\n # Find where this value is listed. \n valNdx = (self.values == val).nonzero()[0]\n \n # If this value is not actually in those listed, then we \n # must return empty indexes\n if len(valNdx) == 0:\n start = 0\n end = 0\n else:\n # The index into counts, etc. for this value. \n valNdx = valNdx[0]\n start = self.start[valNdx]\n end = self.end[valNdx]\n \n # Create a tuple of index arrays, one for each index of the original array. \n ndx = ()\n for i in range(self.nDims):\n ndx += (self.indexes[start:end, i], )\n return ndx", "def search(self, values):\n if values is False:\n return False\n if all(len(values[square]) == 1 for square in self.squares):\n return values\n n, square = min((len(values[square]), square)\n for square in self.squares if len(values[square]) > 1)\n\n return self.possible_values(self.search(self.assign(values.copy(), square, dig))\n for dig in values[square])", "def __valuesToIndices(self, mappings, values):\n indices = np.empty(0, dtype=np.int_)\n\n for key, _ in mappings.items():\n # Lookup the index of the value of the values in the map.\n index = mappings[key](values[key])\n\n indices = np.hstack((indices, index))\n\n return indices", "def appearances(size, target):\n appears = 0\n for i in range(1, size + 1):\n for j in range(1, size + 1):\n if i * j == target:\n appears += 1\n return appears", "def counts(self, *vals):\n return ((v, self[self.hash(val)]) for v in vals)", "def convertSumsToMap(arr, values):\n num_map = {}\n for item in values:\n num_map[item] = []\n\n for i in range(len(arr)):\n num_map[i+1] = setFromValues(arr[i])\n\n return num_map", "def get_frequencies(valueList):\n \n valueSums = []\n values = []\n for value in valueList:\n try:\n index = values.index(value)\n valueSums[index]+=1\n except:\n values.append(value)\n valueSums.append(1)\n\n return values, valueSums", "def size(self) -> Tuple[int, int]:\n count_keys = 0 # store the number of different 'key'.\n count_values = 0 # store the the number of different 'value'.\n for node in self.hashTable:\n count_values = count_values + node.count\n count_keys = count_keys + len(node.keys)\n return count_keys, count_values", "def countDistinctValues(array, pos = -1):\n\t\n#\tfrom jot.basic.checkElementInArray import checkElementInArray\n\n\tfinalDict = {}\n\t\n\tfor i in array:\n\t\t\n\t\tmatch = 0\n\t\t\n\t\tif pos == -1:\n\t\t\tcheckValue = i\n\t\telse:\n\t\t\tcheckValue = i[pos]\n\t\t\t\n\t\tmatch = checkElementInArray(checkValue,finalDict)\n\t\t\n\t\tif match == False:\n\t\t\tfinalDict[checkValue] = 1\n\t\telse:\n\t\t\tfinalDict[checkValue] = finalDict[checkValue]+1\n\t\t\t\n\treturn finalDict", "def nunique(self, values):\n return self.aggregate(values, \"nunique\")", "def count_entries(numbers):\n nodes = numbers[0]\n mt_entries = numbers[1]\n\n total = 0\n offset = 2\n for _ in 
range(nodes):\n entries, value = count_entries(numbers[offset:])\n offset += entries\n total += value\n\n for entry in numbers[offset:offset+mt_entries]:\n total += entry\n offset += 1\n return offset, total", "def _getValueCounts(mapping):\n return Counter({k: len(v) for k, v in viewitems(mapping)})", "def size(self):\n return len(self.values)", "def dimensions( cls, value, typeCode=None ):\n return value.shape", "def high_count(values):\n length = len(values)\n arr = np.zeros(length, dtype=np.int16)\n count = 0\n max_val = values[0]\n for i in np.arange(1, length):\n if values[i] > max_val:\n max_val = values[i]\n count += 1\n arr[i] = count\n return arr", "def count(self, value: object) -> int:\n # Initializes count to zero.\n count = 0\n # Loops through the indices of the dynamic array and if the value is found, the count increments.\n end = self.size()\n for ind in range(end):\n if self.da[ind] == value:\n count += 1\n # Count is returned.\n return count", "def find_dimensions(seq):\n sizes = []\n if _is_sequence(seq):\n sizes.append(len(seq))\n sizes += find_dimensions(seq[0])\n return sizes", "def similiar_chunks_indexes(n_values, n_chunks) -> Generator[Tuple[int, int], None, None]:\n chunk_size = int(numpy.ceil(n_values / n_chunks))\n for i in range(0, n_values, chunk_size):\n yield i, i + chunk_size", "def __len__(self):\n return len(self._values)", "def count_elements(seq) -> dict:\n hist = {}\n for i in seq:\n hist[i] = hist.get(i, 0) + 1\n return hist", "def count_elements(seq) -> dict:\n hist = {}\n for i in seq:\n hist[i] = hist.get(i, 0) + 1\n return hist", "def get_size(self, valueid):", "def count_values_in_list(self,list_,value,start=0,end=None):\r\n return self.get_slice_from_list(list_,start,end).count(value)", "def nunique(self, values: groupable) -> Tuple[groupable, pdarray]:\n # TO DO: defer to self.aggregate once logic is ported over to Chapel\n # return self.aggregate(values, \"nunique\")\n togroup = self._nested_grouping_helper(values)\n # Find unique pairs of (key, val)\n g = GroupBy(togroup)\n # Group unique pairs again by original key\n g2 = GroupBy(g.unique_keys[0], assume_sorted=False)\n # Count number of unique values per key\n keyorder, nuniq = g2.count()\n # The last GroupBy *should* result in sorted key indices, but in case it\n # doesn't, we need to permute the answer to match the original key order\n if not is_sorted(keyorder):\n perm = argsort(keyorder)\n nuniq = nuniq[perm]\n # Re-join unique counts with original keys (sorting guarantees same order)\n return self.unique_keys, nuniq", "def findRank(e, values):\n\tcount = 1\n\tfor ve in values:\n\t\tif ve < e:\n\t\t\tcount += 1\n\treturn count", "def _get_num_values_per_instance_in_sparse_batch(batch_indices: np.ndarray,\n batch_size: int) -> List[int]:\n result = [0] * batch_size\n for arr in batch_indices:\n result[arr[0]] += 1\n return result", "def map_values_to_value_list(value_list, values):\n return [value_list.index(x) for x in values]" ]
[ "0.60911363", "0.5886676", "0.5794653", "0.57471794", "0.5594755", "0.5532977", "0.5366681", "0.53395396", "0.5300417", "0.5274737", "0.5222571", "0.52221423", "0.5220522", "0.5208285", "0.5179138", "0.5176746", "0.5162105", "0.51443595", "0.51057637", "0.5093284", "0.50863653", "0.507727", "0.5066931", "0.5066931", "0.50363386", "0.50315756", "0.50277686", "0.5027504", "0.5017888", "0.4997483" ]
0.6345479
0
Find the chromosomal position of each named gene in probes. Returns a dict mapping each chromosome to a list of (start, end, gene name) tuples.
def gene_coords_by_name(probes, names): names = list(filter(None, set(names))) if not names: return {} # Create an index of gene names gene_index = collections.defaultdict(set) for i, gene in enumerate(probes["gene"]): for gene_name in gene.split(","): if gene_name in names: gene_index[gene_name].add(i) # Retrieve coordinates by name all_coords = collections.defaultdict(lambda: collections.defaultdict(set)) for name in names: gene_probes = probes.data.take(sorted(gene_index.get(name, []))) if not len(gene_probes): raise ValueError(f"No targeted gene named {name!r} found") # Find the genomic range of this gene's probes start = gene_probes["start"].min() end = gene_probes["end"].max() chrom = core.check_unique(gene_probes["chromosome"], name) # Deduce the unique set of gene names for this region uniq_names = set() for oname in set(gene_probes["gene"]): uniq_names.update(oname.split(",")) all_coords[chrom][start, end].update(uniq_names) # Consolidate each region's gene names into a string uniq_coords = {} for chrom, hits in all_coords.items(): uniq_coords[chrom] = [ (start, end, ",".join(sorted(gene_names))) for (start, end), gene_names in hits.items() ] return uniq_coords
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def map_probes(probeset, entrez_ids): \n entrez_idx = None\n mapping = {}\n with open(probeset) as probes:\n for line in probes:\n if line.startswith('ID'):\n entrez_idx = line.split('\\t').index('ENTREZ_GENE_ID')\n elif entrez_idx:\n # if the index has been defined then we're past the header\n row = [x.strip() for x in line.split('\\t')]\n # if we're doing percentile rank, we need all the mappings, otherwise can just track the mappings of interest\n if PERCENTILE_RANK:\n if '///' in row[entrez_idx]:\n # multile genes add an entry for every gene overlapped by the probe\n # TODO: FIX; THIS IS A MANY TO MANY MAPPING ISSUE \n # since this only happens once in this dataset, I'm just using the first one but can also use last (or develop a solution that works for all cases...)\n mapping[row[0]] = row[entrez_idx].split(' /// ')[0]\n \"\"\" # option to use the last one \n for entrez_id in [x for x in row[entrez_idx].split(' /// ')]:\n print('Entrez ID:'+str(entrez_id)+' in probe that maps to multiple genes')\n mapping[row[0]] = entrez_id[0] \n \"\"\"\n print('MANY TO MANY: '+str(row[0])+\"->\"+str(row[entrez_idx]))\n else:\n mapping[row[0]] = row[entrez_idx]\n elif row[entrez_idx] in entrez_ids:\n mapping[row[0]] = row[entrez_idx]\n\n return mapping", "def extract_header_info_from_probes(filename):\n header_info = {}\n with open(filename) as probe_fasta:\n for line in probe_fasta:\n if line.startswith('>'): # if line is fasta header\n gene_name = extract_from_header(line, 'gene=', '\\&')\n var_name = extract_from_header(line, 'var_name=', '\\&')\n # check if gene is already in header dictionary\n if gene_name in header_info:\n header_info[gene_name].add(var_name)\n else:\n header_info[gene_name] = {var_name}\n return header_info", "def gene_coords_by_range(probes, chrom, start, end, ignore=params.IGNORE_GENE_NAMES):\n ignore += params.ANTITARGET_ALIASES\n # Tabulate the genes in the selected region\n genes = collections.OrderedDict()\n for row in probes.in_range(chrom, start, end):\n name = str(row.gene)\n if name in genes:\n genes[name][1] = row.end\n elif name not in ignore:\n genes[name] = [row.start, row.end]\n # Reorganize the data structure\n return {\n chrom: [(gstart, gend, name) for name, (gstart, gend) in list(genes.items())]\n }", "def GetGeneName(arg):\n\n genbank = ChromUnzip(arg)\n \n p1=re.compile(r'(?:ACCESSION\\s+)(\\w+\\d+)')\n p6=re.compile(r'(?:/gene=\")(.+?)(?:\"\\s+)')\n\n gene_name_dict={}\n \n for entry in genbank:\n gene_list=[] \n gene_it_6=p6.finditer(entry)\n gene_it_1=p1.finditer(entry) \n for hit in gene_it_6:\n gene_list.append(hit.group(1))\n for item in gene_it_1:\n gene_name_dict[item.group(1)]=gene_list[0]\n \n return gene_name_dict", "def _get_positions(self):\n position_map = dict()\n # Assumes that the positions are indexed in the order of Row-->Well-->FOV\n for well in self.wells:\n for pos in self.store[well].attrs.get('well').get('images'):\n pos_name = pos['path']\n # pos name is 'Pos_xxx'\n pos_idx = int(pos_name.split('_')[-1])\n position_map[pos_idx] = {'name': pos_name, 'well': well}\n return position_map", "def _get_gene_map(self) -> OrderedDict:\n if \"gene\" not in self.data:\n return OrderedDict()\n\n genes: OrderedDict = OrderedDict()\n for idx, genestr in self.data[\"gene\"].items():\n if pd.isnull(genestr):\n continue\n for gene in genestr.split(\",\"):\n if gene not in genes:\n genes[gene] = []\n genes[gene].append(idx)\n return genes", "def processFile(filename):\n geneDict = {}\n pat = re.compile(r'gene_index (\\d+)')\n gf = 
open(filename)\n for line in gf:\n line = line.strip()\n if line.startswith('#'):\n continue\n t = line.split('\\t')\n if len(t) != 9 and len(t) !=8:\n continue\n if t[2] not in ['CDS', 'UTR']:\n continue\n s = t[8].split(';')\n m = re.search(pat, s[1])\n if m is None:\n raise RuntimeError('bad regex, unable to pull out gene index for line %s' % line)\n geneIndex = m.group(1)\n if geneIndex not in geneDict:\n g = Gene()\n geneDict[geneIndex] = g\n else:\n g = geneDict[geneIndex]\n if t[2] == 'CDS':\n g.numCds += 1\n elif t[2] == 'UTR':\n g.numUtr += 1\n if g.left > int(t[3]):\n g.left = int(t[3])\n g.updateLength()\n if g.right < int(t[4]):\n g.right = int(t[4])\n g.updateLength()\n return geneDict", "def probes(self):\r\n return probes.Probes(self)", "def gene_coding_exon_positions(genefile, ignore_strange_cases=False):\n # MAYBE-TODO add options for all features? If needed.\n # MAYBE-TODO add option to deal with multiple-splice-variant cases? But they might just make the data too messy.\n gene_feature_positions = {}\n with open(os.path.expanduser(genefile)) as GENEFILE:\n for chromosome_record in GFF.parse(GENEFILE, limit_info={}):\n for gene_record in chromosome_record.features:\n if len(gene_record.sub_features) != 1: \n if ignore_strange_cases: continue\n else: raise NoRNAError(\"Gene %s has no mRNA or multiple mRNAs!\"%gene_record.id)\n features = gene_record.sub_features[0].sub_features\n CDS_positions = sorted(get_feature_start_end(feature) for feature in features if feature.type=='CDS')\n gene_feature_positions[gene_record.id] = CDS_positions\n return gene_feature_positions", "def get_probe_address(elf_path, probes, section='.text'):\n assert len(probes) <= 26, 'Too many probes'\n\n text_data = objdump_section(elf_path, '.text')\n name_to_addr = parse_func_names(text_data)\n\n probe_names = list(string.ascii_uppercase)\n name_idx = 0 \n\n ret = []\n\n for probe in probes: \n assert probe in name_to_addr, '{} not found'.format(probe)\n ret.append('{}:0x{}'.format(probe_names[name_idx], name_to_addr[probe]))\n name_idx += 1 \n\n return ret", "def build_gene_indexes(df):\n\tgeneDict = OrderedDict()\n\n\tgeneCount = 0\n\tpreviousGeneIndex = 0\n\n\tcurrent_id=\"\"\n\tcurrent_gene=\"\"\n\n\tfor i in range(len(df)):\n\n\t\tif df.loc[i,'feature'] == 'gene':\n\t\t\ttrdict = parse_entry(df.loc[i,'transcript_id'])\n\n\t\t\tcurGeneID = trdict['gene_id'][0]\n\t\t\n\t\t\tif geneCount != 0:\n\t\t\t\tnewGeneIndex = i\n\t\t\t\tgeneDict[current_id] = [previousGeneIndex,newGeneIndex]\n\t\t\t\tpreviousGeneIndex = i\n\t\t\t\tcurrent_id = trdict['gene_id'][0]\n\t\t\t\tgeneCount += 1\n\n\t\t\telse:\n\t\t\t\tnewgeneIndex = 0\n\t\t\t\tgeneCount +=1\n\t\t\t\tcurrent_id = trdict['gene_id'][0]\n\t\tif i == (len(df)-1):\n\t\t\tnewGeneIndex = i+1\n\t\t\tcurrent_id = trdict['gene_id'][0]\n\t\t\tgeneDict[current_id] = [previousGeneIndex,newGeneIndex]\n\treturn geneDict", "def get_expression(data_series, probes_to_genes):\n with open(data_series, 'r') as mtx:\n stage_columns = {'all_stages': {'sample_ids': []}} # will always need an average, other stages are determined by the file\n sample_ids = None\n for line in mtx:\n if line.startswith('!Sample_title'):\n sample_stages = [x.strip().replace('\"','').split(\",\")[0] for x in line.split(\"\\t\")[1:]] # this line is likely dataset specific.\n elif line.startswith('\"ID_REF\"'): # this comes after the sample titles\n sample_ids = [x.strip().replace('\"','') for x in line.split(\"\\t\")[1:]]\n # now have the ids and their stages, convert to dict\n \"\"\"\n if named 
differently, may need to modify this.\n ultimately, stage_columns should be a dictionary with the following properties:\n - the keys are the stage names. \n - each 'stage' dict should have a key 'sample_ids' that has a list the sample_ids belonging to that stage.\n {\n 'stage1': {\n 'sample_ids': ['sample_id1','sample_id2', ..., 'sample_idn']\n },\n 'stage2': {\n 'sample_ids': ['sample_idn+1', ...]\n },\n ...\n }\n \"\"\"\n for i in range(0, len(sample_stages)):\n if sample_stages[i] not in stage_columns:\n stage_columns[sample_stages[i]] = {'sample_ids': []}\n stage_columns[sample_stages[i]]['sample_ids'].append(sample_ids[i])\n stage_columns['all_stages']['sample_ids'].append(sample_ids[i]) # add every sample to this\n elif sample_ids is not None:\n row = [x.strip().replace('\"','') for x in line.split('\\t')]\n \"\"\"\n here, the stage_columns dictionary is being updated with the expression data for each gene.\n {\n 'stage1': {\n 'sample_ids': ['sample_id1','sample_id2', ..., 'sample_idn'],\n 'genes': { <- **NEW KEY**\n 'entrezID-1': ['sample_id1ExpLevel', 'sample_id2ExpLevel', ..., 'sample_idnExpLevel'],\n 'entrezID-2': ['sample_id1ExpLevel', 'sample_id2ExpLevel', ..., 'sample_idnExpLevel'],\n ... (if PERCENTILE_RANK is True, all in dataset are recorded otherwise, just the genes of interest )\n }\n },\n ...\n }\n \"\"\"\n if row[0] in probes_to_genes:\n # get gene from probe\n entrez_id = probes_to_genes[row[0]]\n # add the average expression for all the samples in a stage for the gene\n for stage, stage_data in stage_columns.items():\n stage_data['genes'] = {} if 'genes' not in stage_data else stage_data['genes'] # initialize\n for sample_id in stage_data['sample_ids']:\n # get the index of the sample_id in the row\n sample_idx = sample_ids.index(sample_id) + 1\n if entrez_id not in stage_data['genes']:\n stage_data['genes'][entrez_id] = [float(row[sample_idx])]\n else:\n stage_data['genes'][entrez_id].append(float(row[sample_idx]))\n\n return stage_columns", "def get_probeLocs_calib_setup(dir, num_probes = 16):\n position_vectors = [[0] * 3 for i in range(num_probes)]\n\n #every x postion\n\n # Convert to meters\n x_pos = [-4.25*1e-3*25.4, -4.25*1e-3*25.4, 4.24*1e-3*25.4, 4.24*1e-3*25.4]\n y_pos = [-4.25*1e-3*25.4, 4.24*1e-3*25.4, 4.24*1e-3*25.4, -4.25*1e-3*25.4]\n z_pos = [-2.25*1e-3*25.4, -0.75*1e-3*25.4, 0.75*1e-3*25.4, 2.25*1e-3*25.4]\n x = 0\n for i in range(num_probes):\n if(i%4 ==0 and i>0):\n x+=1\n position_vectors[i][0] =x_pos[x]\n position_vectors[i][1] = y_pos[x]\n position_vectors[i][2] =z_pos[i%4]\n # print(position_vectors[i][0])\n\n \"\"\" Now take into account the direction\n r shots : x,y,z - > r,t,z\n t shots : x,y,z - > r,t,z\n z shots : x,y,z - > r,t,z\n \"\"\"\n if dir ==2 :#r\n # don't need to switch anything\n return position_vectors\n if dir == 0:#t\n # looks like -90 degree rotation about y-axis of probe coil orientation, so switch x and z\n position_vectors[:][0], position_vectors[:][2] = position_vectors[:][2], position_vectors[:][0]\n return position_vectors\n if dir ==1:#z\n # also like -90 degree rotation, switch x and z\n position_vectors[:][0], position_vectors[:][2] = position_vectors[:][2], position_vectors[:][0]\n return position_vectors\n\n return position_vectors", "def locate_peaks(vox_coords):\n sub_names = harvard_oxford_sub_names\n ctx_names = harvard_oxford_ctx_names\n at_dir = op.join(os.environ[\"FSLDIR\"], \"data\", \"atlases\")\n ctx_data = nib.load(op.join(at_dir, \"HarvardOxford\",\n 
\"HarvardOxford-cort-prob-2mm.nii.gz\")).get_data()\n sub_data = nib.load(op.join(at_dir, \"HarvardOxford\",\n \"HarvardOxford-sub-prob-2mm.nii.gz\")).get_data()\n\n loc_list = []\n for coord in vox_coords:\n coord = tuple(coord)\n ctx_index = np.argmax(ctx_data[coord])\n ctx_prob = ctx_data[coord][ctx_index]\n sub_index = np.argmax(sub_data[coord])\n sub_prob = sub_data[coord][sub_index]\n\n if not max(sub_prob, ctx_prob):\n loc_list.append((\"Unknown\", 0))\n continue\n if not ctx_prob and sub_index in [0, 11]:\n loc_list.append((sub_names[sub_index], sub_prob))\n continue\n if sub_prob > ctx_prob and sub_index not in [0, 1, 11, 12]:\n loc_list.append((sub_names[sub_index], sub_prob))\n continue\n loc_list.append((ctx_names[ctx_index], ctx_prob))\n\n return pd.DataFrame(loc_list, columns=[\"MaxProb Region\", \"Prob\"])", "def get_probeLocs_calib_setup_cm(dir, num_probes = 16):\n position_vectors = [[0] * 3 for i in range(num_probes)]\n\n #every x postion\n\n # Convert to meters\n x_pos = [-4.25*2.54, -4.25*2.54, 4.24*2.54, 4.24*2.54]\n y_pos = [-4.25*2.54, 4.24*2.54, 4.24*2.54, -4.25*2.54]\n z_pos = [-2.25*2.54, -0.75*2.54, 0.75*2.54, 2.25*2.54]\n x = 0\n for i in range(num_probes):\n if(i%4 ==0 and i>0):\n x+=1\n position_vectors[i][0] =x_pos[x]\n position_vectors[i][1] = y_pos[x]\n position_vectors[i][2] =z_pos[i%4]\n # print(position_vectors[i][0])\n\n \"\"\" Now take into account the direction\n r shots : x,y,z - > r,t,z\n t shots : x,y,z - > r,t,z\n z shots : x,y,z - > r,t,z\n \"\"\"\n if dir ==2 :#r\n # don't need to switch anything\n return position_vectors\n if dir == 0:#t\n # looks like -90 degree rotation about y-axis of probe coil orientation, so switch x and z\n position_vectors[:][0], position_vectors[:][2] = position_vectors[:][2], position_vectors[:][0]\n return position_vectors\n if dir ==1:#z\n # also like -90 degree rotation, switch x and z\n position_vectors[:][0], position_vectors[:][2] = position_vectors[:][2], position_vectors[:][0]\n return position_vectors\n\n return position_vectors", "def get_probes(self):\n # TODO: might create a cached list of objects\n # TODO: instead of returning the simple json dict\n for probe in self.probes.values():\n yield probe", "def _retrieve_motor_positions(doc_collector, motors):\n motor_names = [_.name for _ in motors]\n # Get the event list for the first run\n desc = next(iter(doc_collector.event.keys())) # Descriptor\n event_list = doc_collector.event[desc]\n\n # Now collect the positions\n positions = {k: [] for k in motor_names}\n for event in event_list:\n for name in positions.keys():\n positions[name].append(event[\"data\"][name])\n\n return positions", "def organize_by_chromosome(genes, transcripts):\n gene_dict = {}\n transcript_dict = {}\n\n for ID in genes:\n gene = genes[ID]\n chromosome = gene.chromosome\n if chromosome not in gene_dict:\n chrom_genes = {}\n chrom_genes[ID] = gene\n gene_dict[chromosome] = chrom_genes\n gene_dict[chromosome][ID] = gene\n\n for ID in transcripts:\n transcript = transcripts[ID]\n chromosome = transcript.chromosome\n if chromosome not in transcript_dict:\n chrom_transcripts = {}\n chrom_transcripts[ID] = transcript\n transcript_dict[chromosome] = chrom_transcripts\n transcript_dict[chromosome][ID] = transcript\n transcript_dict[chromosome][ID] = transcript\n\n return gene_dict, transcript_dict", "def get_gene_values(hotel_ids):\n hotel_genes = {}\n subcats = get_subcat_axes()\n cursor = conn.cursor()\n cursor.execute(\n \"\"\"\n SELECT hotel_id, genome\n FROM hotel_genome\n WHERE 
hotel_id in (%s)\n \"\"\" % \",\".join([str(h) for h in hotel_ids])\n )\n for hotel_id, genome_str in cursor.fetchall():\n genome = [float(g.strip()) for g in genome_str.split(\",\")]\n hotel_genes[hotel_id] = get_hotel_genes_by_subcat(\n subcats, genome)\n return subcats, hotel_genes", "def get_positions(directory): \n positions = {}\n names = {}\n pos_dict = {'1': \"GK\", '2': \"DEF\", '3': \"MID\", '4': \"FWD\"}\n fin = open(directory + \"/players_raw.csv\", 'rU',encoding=\"utf-8\")\n reader = csv.DictReader(fin)\n for row in reader:\n positions[int(row['id'])] = pos_dict[row['element_type']] \n names[int(row['id'])] = row['first_name'] + ' ' + row['second_name']\n return names, positions", "def Genes_Per_Genome(Input, Gene_Separator,Contig_Separator):\n Number_Genes = {}\n Gene_Length = {}\n with open(Input) as FastAInput:\n for line in FastAInput:\n if \">\" in line:\n Gene = line.split()[0].replace(\">\",\"\")\n Gene_Length[Gene] = 0\n Genome = Gene.split(Contig_Separator)\n Genome = Contig_Separator.join(Genome[:-1])\n Number_Genes[Genome] = Number_Genes.get(Genome, 0) + 1\n else:\n line = line.strip()\n Gene_Length[Gene] += len(line)\n return (Number_Genes, Gene_Length)", "def FindProIDfromGeneID(geneName, strainName, mRNA_protein_dict=mRNA_protein):\n\n # in the first step here, we must find the right gene id based on the part gene id from raven\n dir0 = '../0_332yeast_genomes/332_genome_annotations/proteome_old_species_id/'\n # strain1 = 'candida_sorboxylosa'\n strain_dir = dir0 + strainName + '.max.pep'\n protein_faa = open(strain_dir, 'r').readlines()\n protein_faa_id = [x for x in protein_faa if '>' in x]\n # next based on the above gene0, we find the related right mRNAid\n gene1 = [x.replace('>', '').strip('\\n') for x in protein_faa_id if geneName in x]\n protein_id = []\n for x in gene1:\n print(mRNA_protein_dict[x])\n protein_id.append(mRNA_protein_dict[x])\n return protein_id", "def get_probe_location(self):\n\n probe_x, probe_y = self.position\n\n if self.previous_direction == (1, 0):\n probe_x += CAR_LENGTH - 1\n elif self.previous_direction == (0, 1):\n probe_y += CAR_LENGTH - 1\n\n return probe_x, probe_y", "def gene(self, idx):\r\n return self.genes[idx]", "def probe(self) -> dict:\n result = {}\n for name, func in self._probes.items():\n result[name] = func()\n return result", "def getAttendance(self):\n probes = []\n allParticles = self.pid_lookup.items()\n allParticles.sort(key=lambda x: abs(x[1]))\n for aParticle, pid in allParticles:\n numberOfEvents = self.db.selectFromTable(\"multiplicities\", \"count()\", \"pid = %d\" % pid)[0][0]\n probes.append((aParticle, numberOfEvents))\n return probes", "def getProbeList(probeNames, probeMap):\n trace (\"getProbeList(%s)\" %probeNames)\n probeList = []\n probeNameList = probeNames.split(',')\n for probeName in probeNameList:\n probe = probeMap.get(probeName.strip().lower())\n if probe:\n probeList.append (probe)\n else:\n print \"Unable to find %s in %s\" %(probeName,str(probeMap))\n return probeList", "def genome_index_to_dict(self, index):\n chrom_pos = self.chrom_and_pos(index)\n return {'Chromosome': chrom_pos[0], 'Position': chrom_pos[1]}", "def init_gene():\n gene_details=dict(\n id = '', \n anno_id = [],\n confgenes_id = [],\n name = '',\n source = '',\n gene_info = {},\n alias = '',\n name2 = [],\n strand = '',\n chr = '',\n chr_num = [],\n paralogs = [],\n start = '',\n stop = '',\n transcripts = [],\n transcript_info = [],\n transcript_status = [],\n transcript_valid = [],\n exons = [],\n exons_confirmed = 
[],\n cds_exons = [],\n utr5_exons = [],\n utr3_exons = [],\n tis = [],\n tis_conf = [],\n tis_info = [],\n cdsStop = [],\n cdsStop_conf = [],\n cdsStop_info = [],\n tss = [],\n tss_info = [],\n tss_conf = [],\n cleave = [],\n cleave_info = [],\n cleave_conf = [],\n polya = [],\n polya_info = [],\n polya_conf = [],\n is_alt = [],\n is_alt_spliced = 0,\n is_valid = [],\n transcript_complete = [],\n is_complete = [],\n is_correctly_gff3_referenced = '',\n splicegraph = []\n )\n return gene_details", "def get_genes(variant):\n genes = {}\n transcripts = []\n mongo_genes = []\n \n # Conversion from ensembl to refseq\n # ensembl_to_refseq is a dictionary with ensembl transcript id as keys and\n # a list of refseq ids as values\n ensembl_to_refseq = {}\n for gene_info in variant['info_dict'].get(\n 'Ensembl_transcript_to_refseq_transcript', []):\n splitted_gene = gene_info.split(':')\n transcript_info = splitted_gene[1]\n for transcript in transcript_info.split('|'):\n splitted_transcript = transcript.split('>')\n if len(splitted_transcript) > 1:\n ensembl_id = splitted_transcript[0]\n refseq_ids = splitted_transcript[1].split('/')\n ensembl_to_refseq[ensembl_id] = refseq_ids\n \n # A dictionary with clinical gene descriptions\n gene_descriptions = {}\n for gene_info in variant['info_dict'].get('Gene_description', []):\n splitted_gene = gene_info.split(':')\n hgnc_symbol = splitted_gene[0]\n description = splitted_gene[1]\n gene_descriptions[hgnc_symbol] = description\n \n # First we get all vep entrys that we find and put them under their \n # corresponding gene symbol in 'genes'\n for vep_entry in variant['vep_info'].get(variant['ALT'], []):\n transcript = get_transcript(vep_entry, ensembl_to_refseq)\n hgnc_symbol = transcript.hgnc_symbol\n if hgnc_symbol:\n if hgnc_symbol in genes:\n genes[hgnc_symbol]['transcripts'][transcript.transcript_id] = transcript\n for functional_annotation in transcript.functional_annotations:\n new_rank = SO_TERMS[functional_annotation]['rank']\n if new_rank < genes[hgnc_symbol]['best_rank']:\n genes[hgnc_symbol]['best_rank'] = new_rank\n genes[hgnc_symbol]['most_severe_transcript'] = transcript\n genes[hgnc_symbol]['most_severe_function'] = functional_annotation\n \n else:\n genes[hgnc_symbol] = {}\n genes[hgnc_symbol]['transcripts'] = {}\n genes[hgnc_symbol]['transcripts'][transcript.transcript_id] = transcript\n genes[hgnc_symbol]['most_severe_transcript'] = transcript\n genes[hgnc_symbol]['omim_gene_id'] = None\n genes[hgnc_symbol]['phenotypic_terms'] = []\n genes[hgnc_symbol]['best_rank'] = 40\n genes[hgnc_symbol]['ensembl_id'] = transcript.ensembl_id\n \n for functional_annotation in transcript.functional_annotations:\n new_rank = SO_TERMS[functional_annotation]['rank']\n if new_rank < genes[hgnc_symbol]['best_rank']:\n genes[hgnc_symbol]['best_rank'] = new_rank\n genes[hgnc_symbol]['most_severe_function'] = functional_annotation\n \n \n ######################################################################\n ## There are two types of OMIM terms, one is the OMIM gene entry ##\n ## and one is for the phenotypic terms. ##\n ## Each key in the 'omim_terms' dictionary reprecents a gene id. 
##\n ## Values are a dictionary with 'omim_gene_id' = omim_gene_id and ##\n ## 'phenotypic_terms' = [list of OmimPhenotypeObjects] ##\n ######################################################################\n\n # Fill the omim gene id:s:\n for annotation in variant['info_dict'].get('OMIM_morbid', []):\n if annotation:\n splitted_record = annotation.split(':')\n try:\n hgnc_symbol = splitted_record[0]\n omim_term = splitted_record[1]\n genes[hgnc_symbol]['omim_gene_id'] = omim_term\n except (ValueError, KeyError):\n pass\n\n # Fill the omim phenotype terms:\n for gene_annotation in variant['info_dict'].get('Phenotypic_disease_model', []):\n if gene_annotation:\n splitted_gene = gene_annotation.split(':')\n hgnc_symbol = splitted_gene[0]\n for omim_entry in splitted_gene[1].split('|'):\n splitted_record = omim_entry.split('>')\n \n phenotype_id = splitted_record[0]\n inheritance_patterns = []\n if len(splitted_record) > 1:\n inheritance_patterns = splitted_record[1].split('/')\n \n disease_model = PhenotypeTerm(\n phenotype_id=phenotype_id,\n disease_models=inheritance_patterns\n )\n \n genes[hgnc_symbol]['phenotypic_terms'].append(disease_model)\n \n for hgnc_symbol in genes:\n gene_info = genes[hgnc_symbol]\n most_severe = gene_info['most_severe_transcript']\n # Create a mongo engine gene object for each gene found in the variant\n mongo_gene = Gene(hgnc_symbol=hgnc_symbol)\n mongo_gene.description = gene_descriptions.get(hgnc_symbol)\n mongo_gene.ensembl_gene_id = gene_info.get('ensembl_id', None)\n mongo_gene.omim_gene_entry = gene_info.get(\n 'omim_gene_id', \n None\n )\n\n mongo_gene.omim_phenotypes = gene_info.get(\n 'phenotypic_terms', \n []\n )\n\n # Add a list with the transcripts:\n mongo_gene.transcripts = []\n for transcript_id in gene_info['transcripts']:\n mongo_gene.transcripts.append(gene_info['transcripts'][transcript_id])\n\n try:\n mongo_gene.functional_annotation = gene_info['most_severe_function']\n except AttributeError:\n pass\n try:\n mongo_gene.region_annotation = SO_TERMS[mongo_gene.functional_annotation]['region']\n except AttributeError:\n pass\n try:\n mongo_gene.sift_prediction = most_severe.sift_prediction\n except AttributeError:\n pass\n try:\n mongo_gene.polyphen_prediction = most_severe.polyphen_prediction\n except AttributeError:\n pass\n # Add the mongo engine gene to the dictionary\n mongo_genes.append(mongo_gene)\n\n return mongo_genes" ]
[ "0.63751584", "0.6185006", "0.59191895", "0.57846487", "0.56714374", "0.56594753", "0.5627719", "0.5605826", "0.5570916", "0.55197716", "0.5507233", "0.5483343", "0.5474914", "0.53773415", "0.537726", "0.5336808", "0.5323498", "0.5316965", "0.5203386", "0.5173213", "0.51682794", "0.5165912", "0.5157432", "0.51450366", "0.51196843", "0.50982267", "0.50888145", "0.50689965", "0.505753", "0.50555754" ]
0.6813666
0
Find the chromosomal position of all genes in a range. Returns dict
def gene_coords_by_range(probes, chrom, start, end, ignore=params.IGNORE_GENE_NAMES):
    ignore += params.ANTITARGET_ALIASES
    # Tabulate the genes in the selected region
    genes = collections.OrderedDict()
    for row in probes.in_range(chrom, start, end):
        name = str(row.gene)
        if name in genes:
            genes[name][1] = row.end
        elif name not in ignore:
            genes[name] = [row.start, row.end]
    # Reorganize the data structure
    return {
        chrom: [(gstart, gend, name) for name, (gstart, gend) in list(genes.items())]
    }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def range_dic_(df_):\n range_dic = {}\n for man in df_['maneuver']:\n trial_indx = df_.index[df_['maneuver'] == man].tolist()\n range_ = (min(trial_indx), max(trial_indx))\n range_dic.update({man: range_})\n return range_dic", "def range_dic_(df_):\n range_dic = {}\n for man in df_['maneuver']:\n trial_indx = df_.index[df_['maneuver'] == man].tolist()\n range_ = (min(trial_indx), max(trial_indx))\n range_dic.update({man: range_})\n return range_dic", "def get_genomic_range( self ):\n return self.snv_chrom + ':' + str( self.snv_start ) + '-' + str( self.snv_end )", "def count_property_range_hits(prop, node_dict, hits):\n\tres = []\n\t# sets tuple position to use in dict value\n\tswitcher = {\n \"length\": (0,(0,4000,8000,12000,16000,20000)),\n \"steps\": (1,(0,2,4,8,16,32)),\n \"cov\": (2,(1,10,100,1000,10000,100000)),\n \"cv\": (3, (0,0.05,0.10,0.15,0.20,0.25))\n }\n\tif prop not in switcher:\n\t\treturn res\n\ttup_pos = switcher[prop][0]\n\tnode_cnt = 0\n\tpos_cnt = 0\n\tfor ind in range(len(switcher[prop][1])-1):\n\t\tmin_val = switcher[prop][1][ind]\n\t\tmax_val = switcher[prop][1][ind+1]\n\t\tfor node in node_dict.keys():\n\t\t\tval = node_dict[node][tup_pos]\n\t\t\tif ind < len(switcher[prop][1])-2:\n\t\t\t\trange_test_val = (min_val <= val < max_val)\n\t\t\telse:\n\t\t\t\trange_test_val = (min_val <= val <= max_val)\n\t\t\t# print \"range bool is\", range_test_val\n\t\t\tif range_test_val:\n\t\t\t\tnode_cnt += 1\n\t\t\t\tif node in hits: pos_cnt += 1\n\t\tif node_cnt > 0:\n\t\t\tres.append( (pos_cnt, node_cnt, round(float(pos_cnt)/node_cnt,2)))\n\t\telse:\n\t\t\tres.append((0,0,0))\n\t\tnode_cnt = 0\n\t\tpos_cnt = 0\n\treturn res", "def all_best_pos(potential_coverage, all_coverage, covered):\n ## get closeness to 5'\n proximity = {grna_seq: sum((hit.target_len - hit.range[1] if\n hit.target.sense == '-' else\n hit.range[0])\n for hit in hits)/len(hits)\n for grna_seq, hits in potential_coverage.items()}\n best_proximity = min(proximity.values())\n return {grna_seq: potential_coverage[grna_seq]\n for grna_seq, prox in proximity.items()\n if prox == best_proximity}", "def getTargetPositions(rg):\n targetPositions = OrderedDict()\n for r in rg.robotDict.values():\n x, y, z = r.metFiberPos\n targetPositions[r.id] = [x, y]\n return targetPositions", "def _get_gene_map(self) -> OrderedDict:\n if \"gene\" not in self.data:\n return OrderedDict()\n\n genes: OrderedDict = OrderedDict()\n for idx, genestr in self.data[\"gene\"].items():\n if pd.isnull(genestr):\n continue\n for gene in genestr.split(\",\"):\n if gene not in genes:\n genes[gene] = []\n genes[gene].append(idx)\n return genes", "def read_chr(fpath):\n\t# init dict and indices\n\tchrom_dicts={}\n\tstart=0\n\tindex=0\n\n\t# iterate through chromosome scores \n\tfor line in fileinput.input(fpath):\n\t\tx=line.split()\n\t\t\n\t\t# if chromosome skips some region, then normalize the previous window (<100 bp) and init new window \t\n\t\tif len(x)==4:\n\t\t\tif start in chrom_dicts:\n\t\t\t\tchrom_dicts[start]/=index\n\t\t\tstart=int(x[2].split(\"=\")[1])\n\t\t\tchrom_dicts[start]=0\n\t\t\tindex=0\n\n\t\t# if not a black region, then make news windows every 100 locations\n\t\tif len(x)==1:\n\t\t\tchrom_dicts[start]+=float(x[0])\n\t\t\tif index==100:\n\t\t\t\tchrom_dicts[start]/=index\n\t\t\t\tindex=0\n\t\t\t\tstart+=100\n\t\t\t\tchrom_dicts[start]=0\n\t\t\tindex+=1\n\t\n\t# track chromosomes that have been binned\n\tprint(\"%s %d\" % (fpath,len(chrom_dicts)))\n\treturn(chrom_dicts)", "def 
get_gtf_region_position_info(region_gtf_pr):\n ## Check if region has no features\n if region_gtf_pr.empty:\n return dict()\n\n gene_info_dict = dict()\n for name, group in region_gtf_pr.df.groupby(\"transcript_id\"):\n for row in group.itertuples():\n\n ## Add Transcript Info\n if row.Feature == \"transcript\":\n\n if row.gene_id not in gene_info_dict:\n gene_info_dict[row.gene_id] = dict()\n if row.transcript_id not in gene_info_dict[row.gene_id]:\n gene_info_dict[row.gene_id][row.transcript_id] = {\"exons\": []}\n\n gene_info_dict[row.gene_id][row.transcript_id][\"chrom\"] = row.Chromosome\n gene_info_dict[row.gene_id][row.transcript_id][\"start\"] = row.Start\n gene_info_dict[row.gene_id][row.transcript_id][\"end\"] = row.End\n gene_info_dict[row.gene_id][row.transcript_id][\n \"gene_symbol\"\n ] = row.gene_name\n gene_info_dict[row.gene_id][row.transcript_id][\n \"biotype\"\n ] = row.gene_type\n gene_info_dict[row.gene_id][row.transcript_id][\"strand\"] = row.Strand\n\n ## Add exon feature info\n elif row.Feature == \"exon\":\n\n if row.gene_id not in gene_info_dict:\n gene_info_dict[row.gene_id] = dict()\n if row.transcript_id not in gene_info_dict[row.gene_id]:\n gene_info_dict[row.gene_id][row.transcript_id] = {\"exons\": []}\n\n gene_info_dict[row.gene_id][row.transcript_id][\"exons\"].append(\n {\"start\": row.Start, \"end\": row.End, \"exon_number\": row.exon_number}\n )\n\n return gene_info_dict", "def populate_coordinate_list(start, end):\n # print(\"im am here:\" , coordinates, DNA_start)\n corod_list = []\n # DNA start is the gene start in the gff\n # coord is the up stream as defined by the region of interest.\n # is gene is (+) coding: DNA_start > coordinates\n if start > end: # + coding\n for number in range(end, start):\n # print(\"DNA start greater, should be +\", direction)\n # need to get rid of negative coodinates is there\n # are any \n if number < 1:\n continue\n corod_list.append(int(number))\n corod_list = corod_list[::-1]\n if start < end:\n for number in range(start, end):\n # need to get rid of negative coodinates is there\n # are any \n if number < 1:\n continue\n corod_list.append(int(number))\n # print(corod_list)\n # we return a reversed list. 
So we can go through the coorinates away\n # from the gene to see to see if it fals into a gene \n return corod_list", "def get_chromosome_object(agp):\n\n chr = {}\n\n agp = agp.split('\\n')\n\n for i, line in enumerate(agp):\n if len(line) == 0 or line[0] == '#':\n continue\n tabs = line.split(\"\\t\")\n acc = tabs[0]\n start = int(tabs[1])\n stop = int(tabs[2])\n comp_type = tabs[6]\n if 'acc' not in chr:\n chr['accession'] = acc\n chr['type'] = 'nuclear'\n if comp_type == 'centromere':\n chr['centromere'] = {\n 'start': start,\n 'length': stop - start\n }\n if i == len(agp) - 2:\n chr['length'] = stop\n return chr", "def _get_positions(self):\n position_map = dict()\n # Assumes that the positions are indexed in the order of Row-->Well-->FOV\n for well in self.wells:\n for pos in self.store[well].attrs.get('well').get('images'):\n pos_name = pos['path']\n # pos name is 'Pos_xxx'\n pos_idx = int(pos_name.split('_')[-1])\n position_map[pos_idx] = {'name': pos_name, 'well': well}\n return position_map", "def build_gene_indexes(df):\n\tgeneDict = OrderedDict()\n\n\tgeneCount = 0\n\tpreviousGeneIndex = 0\n\n\tcurrent_id=\"\"\n\tcurrent_gene=\"\"\n\n\tfor i in range(len(df)):\n\n\t\tif df.loc[i,'feature'] == 'gene':\n\t\t\ttrdict = parse_entry(df.loc[i,'transcript_id'])\n\n\t\t\tcurGeneID = trdict['gene_id'][0]\n\t\t\n\t\t\tif geneCount != 0:\n\t\t\t\tnewGeneIndex = i\n\t\t\t\tgeneDict[current_id] = [previousGeneIndex,newGeneIndex]\n\t\t\t\tpreviousGeneIndex = i\n\t\t\t\tcurrent_id = trdict['gene_id'][0]\n\t\t\t\tgeneCount += 1\n\n\t\t\telse:\n\t\t\t\tnewgeneIndex = 0\n\t\t\t\tgeneCount +=1\n\t\t\t\tcurrent_id = trdict['gene_id'][0]\n\t\tif i == (len(df)-1):\n\t\t\tnewGeneIndex = i+1\n\t\t\tcurrent_id = trdict['gene_id'][0]\n\t\t\tgeneDict[current_id] = [previousGeneIndex,newGeneIndex]\n\treturn geneDict", "def ships_in_range(self):\n\n query_string = \"SELECT * from ships_in_range;\"\n\n # Perform query\n self.conn_cur.execute(query_string)\n results = self.conn_cur.fetchall()\n\n # Build dictionary\n ranges = {}\n for row in results:\n ranges[row[0]] = row[1]\n\n return ranges", "def get_gnid_range_map(node_tids):\n ntypes_gid_range = {}\n offset = 0\n for k, v in node_tids.items():\n ntypes_gid_range[k] = [offset + int(v[0][0]), offset + int(v[-1][1])]\n offset += int(v[-1][1])\n\n return ntypes_gid_range", "def chromosome_lengths(self):\n chr_lens = {}\n for r in self.regions(lazy=True):\n if chr_lens.get(r.chromosome) is None:\n chr_lens[r.chromosome] = r.end\n continue\n if r.end > chr_lens[r.chromosome]:\n chr_lens[r.chromosome] = r.end\n return chr_lens", "def _make_key(self):\n all_position_values = (chromosome_sort_key(self.chromosome), self.min_position, self.max_position, \n self.strand, self.position_before, self.position_after)\n return all_position_values", "def get_gene_values(hotel_ids):\n hotel_genes = {}\n subcats = get_subcat_axes()\n cursor = conn.cursor()\n cursor.execute(\n \"\"\"\n SELECT hotel_id, genome\n FROM hotel_genome\n WHERE hotel_id in (%s)\n \"\"\" % \",\".join([str(h) for h in hotel_ids])\n )\n for hotel_id, genome_str in cursor.fetchall():\n genome = [float(g.strip()) for g in genome_str.split(\",\")]\n hotel_genes[hotel_id] = get_hotel_genes_by_subcat(\n subcats, genome)\n return subcats, hotel_genes", "def organize_by_chromosome(genes, transcripts):\n gene_dict = {}\n transcript_dict = {}\n\n for ID in genes:\n gene = genes[ID]\n chromosome = gene.chromosome\n if chromosome not in gene_dict:\n chrom_genes = {}\n chrom_genes[ID] = gene\n 
gene_dict[chromosome] = chrom_genes\n gene_dict[chromosome][ID] = gene\n\n for ID in transcripts:\n transcript = transcripts[ID]\n chromosome = transcript.chromosome\n if chromosome not in transcript_dict:\n chrom_transcripts = {}\n chrom_transcripts[ID] = transcript\n transcript_dict[chromosome] = chrom_transcripts\n transcript_dict[chromosome][ID] = transcript\n transcript_dict[chromosome][ID] = transcript\n\n return gene_dict, transcript_dict", "def node_positions(self, edge_length=1):\n\n pos_dict = {}\n\n gridsize = self.n_x\n current_node = 0\n for row_idx, row in enumerate(np.arange(0, gridsize, 1)):\n for col_idx, col in enumerate(np.arange(0, gridsize, 1)):\n xval = col * edge_length\n yval = (gridsize - 1 - row) * edge_length\n pos_dict.update({current_node: (xval, yval)})\n current_node += 1\n\n return pos_dict", "def get_range_info(self):\n with open(self.range_path, 'r') as _file:\n for line in _file.readlines():\n list0 = line.strip().split('-')\n range_dict = {\n 'min': int(list0[0], 16),\n 'max': int(list0[1], 16),\n 'max_offset': int(list0[1], 16) - int(list0[0], 16),\n }\n self.ranges.append(range_dict)", "def _hit_range_get(self):\n return (self.hit_start, self.hit_end)", "def iterate_coordinate_dict(gene_gff_line,\n gene_name,\n scaffold,\n current_start,\n current_stop,\n logger):\n current_start = int(current_start)\n current_stop = int(current_stop)\n for gene, vals in gene_gff_line.items():\n # find the genes on the same scaffold\n if scaffold in vals:\n scaff, source, feature, start, stop, score, \\\n direction, frame, gene_info = vals.split(\"\\t\")\n # if its is the same gene as the stop\n if gene_name in gene_info:\n # print(gene_name, gene_info)\n # we dont want to test it against intself!!\n continue\n if scaffold.rstrip() != scaff.rstrip():\n continue\n start = int(start) + 1\n stop = int(stop)\n # basically does the coordinate fall in the current\n # coordinate for a gene\n # call the function to poulate the list\n UTR_coodinate_list = populate_coordinate_list(current_start, current_stop)\n for UTR_coordinate in UTR_coodinate_list:\n ##logger.info(info)\n if UTR_coordinate > start and UTR_coordinate < stop:\n warn = \" \".join([gene_name,\n \"UTR region falls into\",\n \"genic regions of\",\n gene,\n \"on scaffold\",\n scaffold])\n logger.warning(warn)\n return \"HITS genic region\"\n return \"OK\"", "def get_electrode_positions():\n positions = dict()\n with io.open(\"electrode_positions.txt\", \"r\") as pos_file:\n for line in pos_file:\n parts = line.split()\n positions[parts[0]] = tuple([float(part) for part in parts[1:]])\n return positions", "def get_electrode_positions():\n positions = dict()\n with io.open(\"electrode_positions.txt\", \"r\") as pos_file:\n for line in pos_file:\n parts = line.split()\n positions[parts[0]] = tuple([float(part) for part in parts[1:]])\n return positions", "def checked_positions():\n for base_position in chain([me.shipyard], me.get_dropoffs()):\n x_shipyard = base_position.position.x\n y_shipyard = base_position.position.y\n for x in range(-search_range, search_range):\n for y in range(-search_range, search_range):\n yield hlt.Position(\n x=x_shipyard + x,\n y=y_shipyard + y)", "def getratios(results_condition,conditions):\n setlist = {}\n for r in range(conditions):\n setlist[r] = []\n \n for gene in results_condition.genelist:\n conditions = len(gene.logfold)\n\n count = 0\n for set_ in setlist:\n if gene.logfold[count] > 1.5 or gene.logfold[count] < -1.5 :\n setlist[count].append(gene.name)\n count +=1\n return 
setlist", "def _get_energy_range(self):\n\n e0_min = self.network.isomers[0].E0\n e0_max = e0_min\n\n for isomer in self.network.isomers[1:]:\n E0 = isomer.E0\n if E0 < e0_min:\n e0_min = E0\n if E0 > e0_max:\n e0_max = E0\n for reactant in self.network.reactants:\n E0 = reactant.E0\n if E0 < e0_min:\n e0_min = E0\n if E0 > e0_max:\n e0_max = E0\n for product in self.network.products:\n E0 = product.E0\n if E0 < e0_min:\n e0_min = E0\n if E0 > e0_max:\n e0_max = E0\n for rxn in self.network.path_reactions:\n E0 = rxn.transition_state.conformer.E0.value_si\n if E0 < e0_min:\n e0_min = E0\n if E0 > e0_max:\n e0_max = E0\n\n return e0_min, e0_max", "def get_idranges(names, counts, num_chunks=None):\n gnid_start = 0\n gnid_end = gnid_start\n tid_dict = {}\n gid_dict = {}\n\n for idx, typename in enumerate(names):\n gnid_end += counts[typename]\n tid_dict[typename] = [[0, counts[typename]]]\n gid_dict[typename] = np.array([gnid_start, gnid_end]).reshape([1, 2])\n gnid_start = gnid_end\n\n return tid_dict, gid_dict", "def get_hotel_chromosomes(hotel_ids):\n if not hotel_ids:\n return None\n hotel_id_in = ','.join([str(h) for h in hotel_ids])\n cursor = conn.cursor()\n cursor.execute(\n \"\"\"\n SELECT hotel_id, chromosome_id, normalized_score\n FROM hotel_chromosome\n WHERE hotel_id in (%s)\n ORDER BY hotel_id, chromosome_id\n \"\"\" % hotel_id_in\n )\n results = cursor.fetchall()\n hotel_chromosomes = {}\n for key, group in groupby(results, lambda x: x[0]):\n chromosomes = [0.0]*CHROMOSOME_LENGTH\n for g in group:\n chromosomes[g[1]] = g[2]\n hotel_chromosomes[key] = chromosomes\n cursor.close()\n return hotel_chromosomes" ]
[ "0.6318338", "0.6318338", "0.5950929", "0.57985824", "0.57871836", "0.5756468", "0.56780857", "0.5640793", "0.56371856", "0.561185", "0.55959535", "0.55764234", "0.55740255", "0.557101", "0.5568913", "0.55579585", "0.55578256", "0.55408716", "0.5523614", "0.5481604", "0.54741865", "0.5461542", "0.54604477", "0.53797346", "0.53797346", "0.5375557", "0.5351104", "0.5348778", "0.53380466", "0.5324897" ]
0.730751
0
Prints a random number between 1 and the number of sides of the die
def roll_die(self):
    number = randint(1, self.sides)
    print(number)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rollDie(self):\n return random.randint(1, self.sides)", "def roll_die(self, number_of_rolls):\n\t\tfor roll in range(0, number_of_rolls):\n\t\t\tprint(str(randint(1, self.sides)), end = \", \")\n\t\tprint()", "def die():\n return random.randint(1,6)", "def roll_dice(self):\r\n return randint(1,self.sides)", "def die_roll():\n roll = random.randint(1,6)\n return roll", "def roll(self):\n\t\treturn randint(1, self.num_sides)", "def random_die():\n return randrange(1, 6)", "def roll(self):\n return randint(1, self.sides)", "def roll(self):\n return randint(1, self.sides)", "def roll(self):\n return randint(1, self.num_sides)", "def roll(self):\n return randint(1, self.num_sides)", "def roll(self):\n return randint(1, self.num_sides)", "def roll(self):\n return randint(1, self.num_sides)", "def roll_die(number_of_rolls: int, number_of_sides: int) -> int:\r\n if number_of_rolls <= 0 or number_of_sides <= 0:\r\n return 0\r\n\r\n max_total = number_of_sides * number_of_rolls\r\n\r\n return random.randint(number_of_rolls, max_total)", "def roll(self):\n return random.randrange(1, sides + 1)", "def roll_die(sides = 6, maxi = 6):\n d = 1000\n # discard highest roll(s)\n while d > maxi:\n d = random.randint(1,sides)\n return d", "def rolldie():\n return int(random.random()*6)+1 # or use randrange()", "async def roll(self, ctx, sides: int = None):\n if sides is None:\n await ctx.send(\"You have to tell me how many sides the die has!\")\n return\n roll = random.randint(1, sides)\n await ctx.send(\"You rolled: \" + str(roll))", "def roll(self):\n return random.randint(1,self.sides)\n #return int(self.sides*random.random() + 1.0)", "def _rollOneDie(self):\n return random.randint(1, 6)", "def roll(self) -> int:\n return self.rand.randint(1, self.sides)", "def rollDie():\n return random.choice([1, 2, 3, 4, 5, 6])", "def roll(self):\r\n import random as _random\r\n return _random.randint(1, self.__sides_count)", "def roll_die(number_of_rolls, number_of_sides):\n\n roll = random.randint(1, number_of_sides) # Used recursion for this\n if number_of_rolls == 0:\n return 0 # Base case is 0. If it's 1, then I can roll a 7 with 6 sides\n else:\n return roll + roll_die(number_of_rolls - 1, number_of_sides) # Subtract 1 roll and keep calling function", "def throw_dice():\n return randint(1, 6) + randint(1, 6)", "def dice_roll(name):\n roll = random.randint(1,6)\n print \"{name} shook the die \\\nand rolled a {roll}.\".format(name=name, roll=roll)\n return roll", "def wyldingHand(self, level):\n if level == 0:\n die_result = random.randint(1,6)\n elif level == 1:\n die_result = random.randint(1,10)\n elif level == 2:\n die_result = random.randint(1,6) + random.randint(1,6)\n elif level == 3:\n die_result = random.randint(1,8) + random.randint(1,8)\n\n return die_result", "def dice_roll(name):\n roll = random.randint(1,6)\n print \"{name} shook the die \\\nand rolled a {roll}.\".format( name=name, roll=roll)\n return roll", "def diceRoll():\n return randint(1,6)", "def roll(self):\n return random.choice(self.sides)" ]
[ "0.7796977", "0.7618368", "0.7407574", "0.73979664", "0.72382367", "0.7182427", "0.7154115", "0.7122058", "0.7122058", "0.71132416", "0.71132416", "0.71132416", "0.71132416", "0.7102902", "0.7035336", "0.7031503", "0.7024645", "0.6955785", "0.6931022", "0.68884605", "0.6875295", "0.6844451", "0.67918587", "0.6790089", "0.67711806", "0.6719265", "0.66995233", "0.66989285", "0.66979814", "0.6665929" ]
0.84679675
0
send some data to badash
def send_to_badash(job, data):
    data['job'] = job
    data['result'] = 0
    resp = requests.post(os.environ.get('BADASH_API_URL', ''), json=data, headers={'X-Api-Key': os.environ.get('BADASH_API_KEY')})
    print(resp.status_code)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _send_data(self):\n pass", "def send(self, data):", "def send_data(self, **kwargs):", "def send_data(self, data: dict):\n pass", "def send(self, data):\n pass", "def send(User,data):\n BufferManagement.append_to_buffer(data,User['ID'],User['GameID'],\"OUT\")", "def sendData(self):\n pubk = Loginkey.getpubkey()\n prik = Loginkey.getprikey()\n da = self.p.addHeader(pubk,2)\n self.transport.write(da)\n da = self.p.addHeader(prik,3)\n self.transport.write(da)", "def send_data(self, data):\n # test_dict = {'ext': '1105', 'ip_address': '192.168.10.55', 'status': 'OK', 'ping': '5 ms', 'user': 'Secretary',\n # 'user_agent': 'Cisco/SPA508G-7.4.9a'}\n\n sender_data = []\n host_id = data.get(\"ext\")\n # print(ZABBIX_HOST)\n zbx_sender = ZabbixSender(self.zabbix_host)\n extension_ip = ZabbixMetric(host_id, 'extPhoneIpAddress', data.get(\"ip_address\"))\n sender_data.append(extension_ip)\n\n extension_ping = ZabbixMetric(host_id, \"extPhonePing\", self.clear_ping(data.get(\"ping\", 10000)))\n sender_data.append(extension_ping)\n\n extension_status = ZabbixMetric(host_id, \"extStatus\", data.get(\"status\", \"\"))\n sender_data.append(extension_status)\n\n extension_user = ZabbixMetric(host_id, \"extUser\", data.get(\"user\", \"\"))\n sender_data.append(extension_user)\n\n extension_useragent = ZabbixMetric(host_id, \"extUserAgent\", data.get(\"user_agent\", \"\"))\n sender_data.append(extension_useragent)\n zbx_sender.send(sender_data)", "def send(self, data: bytes):", "def send(self, data):\n self._send(data)", "def send(connection, data):\n connection.send(pickle.dumps(data))", "def _send_data_to_wbt(self,nnData):\n\t\tnnData += \"END\\n\"\n\t\tself._conn.send(nnData)", "def send_data(self):\n data = self.datastore.use(self.data_name)\n if data is None:\n self.dbg(\"sockets_warning\", \"Data is none for {}\", [self.data_name])\n encoded_data = json.dumps(data).encode()\n self.conn.sendall(encoded_data)\n self.dbg(\"sockets_verbose\", \"Data sent\")", "def _send_data(self, data, time):\n pass", "def send_data(client, index, data):\n client.index(index=index, doc_type=\"data\", body=data)", "def sendData(self, data):\n self.tx.sendBuffer(data)", "def send(self, data):\n self.sent.put(data)", "def _publish(self, data):\n json_data = json.dumps(data)\n self._udp_socket.sendto(json_data, (self._hsflowd_addr, self._hsflowd_port))", "def send (self, data):\n return self.sending.send(data)", "def _send(self, data_str):\n\n self._handler.sendall(data_str)", "def send_data(self):\n self.socket.send(\"DATA\\r\\n\")\n response = self.get_response()\n if response[0] != 354:\n print \"An error has occured try again\"\n print response[1]\n sys.exit(0)", "def send_data(self, data):\r\n try:\r\n self.sock.sendto(data, self.addr)\r\n except Exception:\r\n print(\"Cant't send a package\")", "def send(self, value):\n pass", "def send_data(self):\n if self.key == \"\":\n return False\n try:\n params = self.create_url(self.sqlr.get_last_record_dict())\n headers = {\"Content-type\": \"application/x-www-form-urlencoded\",\n \"Accept\": \"text/plain\"}\n conn = httplib.HTTPConnection(\"api.thingspeak.com:80\")\n conn.request(\"POST\", \"/update\", params, headers)\n response = conn.getresponse()\n #print (\"{0}, {1}\".format(response.status, response.reason))\n data = response.read()\n conn.close()\n self.s.enter(self.timespec, 1, self.send_data, ())\n except httplib.HTTPException as http_exception:\n self.sqlr.insert_alert(\"Connection failed: {0}\".\n format(http_exception.message), 0,0,0)\n print(\"Connection 
failed: {0}\".format(http_exception.message))\n # try again in 5\n self.s.enter(300, 1, self.send_data, ())\n except Exception as e:\n print(\"I'm the guy killing your script: {0}\".format(e.message))\n # try again in 5\n self.s.enter(300, 1, self.send_data, ())", "def send_command(self, data):\n try:\n self.write(data)\n reply = self.read_line()\n \n if reply == \"{}\":\n pass\n else:\n print \"send_command: received bad reply %s\" % (reply)\n sys.exit(1)\n except Exception:\n raise", "def send(self, data):\n\n self._default_sender(data)", "def send(self, group=0):\n self._data1 = group\n super().send(data1=self._data1)", "def send_data(self, str_data):\n try:\n self.s.sendall(str_data.encode())\n except OSError as e:\n print(e)", "def sendData(data_point):\n send_data = data_point\n\n address = ('localhost', 6000)\n conn = Client(address, authkey=b'secret password')\n # conn.send('close')\n conn.send(send_data)\n conn.close()\n #todo: add error catching\n return send_data", "def _send_data(self, lucky: int, brain_strength: int):\n self.__data_sender.send_lucky.emit(lucky)\n self.__data_sender.send_brain_strength.emit(brain_strength)\n self.__data_sender.send_cur_state.emit(self._cur_state)\n self.__data_sender.send_remain_time.emit(self.__all_time)" ]
[ "0.6979101", "0.6899216", "0.67075515", "0.66911685", "0.658712", "0.64276284", "0.64261144", "0.6391734", "0.6275754", "0.60917", "0.6080601", "0.6060746", "0.60563254", "0.5980716", "0.5978205", "0.5966177", "0.59635454", "0.5939323", "0.5926442", "0.5909892", "0.58855337", "0.58804786", "0.5875571", "0.5872397", "0.58622986", "0.58417916", "0.5817665", "0.5812709", "0.5809419", "0.58076453" ]
0.6981778
0
Returns True if the input array is zero (smaller than machine precision) everywhere. Useful for determining if tilt angle is zero everywhere (i.e. LFM file is in GSM coordinates).
def __isZeroEverywhere(self, array):
    epsilon = numpy.finfo( type(array[0]) ).eps
    boolList = numpy.less_equal(numpy.abs(array), epsilon)
    for b in boolList:
        if not b:
            return False
    return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def iszero(a: float) -> bool:\n return np.isclose(a, 0.0, atol=1.0e-12, rtol=0.0)", "def is_zero(self):\n for t in self:\n if t != TRIT_ZERO:\n return False\n return True", "def is_close_to_zero(value: Union[float, np.ndarray]) -> Union[bool, np.ndarray]:\n return abs(value) < 1.0e-10", "def is_zero(self, a):\n return not a", "def _is_zero(self):\n return (len(self._digits) == 1) and\\\n (self._digits[0] == self.Symbols.ZERO.value)", "def is_zero(self):\n # any nonzero entry in any matrix representation\n # disqualifies the morphism as having totally zero outputs\n return self._matrix.is_zero()", "def is_zero(self):\n return float(self.coeff.nominator) / self.coeff.denominator == 0.0", "def is_zero(self):\r\n return self._real.is_zero() and self._imag.is_zero()", "def is_null(self) -> bool:\n for y in range(0, self.num_of_rows):\n for x in range(0, self.num_of_cols):\n if self._A[y][x] != 0:\n return False\n return True", "def is_zero(self) -> bool:\n return self.field.zero == self", "def isZero(self):\n\t\treturn (self.p.isZero() & (self.q.isZero() == False))", "def is_zero(self):\n return self._x == 0 and self._y == 0", "def zero(self) -> bool:\n return self._algorithm.is_last_zero(self._stat)", "def __nonzero__(self):\n for e in self:\n if e != 0:\n return True\n return False", "def isZero(self):\n return self.count == 0", "def isIsotropic( self ) :\n\n for coefficient in self[1:] :\n if( coefficient != 0. ) : return( False )\n return( True )", "def is_zero(self):\n return -0.0001 <= self.l2_norm() <= 0.0001", "def is_zero(self):\n return (self._num == 0)", "def __nonzero__(self):\n return not self.as_point == (0, 0)", "def isAllZeros(self, currentState):\n\t\tisZeros = True\n\t\tfor i in currentState:\n\t\t\tif i != 0:\n\t\t\t\tisZeros = False\n\t\t\t\tbreak\n\t\treturn isZeros", "def component_is_zero(self, key):\n a = self[key]\n return not np.any(a)", "def equals_zero(self, number):\n return abs(number) < self.zero_sensitivity", "def is_zero(self):\n return self._express.is_zero()", "def has_zeros(tensor, verbose=True):\n tensor_numpy = tensor.data.cpu().numpy().flatten()\n where_zero = np.argwhere(tensor_numpy == 0.0)\n\n zero_count = len(where_zero)\n zero = zero_count != 0\n\n if verbose and zero:\n print(f\"Encountered {zero_count} zeros\")\n\n return zero", "def is_zero(f):\n return dmp_zero_p(f.rep, f.lev)", "def is_zero_matrix(self):\n M = self.rep\n for i in range(self.rows):\n for j in range(self.cols):\n if M[i, j]:\n return False\n return True", "def isVecZero(vec):\n trues = [isZero(e) for e in vec]\n return all(trues)", "def is_empty(self):\n return ch.prod(ch.tensor(self.x.shape)).item() == 0", "def isZero(val):\n if isSympy(val):\n try:\n val = expressionToNumber(val)\n except Exception:\n return False\n try:\n if np.isclose(np.abs(val), 0):\n return True\n except TypeError:\n newVal = complex(val)\n return np.abs(newVal) == 0", "def is_zero(self):\n for action, prob in self._regrets.items():\n if prob != 0.0:\n return False\n return True" ]
[ "0.7329743", "0.71980375", "0.7180356", "0.6959584", "0.68835986", "0.6858602", "0.6843292", "0.6754056", "0.67275715", "0.6671207", "0.66645455", "0.66500664", "0.6643579", "0.6640681", "0.6592677", "0.6558841", "0.64952725", "0.64739907", "0.6471248", "0.64580184", "0.63760114", "0.63738686", "0.6370563", "0.63652396", "0.63641435", "0.6312675", "0.6286577", "0.6284793", "0.62698966", "0.62400216" ]
0.7982687
0
Transform all magnetic field B and velocity V values from SM to GSM coordinates. Store results by overwriting dataDict contents.
def __sm2gsm(self, dataDict):
    b = (dataDict.getData('bx'), dataDict.getData('by'), dataDict.getData('bz'))
    v = (dataDict.getData('vx'), dataDict.getData('vy'), dataDict.getData('vz'))
    for i, time in enumerate(dataDict.getData('time_min')):
        d = self.startDate + datetime.timedelta(minutes=time)
        # Update magnetic field
        b_gsm = pyLTR.transform.SMtoGSM(b[0][i], b[1][i], b[2][i], d)
        dataDict.setData('bx', b_gsm[0], i)
        dataDict.setData('by', b_gsm[1], i)
        dataDict.setData('bz', b_gsm[2], i)
        # Update Velocity
        v_gsm = pyLTR.transform.SMtoGSM(v[0][i], v[1][i], v[2][i], d)
        dataDict.setData('vx', v_gsm[0], i)
        dataDict.setData('vy', v_gsm[1], i)
        dataDict.setData('vz', v_gsm[2], i)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def preprocess(self):\n\n mm_magcoord.add_aacgm_coordinates(self)\n mm_magcoord.add_quasi_dipole_coordinates(self)\n mm_sc.calculate_ecef_velocity(self)\n mm_sc.add_ram_pointing_sc_attitude_vectors(self)\n\n return", "def FL_2_mav_state(self, data):\n\n (data['velocities/phidot-rad_sec_bf'],\n data['velocities/thetadot-rad_sec_bf'],\n data['velocities/psidot-rad_sec_bf']) = self.convert_body_frame(\n data['attitude/phi-rad'],\n data['attitude/theta-rad'],\n data['velocities/phidot-rad_sec'],\n data['velocities/thetadot-rad_sec'],\n data['velocities/psidot-rad_sec']\n )\n\n # Data treatment\n message_data = [\n 1000000000,\n # int(data['simulation/sim-time-sec']*cls._sec2usec), # time usec\n data['attitude/phi-rad'], # phi rad float\n data['attitude/theta-rad'], # theta rad float\n data['attitude/psi-rad'], # psi rad float\n data['velocities/phidot-rad_sec'], # rollspeed rad.s-1 float\n data['velocities/thetadot-rad_sec'], # pitchspeed rad.s-1 float\n data['velocities/psidot-rad_sec'], # yawspeed rad.s-1 float\n int(data['position/lat-gc-rad']*self._rad2degE7), # lat 10e7.deg int\n int(data['position/long-gc-rad']*self._rad2degE7), # lon 10e7.deg int\n int(data['position/h-sl-ft']*self._ft2m*self._m2mm), # alt mm int\n int(data['velocities/v-north-fps']*self._ft2m*self._m2cm), # vx cm.s-1 int\n int(data['velocities/v-east-fps']*self._ft2m*self._m2cm), # vy cm.s-1 int\n int(data['velocities/v-down-fps']*self._ft2m*self._m2cm), # vz cm.s-1 int\n int(data['accelerations/udot-ft_sec2']*self._ft2m*self._mpss2mg), # xacc 1000/g int\n int(data['accelerations/vdot-ft_sec2']*self._ft2m*self._mpss2mg), # yacc 1000/g int\n int(data['accelerations/wdot-ft_sec2']*self._ft2m*self._mpss2mg) # zacc 1000/g int\n ]\n return message_data", "def FL_2_mav_sensor(self, data):\n\n (data['velocities/phidot-rad_sec_bf'],\n data['velocities/thetadot-rad_sec_bf'],\n data['velocities/psidot-rad_sec_bf']) = self.convert_body_frame(\n data['attitude/phi-rad'],\n data['attitude/theta-rad'],\n data['velocities/phidot-rad_sec'],\n data['velocities/thetadot-rad_sec'],\n data['velocities/psidot-rad_sec']\n )\n\n #Data treatment\n messagedata = [\n int(data['simulation/sim-time-sec']*self._sec2usec), #time_usec boot time usec int\n data['accelerations/udot-ft_sec2'], #xacc m/s**2 float\n data['accelerations/vdot-ft_sec2'], #yacc m/s**2 float\n data['accelerations/wdot-ft_sec2'], #zacc m/s**2 float\n data['velocities/phidot-rad_sec'], #xgyro rad/s float\n data['velocities/thetadot-rad_sec'], #ygyro rad/s float\n data['velocities/psidot-rad_sec'], #zgyro rad/s float\n data['sensors/magnetometer/X/output'], #xmag Gauss float\n data['sensors/magnetometer/Y/output'], #ymag Gauss float\n data['sensors/magnetometer/Z/output'], #zmag Gauss float\n data['atmosphere/P-psf']*0.478802589, #abs_pr mbar float\n 0.0, #dif_pr mbar float\n data['atmosphere/pressure-altitude'], #pr_alt meter float\n 15.0, #temp C float\n int(8186) #fields_updated (ALL)\n ]\n\n return messagedata", "def FL_2_mav_gps(self, data):\n\n\n\n #Data treatment\n messagedata = [\n int(data['simulation/sim-time-sec']*self._sec2usec), #time_usec boot time usec int\n 3, #Fix_type uint8_t\t0-1: no fix, 2: 2D fix, 3: 3D fix. 
Some applications will not use the value of this field unless it is at least two, so always correctly fill in the fix.\n int(data['position/lat-gc-rad']*self._rad2degE7), # lat 10e7.deg int\n int(data['position/long-gc-rad']*self._rad2degE7), # lon 10e7.deg int\n int(data['position/h-sl-ft']*self._ft2m*self._m2mm), # alt mm int\n 65535, #eph uint16_t\tGPS HDOP horizontal dilution of position in cm (m*100). If unknown, set to: 65535\n 65535, #epv\tuint16_t\tGPS VDOP vertical dilution of position in cm (m*100). If unknown, set to: 65535\n 65535, #uint16_t\tGPS ground speed (m/s * 100). If unknown, set to: 65535\n int(data['velocities/v-north-fps']*self._ft2m*self._m2cm),# vn cm.s-1 int\n int(data['velocities/v-east-fps']*self._ft2m*self._m2cm), # ve cm.s-1 int\n int(data['velocities/v-down-fps']*self._ft2m*self._m2cm), # vd cm.s-1 int\n 65535, #cog\tuint16_t\tCourse over ground (NOT heading, but direction of movement) in degrees * 100, 0.0..359.99 degrees. If unknown, set to: 65535\n 255, #satellites_visible\tuint8_t\tNumber of satellites visible. If unknown, set to 255\n ]\n\n return messagedata", "def calcMagneticFieldMap(self):\n # Normalised b-field (note lower case)\n self.solenoid.calcMagneticFieldMap()\n self.b = lambda z: self.solenoid.B_interp(z) * -e / (2 * m * c)\n self.calc_level = CALC_B_MAP", "def process(self, **kwargs):\n self._score_modifier = 0.0\n\n # Mass of BH\n self._Mbh = kwargs[self.key('M1')]\n # Mass of NS\n self._Mns = kwargs[self.key('M2')]\n self._m_tov = kwargs[self.key('Mtov')]\n self._Rns = kwargs[self.key('radius_ns')]\n\n # Soft max/min, proportional to diff^2 and scaled to -100 for 0.1 Msun\n # 1\n if self._Mbh < self._m_tov:\n self._score_modifier -= (100. * (self._m_tov-self._Mbh))**2\n \n # 2\n if self._Mns > self._m_tov:\n self._score_modifier -= (100. * (self._Mns-self._m_tov))**2\n\n # 3\n if self._Mns < 0.8:\n self._score_modifier -= (100. * (0.8-self._Mns))**2\n\n # 4\n if self._Rns > 16:\n self._score_modifier -= (20. * (self._Rns-16))**2\n\n if self._Rns < 9:\n self._score_modifier -= (20. * (9-self._Rns))**2\n\n\n # 5\n Mcaus = 1/2.82 * C_CGS**2 * self._Rns * KM_CGS / G_CGS / M_SUN_CGS\n\n if self._m_tov > Mcaus:\n self._score_modifier -= (100. 
* (self._m_tov-Mcaus))**2\n\n return {self.key('score_modifier'): self._score_modifier}", "def postprocessData(meta, units, data):\n\n data['time'] = np.arange(0, meta['dt'] * len(data), meta['dt'])\n units['time'] = 's'\n\n meta, units, data = self.calculateForce(meta, units, data)\n\n data['distance'] = np.sqrt(data.xDist**2 + data.yDist**2)\n units['distance'] = 'nm'\n\n return meta, units, data", "def _update_motion_data(self, msg):\n if self._auv_motion != msg.motion:\n self._target_euler[\"alpha\"] = self._actual_euler[\"alpha\"]\n self._target_euler[\"beta\"] = self._actual_euler[\"beta\"]\n self._target_euler[\"gamma\"] = self._actual_euler[\"gamma\"]\n self._auv_motion = msg.motion\n self._thrusters_actual_speed[\"1\"] = msg.thrusters_speed.thruster_id1_speed\n self._thrusters_actual_speed[\"2\"] = msg.thrusters_speed.thruster_id2_speed\n self._thrusters_actual_speed[\"3\"] = msg.thrusters_speed.thruster_id3_speed\n self._thrusters_actual_speed[\"4\"] = msg.thrusters_speed.thruster_id4_speed\n self._thrusters_actual_speed[\"5\"] = msg.thrusters_speed.thruster_id5_speed\n self._thrusters_actual_speed[\"6\"] = msg.thrusters_speed.thruster_id6_speed\n self._thrusters_actual_speed[\"7\"] = msg.thrusters_speed.thruster_id7_speed\n self._thrusters_actual_speed[\"8\"] = msg.thrusters_speed.thruster_id8_speed", "def populate_from_qrev_mat(self, mat_data):\n\n # Variables passed to the constructor\n\n if type(mat_data.frequency_hz) is np.ndarray:\n self.frequency_khz = mat_data.frequency_hz\n elif np.isnan(mat_data.frequency_hz):\n self.frequency_khz = None\n else:\n self.frequency_khz = mat_data.frequency_hz\n self.orig_coord_sys = mat_data.origCoordSys\n self.nav_ref = mat_data.navRef\n\n # Data requiring manipulation if only 1 ensemble\n if type(mat_data.u_mps) is float:\n self.raw_vel_mps = mat_data.rawVel_mps.reshape(mat_data.rawVel_mps.shape[0], 1)\n # Coordinate transformed data\n self.coord_sys = np.array([mat_data.coordSys])\n self.u_mps = np.array([mat_data.u_mps])\n self.v_mps = np.array([mat_data.v_mps])\n self.w_mps = np.array([mat_data.w_mps])\n self.d_mps = np.array([mat_data.d_mps])\n\n self.bottom_mode = np.array([mat_data.bottomMode])\n\n # Processed data\n self.u_processed_mps = np.array([mat_data.uProcessed_mps])\n self.v_processed_mps = np.array([mat_data.vProcessed_mps])\n self.processed_source = np.array([mat_data.processedSource])\n self.valid_data = np.array([ mat_data.validData]).astype(bool)\n self.valid_data = self.valid_data.reshape(-1, 1)\n self.smooth_speed = np.array([mat_data.smoothSpeed])\n self.smooth_upper_limit = np.array([mat_data.smoothUpperLimit])\n self.smooth_lower_limit = np.array([mat_data.smoothLowerLimit])\n else:\n self.raw_vel_mps = mat_data.rawVel_mps\n # Coordinate transformed data\n self.coord_sys = mat_data.coordSys\n self.u_mps = mat_data.u_mps\n self.v_mps = mat_data.v_mps\n self.w_mps = mat_data.w_mps\n self.d_mps = mat_data.d_mps\n\n self.bottom_mode = mat_data.bottomMode\n\n # Processed data\n self.u_processed_mps = mat_data.uProcessed_mps\n self.v_processed_mps = mat_data.vProcessed_mps\n self.processed_source = mat_data.processedSource\n self.valid_data = mat_data.validData.astype(bool)\n self.smooth_speed = mat_data.smoothSpeed\n self.smooth_upper_limit = mat_data.smoothUpperLimit\n self.smooth_lower_limit = mat_data.smoothLowerLimit\n\n self.num_invalid = mat_data.numInvalid\n # Error velocity filter\n if type(mat_data.dFilter) is np.ndarray:\n self.d_filter = None\n else:\n self.d_filter = mat_data.dFilter\n\n # Error velocity 
threshold\n if type(mat_data.dFilterThreshold) is np.ndarray:\n self.d_filter_threshold = None\n else:\n self.d_filter_threshold = mat_data.dFilterThreshold\n\n # Vertical velocity filter\n if type(mat_data.wFilter) is np.ndarray:\n self.w_filter = None\n else:\n self.w_filter = mat_data.wFilter\n\n # Vertical velocity threshold\n if type(mat_data.wFilterThreshold) is np.ndarray:\n self.w_filter_threshold = None\n else:\n self.w_filter_threshold = mat_data.wFilterThreshold\n\n # GPS quality filter\n if type(mat_data.gpsDiffQualFilter) is np.ndarray:\n self.gps_diff_qual_filter = None\n else:\n self.gps_diff_qual_filter = mat_data.gpsDiffQualFilter\n\n # GPS altitude filter\n if type(mat_data.gpsAltitudeFilter) is np.ndarray:\n self.gps_altitude_filter = None\n else:\n self.gps_altitude_filter = mat_data.gpsAltitudeFilter\n\n # GPS altitude threshold\n if type(mat_data.gpsAltitudeFilterChange) is np.ndarray:\n self.gps_altitude_filter_change = None\n else:\n self.gps_altitude_filter_change = mat_data.gpsAltitudeFilterChange\n\n # HDOP filter\n if type(mat_data.gpsHDOPFilter) is np.ndarray:\n self.gps_HDOP_filter = None\n else:\n self.gps_HDOP_filter = mat_data.gpsHDOPFilter\n\n # HDOP max threshold\n if type(mat_data.gpsHDOPFilterMax) is np.ndarray:\n self.gps_HDOP_filter_max = None\n else:\n self.gps_HDOP_filter_max = mat_data.gpsHDOPFilterMax\n\n # HDOP change threshold\n if type(mat_data.gpsHDOPFilterChange) is np.ndarray:\n self.gps_HDOP_filter_change = None\n else:\n self.gps_HDOP_filter_change = mat_data.gpsHDOPFilterChange\n\n # Other filters\n self.smooth_filter = mat_data.smoothFilter\n self.interpolate = mat_data.interpolate\n self.beam_filter = mat_data.beamFilter", "def SM2m(sm):\n return sm * 1609.344", "def data(dbfilename = os.path.expanduser('~/python/project/znuc2012.S4.star.el.y.stardb.gz')):\n db = stardb.load(dbfilename) # loads database\n nmass = db.nvalues[0] # finds the number of values\n masses = db.values[0][:nmass] #creates a vector of the initial masses\n isodb = stardb.load(os.path.expanduser('~/python/project/znuc2012.S4.star.deciso.y.stardb.gz'))\n \n massnumber = []\n for x in range(len(isodb.ions)):\n mn = isodb.ions[x].A\n massnumber.append(mn)\n massnumber = np.array(massnumber)\n np.save(os.path.expanduser('~/python/project/filestoload/Massnumber'), massnumber) \n####################### \n# write all energy and mixing values\n\n energyvalues = np.unique(db.fielddata['energy'])\n mixingvalues = np.unique(db.fielddata['mixing'])\n masterremnant = [] # result will be a multidimensional array\n elementdata = []\n isodata = []\n r = len(db.ions) # for loop iteration\n w = len(isodb.ions)\n for energy in energyvalues:\n remmixingarray = [] # reinitialise the next dimension\n elmixingarray = []\n isomixingarray = []\n for mixing in mixingvalues:\n \n \n ii = np.logical_and(np.isclose(db.fielddata['energy'], energy), np.isclose(db.fielddata['mixing'], mixing))\n \n mass = db.fielddata[ii]['remnant']\n remmixingarray.append(mass) # this is an array of remnant masses for one energy and every mixing value\n \n elfill = [] # reinitialise the next dimension again\n isofill = []\n \n \n for m in range(w):\n \n a = isodb.ions[m] #for obtaining the element string\n kk = np.where(isodb.ions==isotope.ion(a)) # finding the indices in db.ions for a particular element\n jj = np.where(ii)\n isotopes = isodb.data[jj, kk][0] # array of abundances for that particular element\n isofill.append(isotopes) # this is an array of element data for every mass for one energy and one mixing 
value\n\n\n\n\n isomixingarray.append(isofill) \n \n \n masterremnant.append(remmixingarray) # these master arrays have every bit of data under its own energy. so called like elementdata[energy][mixing][elementnumber] gives the element data for every star for a single element.\n \n isodata.append(isomixingarray)\n \n np.save(os.path.expanduser('~/python/project/filestoload/IsoData'), isodata)\n np.save(os.path.expanduser('~/python/project/filestoload/RemnantMasses'), masterremnant)\n np.save(os.path.expanduser('~/python/project/filestoload/Ioninfo'), isodb.ions)\n time = [] \n \n for mass in masses: # for loop will cycle through the masses and grab the lifetime of each star\n s = str(mass) # converts the mass number to a string for file acquiring\n if s.endswith('.0'): # formatting issue, to match the filenames\n s = s[:-2] \n filename = os.path.expanduser('~/python/project/dumps/z{}#presn').format(s)\n # grabs filename corrosponding to this mass\n d = kepdump.load(filename) # loads the kepdump data for this star\n time.append(d.time) \n yr = 365.2425*86400 \n time = np.array(time)/yr\n dataarray = [masses, time]\n\n\n return dataarray", "def _transform_map_data(self):\n WARD_FMT = '%s-%s'\n self.map_data_trans = []\n lookup = {i.column: ''.join(filter(lambda x: x.isdigit(), i.value)) for i in self.sht[1]}\n\n #skip over header\n rs = iter(self.sht.rows)\n next(rs)\n next(rs)\n for r in rs:\n pka = r[0].value\n for c in r[1:]:\n if c.value is None:\n c.value = 0\n\n self.map_data_trans.append((WARD_FMT%(pka, lookup[c.column]), c.value))", "def FL_2_mav_state_quaternion(self, data):\n\n #Attitude quaternion\n quaternion = self.attitude_quaternion(data['attitude/phi-rad'], data['attitude/theta-rad'], data['attitude/psi-rad'])\n\n #Data treatment\n messagedata = [\n int(data['simulation/sim-time-sec']*self._sec2msec), #time_msec\n quaternion, #attitude_quaternion\n data['velocities/phidot-rad_sec'], #rollspeed\n data['velocities/thetadot-rad_sec'], #pitchspeed\n data['velocities/psidot-rad_sec'], #yawspeed\n data['position/lat-gc-rad'], #lat\n data['position/long-gc-rad'], #lon\n data['position/h-sl-ft'], #alt\n data['velocities/v-north-fps'], #vx\n data['velocities/v-east-fps'], #vy\n data['velocities/v-down-fps'], #vz\n data['velocities/vc-fps'], #ind airspeed\n data['velocities/vtrue-fps'], #true airspeed\n data['accelerations/udot-ft_sec2'], #xacc\n data['accelerations/vdot-ft_sec2'], #yacc\n data['accelerations/wdot-ft_sec2'], #zacc\n ]\n return messagedata", "def getGM2MIsd(self):\r\n return {self._pga: self._constants[self._pga]['SMMI'],\r\n self._pgv: self._constants[self._pgv]['SMMI'],\r\n self._sa03: self._constants[self._sa03]['SMMI'],\r\n self._sa10: self._constants[self._sa10]['SMMI'],\r\n self._sa30: self._constants[self._sa30]['SMMI']}", "def var_transform(self, do_data=False):\n\n empty_vars = ['leadJetEn', 'leadJetPt', 'leadJetPhi', 'leadJetEta', 'leadJetQGL',\n 'subleadJetEn', 'subleadJetPt', 'subleadJetPhi', 'subleadJetEta', 'subleadJetQGL',\n 'subsubleadJetEn', 'subsubleadJetPt', 'subsubleadJetPhi', 'subsubleadJetEta', 'subsubleadJetQGL',\n 'dijetMinDRJetEle', 'dijetDieleAbsDEta','dijetDieleAbsDPhiTrunc', 'dijetCentrality', 'dijetMass', \n 'dijetAbsDEta', 'dijetDPhi'] \n\n replacement_value = -10\n\n for empty_var in empty_vars:\n self.data_obj.mc_df_sig[empty_var] = self.data_obj.mc_df_sig[empty_var].replace(-999., replacement_value)\n self.data_obj.mc_df_bkg[empty_var] = self.data_obj.mc_df_bkg[empty_var].replace(-999., replacement_value)\n if do_data: 
self.data_obj.data_df[empty_var] = self.data_obj.data_df[empty_var].replace(-999., replacement_value)\n\n #print self.data_obj.mc_df_sig[empty_vars]\n #print np.isnan(self.data_obj.mc_df_sig[empty_vars]).any()\n\n for var in gev_vars:\n if var in (self.low_level_vars_flat+self.high_level_vars):\n self.data_obj.mc_df_sig[var] = self.data_obj.mc_df_sig.apply(self.var_transform_helper, axis=1, args=[var, replacement_value])\n self.data_obj.mc_df_bkg[var] = self.data_obj.mc_df_bkg.apply(self.var_transform_helper, axis=1, args=[var, replacement_value])\n if do_data: self.data_obj.data_df[var] = self.data_obj.data_df.apply(self.var_transform_helper, axis=1, args=[var, replacement_value])\n\n #print np.isnan(self.data_obj.mc_df_sig[empty_vars]).any()", "def _getBMat(self):\n\n \"\"\"B matrix is just a mass matrix, can be easily assembled\n through fenics. However, the ordering in Fenics is not the\n mesh ordering. So we build a temp matrix and then use the\n vertex to dof map to get the right ordering interms of our\n mesh nodes.\n \"\"\"\n\n # create function space of order 1. For KL, we only restrict\n # to first order spaces.\n V = FunctionSpace(self._mesh, \"CG\", 1)\n # Define basis and bilinear form\n u = TrialFunction(V)\n v = TestFunction(V)\n a = u * v * dx\n # assemble in a temp matrix\n B_temp = assemble(a)\n\n # create petsc matrix B\n B = PETSc.Mat().create()\n B.setType('aij')\n B.setSizes(self.domain.getNodes(), self.domain.getNodes())\n B.setUp()\n\n # store the value in a a temp array B_ij\n B_ij = B_temp.array()\n\n # get the vertex to dof map\n v_to_d_map = vertex_to_dof_map(V)\n\n print '---------------------------'\n print '---------------------------'\n print ' Building Mass Matrix '\n print '---------------------------'\n print '---------------------------'\n for node_i in range(0, self.domain.getNodes()):\n for node_j in range(node_i, self.domain.getNodes()):\n B_ij_nodes = B_ij[v_to_d_map[node_i], v_to_d_map[node_j]]\n if B_ij_nodes > 0:\n B.setValue(node_i, node_j, B_ij_nodes)\n B.setValue(node_j, node_i, B_ij_nodes)\n\n B.assemblyBegin()\n B.assemblyEnd()\n print '---------------------------'\n print '---------------------------'\n print ' Finished Mass Matrix '\n print '---------------------------'\n print '---------------------------'\n return B", "def to_mmxu(self, mmxu):\r\n if (self.position == SinglePhaseBreaker.CLOSED):\r\n super().to_mmxu(mmxu)\r\n else:\r\n now = datetime.now()\r\n mmxu_dict = {\r\n \"A\": 0,\r\n \"Hz\": 0,\r\n \"PF\": 1,\r\n \"PFSign\": 0,\r\n \"V\": 0,\r\n \"VA\": 0,\r\n \"VAr\": 0,\r\n \"W\": 0\r\n }\r\n set_phase_a_mmxu(mmxu, mmxu_dict, now)", "def parse_MTData2(self, data):\n # Functions to parse each type of packet\n def parse_orientation(data_id, content):\n o = struct.unpack('!'+3*self._ffmt, content)\n if self._coorSys == 'ENU':\n self._imu.orientation.x = o[0]\n self._imu.orientation.y = o[1]\n self._imu.orientation.z = o[2]\n elif self._coorSys == 'NED':\n self._imu.orientation.x = o[0]\n self._imu.orientation.y = -o[1]\n self._imu.orientation.z = -o[2]\n self._getAll = True\n\n def parse_acceleration(data_id, content):\n o = struct.unpack('!'+3*self._ffmt, content)\n if self._coorSys == 'ENU':\n self._imu.linear_acceleration.x = o[0]\n self._imu.linear_acceleration.y = o[1]\n self._imu.linear_acceleration.z = o[2]\n elif self._coorSys == 'NED':\n self._imu.linear_acceleration.x = o[0]\n self._imu.linear_acceleration.y = -o[1]\n self._imu.linear_acceleration.z = -o[2]\n\n def parse_angular_velocity(data_id, content):\n o = 
struct.unpack('!'+3*self._ffmt, content)\n if self._coorSys == 'ENU':\n self._imu.angular_velocity.x = o[0]\n self._imu.angular_velocity.y = o[1]\n self._imu.angular_velocity.z = o[2]\n if self._coorSys == 'NED':\n self._imu.angular_velocity.x = o[0]\n self._imu.angular_velocity.y = -o[1]\n self._imu.angular_velocity.z = -o[2]\n\n # data object\n while data:\n data_id, size = struct.unpack('!HB', data[:3])\n # determind a data format (double or float)\n\n content = data[3:3+size]\n data = data[3+size:]\n group = data_id & 0xFFF0\n\n if group == XDIGroup.OrientationData:\n parse_orientation(data_id, content)\n elif group == XDIGroup.Acceleration:\n parse_acceleration(data_id, content)\n elif group == XDIGroup.AngularVelocity:\n parse_angular_velocity(data_id, content)", "def _M_step(self, stats):\n new_model = super()._M_step(stats)\n\n if 'e' in self.tr_params:\n new_model['B'] = [\n (stats['B']['numer'][i] / stats['B']['denom'][i])\n for i in range(self.n_emissions)\n ]\n\n return new_model", "def __storeDataDict(self, dataArray):\n (nCols, nRows) = dataArray.shape\n\n keys = ['time_min', 'n', 'vx','vy','vz', 'cs', 'bx','by','bz','b']\n names = ['Time (Minutes since start)','Density','Vx (gsm)','Vy (gsm)','Vz (gsm)','Sound Speed','Bx (gsm)','By (gsm)','Bz (gsm)','B']\n units = ['min',r'$\\mathrm{1/cm^3}$',\n r'$\\mathrm{km/s}$', r'$\\mathrm{km/s}$', r'$\\mathrm{km/s}$',\n r'$\\mathrm{km/s}$',\n r'$\\mathrm{nT}$',r'$\\mathrm{nT}$',r'$\\mathrm{nT}$',\n r'$\\mathrm{nT}$'\n ]\n\n # Fill in the data dictionary\n for i, key in enumerate(keys):\n self.data.append(key, names[i], units[i], dataArray[i,:])\n\n # If a non-zero tilt angle is in the solar wind file, data is\n # stored in SM coordinates.\n if ( nCols > 10 ):\n if (not self.__isZeroEverywhere(dataArray[10,:])):\n keys.append('tilt')\n names.append('SM Tilt Angle')\n units.append('Rad')\n self.__sm2gsm(self.data)", "def process_pssm_data(self):\n\n self.pssm_data = self._mask_pssm(self.pssm_data,nmask=self.nmask)\n self.pssm_data = self._filter_pssm(self.pssm_data)\n self.pssm_data = self._smooth_pssm(self.pssm_data,msmooth=self.nsmooth)\n self.pssm_data = np.mean(self.pssm_data,1)", "def processEnvData(self, data):\n\n currenttime = datetime.utcnow()\n outdate = datetime.strftime(currenttime, \"%Y-%m-%d\")\n filename = outdate\n actualtime = datetime.strftime(currenttime, \"%Y-%m-%dT%H:%M:%S.%f\")\n outtime = datetime.strftime(currenttime, \"%H:%M:%S\")\n timestamp = datetime.strftime(currenttime, \"%Y-%m-%d %H:%M:%S.%f\")\n #header = \"# MagPyBin, sensor_id, [parameterlist], [unit-conversion-list], packing string, length\"\n packcode = '6hLllL'\n sensorid = self.sensor\n header = \"# MagPyBin %s %s %s %s %s %s %d\" % (sensorid, '[t1,t2,var1]', '[T,DewPoint,RH]', '[deg_C,deg_C,per rh]', '[1000,1000,1000]', packcode, struct.calcsize(packcode))\n\n valrh = re.findall(r'\\d+',data[0])\n if len(valrh) > 1:\n temp = float(valrh[0] + '.' + valrh[1])\n else:\n temp = float(valrh[0])\n valrh = re.findall(r'\\d+',data[1])\n if len(valrh) > 1:\n rh = float(valrh[0] + '.' + valrh[1])\n else:\n rh = float(valrh[0])\n valrh = re.findall(r'\\d+',data[2])\n if len(valrh) > 1:\n dew = float(valrh[0] + '.' 
+ valrh[1])\n else:\n dew = float(valrh[0])\n\n datearray = timeToArray(timestamp)\n\n try:\n datearray = timeToArray(timestamp)\n datearray.append(int(temp*1000))\n datearray.append(int(dew*1000))\n datearray.append(int(rh*1000))\n data_bin = struct.pack(packcode,*datearray)\n except:\n log.msg('Error while packing binary data')\n pass\n\n # File Operations\n dataToFile(self.outputdir, sensorid, filename, data_bin, header)\n\n # create a dictionary out of the input file\n\n evt0 = {'id': 0, 'value': self.hostname}\n evt1 = {'id': 1, 'value': timestamp}\n evt3 = {'id': 3, 'value': outtime}\n evt30 = {'id': 30, 'value': temp}\n evt33 = {'id': 33, 'value': rh}\n evt34 = {'id': 34, 'value': dew}\n evt99 = {'id': 99, 'value': 'eol'}\n\n return evt0,evt1,evt3,evt30,evt33,evt34,evt99", "def mms_split_fgm_data(probe, data_rate, level, instrument, suffix=''):\n\n probe = probe.lower()\n instrument = instrument.lower()\n data_rate = data_rate.lower()\n level = level.lower()\n\n if level.lower() == 'l2pre':\n data_rate_mod = data_rate + '_l2pre'\n else:\n data_rate_mod = data_rate\n\n coords = ['dmpa', 'gse', 'gsm', 'bcs']\n\n out_vars = []\n\n for coord in coords:\n if level in ['l2', 'l2pre']:\n tplot_name = 'mms' + probe + '_' + instrument + '_b_' + coord + '_' + data_rate + '_' + level + suffix\n else:\n tplot_name = 'mms' + probe + '_' + instrument + '_' + data_rate_mod + '_' + coord + suffix\n\n if not data_exists(tplot_name):\n continue\n\n fgm_data = get_data(tplot_name, dt=True)\n\n if fgm_data is None:\n continue\n\n metadata = get_data(tplot_name, metadata=True)\n\n if suffix != '':\n tplot_name = tplot_name[0:-len(suffix)]\n\n store_data(tplot_name + '_bvec' + suffix, data={'x': fgm_data.times, 'y': fgm_data.y[:, :3]}, attr_dict=metadata)\n store_data(tplot_name + '_btot' + suffix, data={'x': fgm_data.times, 'y': fgm_data.y[:, 3]}, attr_dict=metadata)\n\n options(tplot_name + '_btot' + suffix, 'legend_names', 'Bmag')\n options(tplot_name + '_btot' + suffix, 'ytitle', 'MMS'+probe + ' FGM')\n\n out_vars.append(tplot_name + '_bvec' + suffix)\n out_vars.append(tplot_name + '_btot' + suffix)\n\n return out_vars", "def create_flux_vector_pms_gr(self):\n soma_prod = 0\n soma_inj = 0\n lim4 = 1e-4\n store_velocity = {}\n store_flux = {}\n for primal in self.primals:\n #1\n primal_id = self.mb.tag_get_data(self.primal_id_tag, primal, flat=True)[0]\n primal_id = self.ident_primal[primal_id]\n fine_elems_in_primal = self.mb.get_entities_by_handle(primal)\n for volume in fine_elems_in_primal:\n #2\n flux = {}\n velocity = {}\n kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])\n centroid_volume = self.mesh_topo_util.get_average_position([volume])\n z_vol = self.tz - centroid_volume[2]\n adjs_vol = self.mesh_topo_util.get_bridge_adjacencies(volume, 2, 3)\n gid_vol = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]\n for adj in adjs_vol:\n #3\n gid_adj = self.mb.tag_get_data(self.global_id_tag, adj, flat=True)[0]\n if adj not in fine_elems_in_primal:\n #4\n pvol = self.mb.tag_get_data(self.pms_tag, volume, flat=True)[0]\n padj = self.mb.tag_get_data(self.pms_tag, adj, flat=True)[0]\n #3\n else:\n #4\n pvol = self.mb.tag_get_data(self.pcorr_tag, volume, flat=True)[0]\n padj = self.mb.tag_get_data(self.pcorr_tag, adj, flat=True)[0]\n #3\n kadj = self.mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])\n centroid_adj = self.mesh_topo_util.get_average_position([adj])\n z_adj = self.tz - centroid_adj[2]\n direction = centroid_adj - centroid_volume\n unit = 
direction/np.linalg.norm(direction)\n #unit = vetor unitario na direcao de direction\n uni = self.unitary(direction)\n # uni = valor positivo do vetor unitario\n kvol = np.dot(np.dot(kvol,uni),uni)\n kadj = np.dot(np.dot(kadj,uni),uni)\n keq = self.kequiv(kvol, kadj)/(self.mi)\n keq2 = keq\n keq = keq*(np.dot(self.A, uni))\n pvol2 = self.mb.tag_get_data(self.pms_tag, volume, flat=True)[0]\n padj2 = self.mb.tag_get_data(self.pms_tag, adj, flat=True)[0]\n grad_p = (padj - pvol)/float(abs(np.dot(direction, uni)))\n grad_z = (z_adj - z_vol)/float(abs(np.dot(direction, uni)))\n grad_p2 = (padj2 - pvol2)/float(abs(np.dot(direction, uni)))\n q = (grad_p)*keq - grad_z*keq*self.gama\n print((grad_p)*keq)\n print(- grad_z*keq*self.gama)\n print(q)\n print(self.store_flux_pf_gr[volume][tuple(unit)])\n print('\\n')\n import pdb; pdb.set_trace()\n\n if gid_adj > gid_vol:\n v = -((grad_p2)*keq2 - grad_z*self.gama*keq2)\n else:\n v = -((grad_p2)*keq2 - grad_z*self.gama*keq2)\n\n flux[tuple(unit)] = q\n velocity[tuple(unit)] = v\n kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])\n\n #2\n # print(gid_vol)\n # print(velocity)\n # print('\\n')\n # import pdb; pdb.set_trace()\n store_flux[volume] = flux\n self.mb.tag_set_data(self.flux_fine_pms_tag, volume, sum(flux.values()))\n # flt = sum(flux.values())\n # if volume not in self.wells_inj and volume not in self.wells_prod:\n # lim4 = 1e-7\n # if abs(flt) > lim4:\n # print(gid_vol)\n # print(flt)\n # import pdb; pdb.set_trace()\n # flt = sum(flux.values())\n store_velocity[volume] = velocity\n\n for volume in set(self.all_fine_vols) - set(self.wells):\n gid = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]\n values = store_flux[volume].values()\n if sum(values) > lim4:\n print('fluxo multiescala nao esta dando conservativo')\n print('gid:{0}'.format(gid))\n print(sum(values))\n import pdb; pdb.set_trace()\n\n with open('fluxo_multiescala_gr.txt', 'w') as arq:\n for volume in self.wells:\n gid = self.mb.tag_get_data(self.global_id_tag, volume, flat= True)[0]\n values = store_flux[volume].values()\n if volume in self.wells_inj:\n soma_inj += sum(values)\n else:\n soma_prod += sum(values)\n arq.write('gid:{0} , fluxo:{1}\\n'.format(gid, sum(values)))\n arq.write('\\n')\n arq.write('soma_inj:{0}\\n'.format(soma_inj))\n arq.write('soma_prod:{0}\\n'.format(soma_prod))\n\n return store_flux", "def convert_mass(self, event):\n try:\n #Compare other unit to one unit(kilograms)\n current_value, current_unit = float(\"0\" + str(self.v.get())), self.dropdown.get()\n unit_comp = {\"Earth masses\": 5.97219e+24, \"Solar masses\": 1.9890000000000002e+30, \"carats\": 0.0002, \"cental\": 45.359237, \"decagrams\": 0.01, \"femtograms\": 1e-18, \"grains\": 6.479891000000001e-05, \"grams\": 0.001, \"hectograms\": 0.1, \"hundredweights\": 50.802345, \"kilograms\": 1.0, \"kilotonnes\": 1000000.0, \"megatonnes\": 1000000000.0, \"micrograms\": 1e-09, \"milligrams\": 1e-06, \"nanograms\": 1e-12, \"ounces(US & UK)\": 0.02835, \"ounces(precious metals)\": 0.031103, \"picograms\": 1e-15, \"pounds(US & UK)\": 0.453592, \"pounds(precious metals)\": 0.373242, \"slugs\": 14.593903, \"stones\": 6.350293, \"tonnes(metric)\": 1000.0, \"tons(UK)\": 1016.046909, \"tons(US)\": 907.18474}\n value_comp, printer = current_value * unit_comp[current_unit], \"\"\n unit_list = sorted(unit_comp.keys())\n unit_list.remove(current_unit)\n for unit in unit_list:\n printer += \"To %s \" % unit + \" \" * (max([len(i) for i in unit_list]) - len(unit)) + str(value_comp / 
unit_comp[unit]) + [\"\", \"\\n\"][unit_list[-1] != unit]\n except ValueError: #In case user enter the other type of value, not Int or Float\n printer = \"Value is invalid.\"\n self.print_text(printer)", "def _build_parsed_values(self):\r\n # match the data inside the wrapper\r\n if len(self.raw_data) < ACCEL_BYTES or self.raw_data[0] != ACCEL_ID:\r\n raise SampleException(\"MopakODclAccelParserDataParticle: Not enough bytes provided in [%s]\",\r\n self.raw_data)\r\n fields = struct.unpack('>fffffffffI', self.raw_data[1:ACCEL_BYTES - 2])\r\n\r\n result = [self._encode_value(MopakODclAccelParserDataParticleKey.MOPAK_ACCELX, fields[0], float),\r\n self._encode_value(MopakODclAccelParserDataParticleKey.MOPAK_ACCELY, fields[1], float),\r\n self._encode_value(MopakODclAccelParserDataParticleKey.MOPAK_ACCELZ, fields[2], float),\r\n self._encode_value(MopakODclAccelParserDataParticleKey.MOPAK_ANG_RATEX, fields[3], float),\r\n self._encode_value(MopakODclAccelParserDataParticleKey.MOPAK_ANG_RATEY, fields[4], float),\r\n self._encode_value(MopakODclAccelParserDataParticleKey.MOPAK_ANG_RATEZ, fields[5], float),\r\n self._encode_value(MopakODclAccelParserDataParticleKey.MOPAK_MAGX, fields[6], float),\r\n self._encode_value(MopakODclAccelParserDataParticleKey.MOPAK_MAGY, fields[7], float),\r\n self._encode_value(MopakODclAccelParserDataParticleKey.MOPAK_MAGZ, fields[8], float),\r\n self._encode_value(MopakODclAccelParserDataParticleKey.MOPAK_TIMER, fields[9], int)]\r\n\r\n return result", "def reformat_data(matlab_data) -> dict:\n data = dict()\n data[\"significant_ripples\"] = matlab_data.SignificantRipples - 1\n data[\"ripple_info\"] = matlab_data.RippleTimes\n data[\"inhibitory_neurons\"] = (\n np.array(matlab_data.InhibitoryNeurons) - 1\n ) # account for matlab indexing\n data[\"excitatory_neurons\"] = (\n matlab_data.ExcitatoryNeurons - 1\n ) # account for matlab indexing\n data[\"well_locations\"] = matlab_data.WellLocations\n data[\"well_sequence\"] = matlab_data.WellSequence\n data[\"spike_ids\"] = matlab_data.SpikeData[:, 1].astype(int) - 1\n data[\"spike_times_s\"] = matlab_data.SpikeData[:, 0]\n data[\"pos_times_s\"] = matlab_data.PositionData[:, 0]\n data[\"pos_xy_cm\"] = np.squeeze(matlab_data.PositionData[:, 1:-1])\n data[\"ripple_times_s\"] = data[\"ripple_info\"][:, :2]\n data[\"n_ripples\"] = len(data[\"ripple_times_s\"])\n data[\"n_cells\"] = np.max(data[\"spike_ids\"] + 1)\n return data", "def data_process():\n global localtime\n global value_dict\n sensor_types = sEtting.sensor_types\n sensor_values = []\n msg = None\n value_dict = collections.OrderedDict.fromkeys(sEtting.payload_header)\n value_dict[\"ver_format\"] = sEtting.ver_format\n value_dict[\"FAKE_GPS\"] = sEtting.fake_gps\n value_dict[\"app\"] = sEtting.app\n value_dict[\"ver_app\"] = sEtting.ver_app\n value_dict[\"device_id\"] = sEtting.device_id\n value_dict[\"date\"] = localtime.strftime(\"%Y-%m-%d\")\n value_dict[\"time\"] = localtime.strftime(\"%H:%M:%S\")\n value_dict[\"device\"] = sEtting.device\n\n for sensor in sensor_types:\n if sensor == 'pm25-at':\n value_dict[\"s_d0\"] = get_reading_csv(sensor)\n elif sensor == 'temperature':\n value_dict[\"s_t0\"] = get_reading_csv(sensor)\n elif sensor == 'humidity':\n value_dict[\"s_h0\"] = get_reading_csv(sensor)\n elif sensor == 'pm10-at':\n value_dict[\"s_d1\"] = get_reading_csv(sensor)\n else:\n print 'Not support sensor type.'\n if sEtting.fake_gps == 1:\n value_dict[\"gps_lat\"] = sEtting.fgps_lat\n value_dict[\"gps_lon\"] = sEtting.fgps_lon\n value_dict[\"gps_alt\"] 
= sEtting.fgps_alt\n value_dict[\"gps_fix\"] = 0\n else:\n value_dict[\"gps_lat\"] = get_gps()[0]\n value_dict[\"gps_lon\"] = get_gps()[1]\n value_dict[\"gps_alt\"] = get_gps()[2]\n value_dict[\"gps_fix\"] = gpsd.fix.mode\n value_dict[\"gps_num\"] = 0\n #if debug_enable == '0':\n msg = \"|\" + \"|\".join([\"=\".join([key, str(val)])\n for key, val in value_dict.items()])\n return msg\n #elif debug_enable == '1':\n # msg_debug = \",\".join([\"=\".join([key, str(val)]) for key, val in value_dict.items()])\n # return msg_debug", "def tcs2(self):\n S = self.M.allocState({})\n self.M.propagate(S, 0, 1)\n\n # set initial beam data\n S.ref_IonZ = self.refIonZ\n S.IonZ = self.IonZ\n\n S.moment0 = self.BC0\n S.moment1 = self.ENV0\n\n S.ref_IonEk = self.refIonEk\n \n S.phis = S.moment0[PS_S,:]\n S.IonEk = S.moment0[PS_PS,:]*MeVtoeV + S.ref_IonEk\n\n ### Reconstract data\n U = collections.OrderedDict()\n T = collections.OrderedDict()\n \n U,T = self.save(U, T, S)\n\n\n #S.clng = self.clng\n\n fin = len(self.M)\n\n H = self.M.propagate(S, 1, fin, observe=range(fin))\n \n ### Reconstract data\n U = collections.OrderedDict()\n T = collections.OrderedDict()\n\n for i in range(fin-1):\n U,T = self.save(U, T, H[i][1])\n\n return U,T", "def read_smx_fmv_12(eps_file):\n raw_data = eps_file.scaled_mdr\n raw_unscaled = eps_file.mdr\n\n n_node_per_line = raw_data[\"LONGITUDE\"].shape[1]\n n_lines = raw_data[\"LONGITUDE\"].shape[0]\n n_records = eps_file.mdr_counter * n_node_per_line\n idx_nodes = np.arange(eps_file.mdr_counter).repeat(n_node_per_line)\n\n data = {}\n metadata = {}\n\n metadata[\"spacecraft_id\"] = np.int8(eps_file.mphr[\"SPACECRAFT_ID\"][-1])\n metadata[\"orbit_start\"] = np.uint32(eps_file.mphr[\"ORBIT_START\"])\n\n ascat_time = shortcdstime2jd(raw_data[\"UTC_LINE_NODES\"].flatten()[\"day\"],\n raw_data[\"UTC_LINE_NODES\"].flatten()[\"time\"])\n data[\"jd\"] = ascat_time[idx_nodes]\n\n fields = [(\"sigma0_trip\", long_nan), (\"inc_angle_trip\", uint_nan),\n (\"azi_angle_trip\", int_nan), (\"kp\", uint_nan),\n (\"f_land\", uint_nan)]\n\n for f, nan_val in fields:\n data[f] = raw_data[f.upper()].reshape(n_records, 3)\n valid = raw_unscaled[f.upper()].reshape(n_records, 3) != nan_val\n data[f][~valid] = nan_val\n\n fields = [\"sat_track_azi\", \"abs_line_number\"]\n for f in fields:\n data[f] = raw_data[f.upper()].flatten()[idx_nodes]\n\n fields = [(\"longitude\", long_nan, long_nan),\n (\"latitude\", long_nan, long_nan),\n (\"swath_indicator\", byte_nan, byte_nan),\n (\"soil_moisture\", uint_nan, uint_nan),\n (\"soil_moisture_error\", uint_nan, uint_nan),\n (\"sigma40\", long_nan, long_nan),\n (\"sigma40_error\", long_nan, long_nan),\n (\"slope40\", long_nan, long_nan),\n (\"slope40_error\", long_nan, long_nan),\n (\"dry_backscatter\", long_nan, long_nan),\n (\"wet_backscatter\", long_nan, long_nan),\n (\"mean_surf_soil_moisture\", uint_nan, uint_nan),\n (\"soil_moisture_sensetivity\", ulong_nan, float32_nan),\n (\"correction_flags\", uint8_nan, uint8_nan),\n (\"processing_flags\", uint8_nan, uint8_nan),\n (\"aggregated_quality_flag\", uint8_nan, uint8_nan),\n (\"snow_cover_probability\", uint8_nan, uint8_nan),\n (\"frozen_soil_probability\", uint8_nan, uint8_nan),\n (\"innudation_or_wetland\", uint8_nan, uint8_nan),\n (\"topographical_complexity\", uint8_nan, uint8_nan)]\n\n for f, nan_val, new_nan_val in fields:\n data[f] = raw_data[f.upper()].flatten()\n valid = raw_unscaled[f.upper()].flatten() != nan_val\n data[f][~valid] = new_nan_val\n\n # sat_track_azi (uint)\n data[\"as_des_pass\"] = 
\\\n np.array(raw_data[\"SAT_TRACK_AZI\"].flatten()[idx_nodes] < 270)\n\n # modify longitudes from [0,360] to [-180,180]\n mask = np.logical_and(data[\"longitude\"] != long_nan,\n data[\"longitude\"] > 180)\n data[\"longitude\"][mask] += -360.\n\n # modify azimuth from (-180, 180) to (0, 360)\n mask = (data[\"azi_angle_trip\"] != int_nan) & (data[\"azi_angle_trip\"] < 0)\n data[\"azi_angle_trip\"][mask] += 360\n\n fields = [\"param_db_version\", \"warp_nrt_version\"]\n for f in fields:\n data[f] = raw_data[\"PARAM_DB_VERSION\"].flatten()[idx_nodes]\n\n metadata[\"spacecraft_id\"] = int(eps_file.mphr[\"SPACECRAFT_ID\"][2])\n\n data[\"node_num\"] = np.tile((np.arange(n_node_per_line) + 1), n_lines)\n\n data[\"line_num\"] = idx_nodes\n\n return data, metadata" ]
[ "0.5918188", "0.5905822", "0.5839146", "0.569138", "0.56422216", "0.5492949", "0.54187244", "0.54171056", "0.53424686", "0.5341912", "0.5314857", "0.5298684", "0.51961774", "0.5193409", "0.51925063", "0.51916564", "0.51853", "0.51811767", "0.51800555", "0.5175418", "0.51414967", "0.50957996", "0.50824875", "0.5060248", "0.50598085", "0.50527304", "0.5037073", "0.5030931", "0.50285023", "0.5019228" ]
0.8032454
0
Convert from [year, doy, hour, minute] to datetime object

>>> sw = LFM('examples/data/solarWind/LFM_SWSMDAT')
>>> sw._LFM__parseDate('1995 80 0 1')
datetime.datetime(1995, 3, 21, 0, 1)
def __parseDate(self, dateStr):
    fields = [int(s) for s in dateStr.split()]
    date = (datetime.datetime(year=fields[0], month=1, day=1,
                              hour=fields[2], minute=fields[3])
            + datetime.timedelta(fields[1] - 1))
    return date
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse(s):\n m, d, y = s.split('/')\n mo = int(m)\n da = int(d)\n yr = int(y)\n d = datetime.date(yr, mo, da)\n return d", "def date_parse(date_string) -> datetime:\n return datetime.strptime(date_string, DATE_FMT)", "def parse_date(date):\n # MediaWiki API dates are always of the format\n # YYYY-MM-DDTHH:MM:SSZ\n # (see $formats in wfTimestamp() in includes/GlobalFunctions.php)\n return datetime.strptime(date, '%Y-%m-%dT%H:%M:%SZ')", "def parse_datestr(datestr):\n if datestr == \"\":\n return datetime.datetime(1970, 1, 1)\n \n match = re.match(date_regex, datestr)\n if not match:\n raise LLSDParseError(\"invalid date string '%s'.\" % datestr)\n \n year = int(match.group('year'))\n month = int(match.group('month'))\n day = int(match.group('day'))\n hour = int(match.group('hour'))\n minute = int(match.group('minute'))\n second = int(match.group('second'))\n seconds_float = match.group('second_float')\n microsecond = 0\n if seconds_float:\n microsecond = int(float('0' + seconds_float) * 1e6)\n return datetime.datetime(year, month, day, hour, minute, second, microsecond)", "def full_parse_date(text: str) -> Date:\n pred = dateparser.parse(text)\n return pred.year * 10000 + pred.month * 100 + pred.day", "def datetime(self):\n year = self._year\n # Check if the first word of the date (the month) is either january or\n # february, and increase the year by 1.\n if self._date.split(' ')[0].lower() in ['january', 'february']:\n year = int(year) + 1\n date_string = '%s %s %s' % (self._day,\n self._date,\n year)\n return datetime.strptime(date_string, '%a %B %d %Y')", "def parse_date(twitter_lame_datetime_string):\n from datetime import datetime\n return datetime.strptime(twitter_lame_datetime_string, \"%a %b %d %H:%M:%S +0000 %Y\")", "def parse_datetime(date_str: str) -> datetime:\n return dateutil.parser.parse(date_str)", "def _parse(self, date_str, format='%Y-%m-%d'):\n from pandas import to_datetime\n rv = to_datetime(date_str, format=format)\n if hasattr(rv, 'to_datetime'):\n rv = rv.to_datetime()\n return rv", "def parse_datetime(val):\n try: return maya.parse(val).datetime()\n except: return val", "def parse_date(date) -> datetime:\n\n if type(date) == datetime:\n return date\n try:\n date_object = datetime.strptime(date.replace(\" \", \"\"), \"%m/%d/%Y\")\n except (TypeError, ValueError) as exc:\n print(\"Cannot format time \" + str(exc), file=sys.stderr)\n return None\n return date_object", "def _parse_date(value):\n # Check for day-month pattern\n day_month_text = re.match(\"^(\\d{1,2})-(\\d{2})$\", value)\n if day_month_text:\n day = int(day_month_text.group(1))\n month = int(day_month_text.group(2))\n return datetime(datetime.now().year, month, day)\n\n # I assume Polish locale\n parts = value.strip().split(maxsplit=1)\n amount = int(parts[0])\n for hour_part in TIMEDELTA_HOURS:\n if hour_part in parts[1]:\n delta = timedelta(hours=amount)\n break\n else:\n for minute_part in TIMEDELTA_MINS:\n if minute_part in parts[1]:\n delta = timedelta(minutes=amount)\n break\n return datetime.now() - delta", "def parse_date(str_date):\n return ciso8601.parse_datetime(str_date)", "def convert_str2date(date):\n import datetime\n date = str(date)\n year = int(date[0:4])\n month = int(date[4:6])\n day = int(date[6:8])\n return datetime.datetime(year,month,day)", "def date_parser(date_arg):\n\n return datetime.datetime.strptime(date_arg, '%Y-%m-%d')", "def parse_date(date):\n\n return dateutil.parser.parse(date)", "def faststrptime(val):\n splits1 = val.split(\"/\")\n splits2 = 
splits1[2].split(\":\")\n return datetime.datetime(\n int(splits1[2][0:4]), # %Y\n int(splits1[0]), # %m\n int(splits1[1]), # %d\n int(splits2[0][4:len(splits2[0])]), # %H\n int(splits2[1]), # %M\n int(splits2[2][0:2]), # %s\n )", "def parse(self, str):\n values = self._exp.findall(str)\n if values is None or len(values) == 0:\n return None\n\n values = values[0]\n assert(len(values) == 3)\n\n day = int(values[self._dmy_idx[0]])\n month = int(values[self._dmy_idx[1]])\n year = int(values[self._dmy_idx[2]])\n\n return date(year, month, day)", "def parse_string_datetime(date):\n date_string_parse = date.split('/')\n year = int(date_string_parse[0])\n month = int(date_string_parse[1])\n day = int(date_string_parse[2])\n return year, month, day", "def _parseDate(self, dateString):\n if self.isInterval:\n offset = numpy.int64(0)\n else:\n offset = numpy.int64(1)\n\n if '_' in dateString:\n ymd, hms = dateString.split('_')\n else:\n if '-' in dateString:\n ymd = dateString\n # error can result if dateString = '1990-01'\n # assume this means '1990-01-01'\n if len(ymd.split('-')) == 2:\n ymd += '-01'\n hms = '00:00:00'\n else:\n if self.isInterval:\n ymd = '0000-00-00'\n else:\n ymd = '0000-01-01'\n hms = dateString\n\n if '.' in hms:\n hms = hms.replace('.', ':')\n\n if '-' in ymd:\n (self.years, self.months, self.days) \\\n = [numpy.int64(sub) for sub in ymd.split('-')]\n self.months -= offset\n self.days -= offset\n else:\n self.days = numpy.int64(ymd) - offset\n self.years = numpy.int64(0)\n self.months = numpy.int64(0)\n\n if ':' in hms:\n (self.hours, self.minutes, self.seconds) \\\n = [numpy.int64(sub) for sub in hms.split(':')]\n else:\n self.seconds = numpy.int64(hms)\n self.minutes = numpy.int64(0)\n self.hours = numpy.int64(0)\n self._setTotalSeconds()", "def _datetime(year, month, day, hour, minute, second):\n try:\n return datetime.datetime(\n year, month, day, hour, minute, second, tzinfo=datetime.timezone.utc\n )\n except ValueError:\n invalid_datetime = (\n f\"{year:04d}-{month:02d}-{day:02d} \"\n f\"{hour:02d}:{minute:02d}:{second:02d}\"\n )\n raise ftputil.error.ParserError(\n \"invalid datetime {0!r}\".format(invalid_datetime)\n )", "def datetime(s):\n default = datetime(1, 1, 1, 0, 0)\n if s is None or s == \"\":\n d = parse(\"0001-01-01\", default=default)\n else:\n d = parse(s, default=default)\n\n return d", "def parse_date(date_str):\n date_str = re.sub(r\"[ .-]\", '/', date_str.strip())\n date_time = datetime.strptime(date_str, \"%d/%m/%Y\")\n return date_time.date()", "def read_datetime(self, date_text):\n date_text = date_text.replace('/', '-')\n return datetime.strptime(date_text, '%Y-%m-%d %H:%M:%S')", "def parse_date_arg(date_arg):\n return datetime.datetime.strptime(date_arg, DATE_FORMAT)", "def parse_time(dt_str):\n year = int(dt_str[:4])\n month = int(dt_str[4:6] or 1)\n day = int(dt_str[6:8] or 1)\n hour = int(dt_str[8:10] or 0)\n minute = int(dt_str[10:12] or 0)\n return datetime(year, month, day, hour, minute)", "def parse_datetime(dt_str, format):\n t = time.strptime(dt_str, format)\n return datetime(t[0], t[1], t[2], t[3], t[4], t[5], t[6], pytz.UTC)", "def str_to_datetime(datestr):\n\n from dateutil import parser\n from datetime import datetime\n\n date = parser.parse(datestr)\n return date", "def parse_date(datestr):\n\n match = re.match(r\"^(\\d\\d\\d\\d)-(\\d\\d)-(\\d\\d)(?:\\ +\" r\"(\\d\\d):(\\d\\d)(?::(\\d\\d))?)?$\", datestr)\n if match:\n year = int(match.group(1))\n month = int(match.group(2))\n day = int(match.group(3))\n hour = match.group(4)\n if 
hour is not None:\n hour = int(hour)\n else:\n hour = 0\n minute = match.group(5)\n if minute is not None:\n minute = int(minute)\n else:\n minute = 0\n second = match.group(6)\n if second is not None:\n second = int(second)\n else:\n second = 0\n # return a \"seconds since epoch\" value assuming date given in UTC\n tm = (year, month, day, hour, minute, second, 0, 0, 0)\n return calendar.timegm(tm)\n else:\n return None", "def date_parse(date, *, convert_to_current_timezone: bool = False):\n if isinstance(date, (datetime.date, datetime.time, datetime.datetime)):\n return date\n\n # try to parse the date as an epoch datetime...\n # we start with epoch datetime as it is the most discrete form of a date\n try:\n date = epoch_to_date(date)\n except ValueError:\n # try to parse the given date with the dateutil module\n try:\n date = _dateutil_parser_parse(date)\n # if the given date could not be parsed by the dateutil module, try to parse the date using parsedatetime\n except ValueError as e:\n parsed_time_struct, parse_status = _parsedatetime_parse(date)\n\n # convert the parsed_time_struct to a datetime object and return it\n if parse_status > 0:\n date = time_struct_to_datetime(parsed_time_struct)\n else:\n message = f'Unable to convert the date \"{date}\" into a standard date format.'\n raise RuntimeError(message) from e\n\n if convert_to_current_timezone:\n date = date_make_timezone_aware(date)\n\n return date" ]
[ "0.66912717", "0.6641376", "0.64530736", "0.642597", "0.6422225", "0.63934094", "0.6359452", "0.6354985", "0.635257", "0.63273865", "0.6317693", "0.62617373", "0.6170135", "0.61521035", "0.61109614", "0.6102334", "0.60923606", "0.6085874", "0.608139", "0.6068376", "0.6065565", "0.6052139", "0.6021366", "0.6019536", "0.6007868", "0.5993234", "0.5970394", "0.5958499", "0.5954947", "0.5928603" ]
0.6771597
0
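For readers skimming the dump, here is a minimal standalone sketch of the year/day-of-year conversion used by the __parseDate document in the row above; the helper name is hypothetical and the snippet is not part of the dataset.

import datetime

def parse_year_doy(date_str):
    # "YYYY DOY HH MM" -> datetime; day-of-year is applied as a timedelta from Jan 1
    year, doy, hour, minute = (int(s) for s in date_str.split())
    return (datetime.datetime(year=year, month=1, day=1, hour=hour, minute=minute)
            + datetime.timedelta(doy - 1))

# Matches the doctest shown in the query field above.
assert parse_year_doy("1995 80 0 1") == datetime.datetime(1995, 3, 21, 0, 1)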
Returns the tissue expression as a tabular text file
def tissue_table(self, condition_tissue_id, use_means=True):
    table = ExpressionProfile.__profile_to_table(
        self.tissue_profile(condition_tissue_id, use_means=use_means)
    )
    return table
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_table2(df, eval_dir):\n\n out_file = os.path.join(eval_dir, 'table2.txt')\n\n\n with open(out_file, \"w\") as text_file:\n\n for idx, struc_name in enumerate(['LV', 'RV', 'Myo']):\n # new line\n header_string = ' & '\n line_string = '({}) '.format(struc_name)\n\n for p_idx, phase in enumerate(['ED', 'ES']):\n for measure in ['dice', 'assd', 'hd']:\n\n header_string += ' & {} ({}) '.format(phase, measure)\n\n dat = df.loc[(df['phase'] == phase) & (df['struc'] == struc_name)]\n\n if measure == 'dice':\n\n line_string += ' & {:.3f}\\,({:.3f}) '.format(np.mean(dat[measure]), np.std(dat[measure]))\n else:\n line_string += ' & {:.2f}\\,({:.2f}) '.format(np.mean(dat[measure]), np.std(dat[measure]))\n\n if p_idx == 0:\n header_string += ' & '\n line_string += ' & '\n\n header_string += ' \\\\\\\\ \\n'\n line_string += ' \\\\\\\\ \\n'\n\n if idx == 0:\n text_file.write(header_string)\n\n text_file.write(line_string)\n\n return 0", "def create_eqt_template(nodes, input_filename):\n output_filename = f'{input_filename[:-4]}_eqpt_sheet.txt'\n with open(output_filename, 'w', encoding='utf-8') as my_file:\n # print header similar to excel\n my_file.write('OPTIONAL\\n\\n\\n\\\n \\t\\tNode a egress amp (from a to z)\\t\\t\\t\\t\\tNode a ingress amp (from z to a) \\\n \\nNode A \\tNode Z \\tamp type \\tatt_in \\tamp gain \\ttilt \\tatt_out\\\n amp type \\tatt_in \\tamp gain \\ttilt \\tatt_out\\n')\n\n for node in nodes.values():\n if node.eqpt == 'ILA':\n my_file.write(f'{node.uid}\\t{node.to_node[0]}\\n')\n if node.eqpt == 'ROADM':\n for to_node in node.to_node:\n my_file.write(f'{node.uid}\\t{to_node}\\n')\n\n print(f'File {output_filename} successfully created with Node A - Node Z entries for Eqpt sheet in excel file.')", "def markdown_table(self, which):\n if which == 'C':\n coef = 'C'\n elif which == 'c':\n coef = 'c'\n elif which == 'f':\n coef = 'f'\n str = '|order|'\n for i in range(1,N+1):\n str = str + '$%s_{%d}$ |' % (coef,i)\n str = str + '\\n|'\n for i in range(1,N+1):\n str = str + '-|'\n str = str + '\\n'\n for i in range(1,self.N+1):\n str = str + (self.dat[i]).markdown_row(self.N, which)\n return str", "def read_opl_text(tdf,text, commaseperator = True):\n verify(stringish(text), \"text needs to be a string\")\n # probably want to verify something about the ticdat factory, look at the wiki\n dict_with_lists = defaultdict(list)\n NONE, TABLE, ROW, ROWSTRING, ROWNUM, FIELD, STRING, NUMBER = 1, 2, 3, 4, 5, 6, 7, 8\n mode = NONE\n field = ''\n table_name = ''\n row = []\n\n def to_number(st, pos):\n try:\n return float(st)\n except ValueError:\n verify(False,\n \"Badly formatted string - Field '%s' is not a valid number. Character position [%s].\" % (st, pos))\n\n for i,c in enumerate(text):\n if mode not in [STRING, ROWSTRING] and (c.isspace() or c == '{' or c == ';'):\n if mode in [NUMBER, ROWNUM, FIELD] and not commaseperator:\n c = ','\n else:\n continue\n if mode in [STRING, ROWSTRING]:\n if c == '\"':\n if text[i-1] == '\\\\':\n field = field[:-1] + '\"'\n else:\n if mode is ROWSTRING:\n row.append(field)\n field = ''\n verify(len(row) == len((dict_with_lists[table_name] or [row])[0]),\n \"Inconsistent row lengths found for table %s\" % table_name)\n dict_with_lists[table_name].append(row)\n row = []\n mode = TABLE\n else:\n mode = FIELD\n else:\n field += c\n elif c == '=':\n verify(mode is NONE, \"Badly formatted string, unrecognized '='. Character position [%s]\"%i)\n verify(len(table_name) > 0, \"Badly formatted string, table name can't be blank. 
Character position [%s]\"%i)\n verify(table_name not in dict_with_lists.keys(), \"Can't have duplicate table name. [Character position [%s]\"%i)\n dict_with_lists[table_name] = []\n mode = TABLE\n elif c == '<':\n verify(mode is TABLE, \"Badly formatted string, unrecognized '<'. Character position [%s]\"%i)\n mode = ROW\n\n elif c == ',':\n verify(mode in [ROW, FIELD, NUMBER, ROWNUM, TABLE], \"Badly formatted string, unrecognized ','. \\\n Character position [%s]\"%i)\n if mode is TABLE:\n continue\n if mode is ROWNUM:\n field = to_number(field,i)\n row.append(field)\n field = ''\n verify(len(row) == len((dict_with_lists[table_name] or [row])[0]),\n \"Inconsistent row lengths found for table %s\" % table_name)\n dict_with_lists[table_name].append(row)\n row = []\n mode = TABLE\n else:\n if mode is NUMBER:\n field = to_number(field,i)\n row.append(field)\n field = ''\n mode = ROW\n\n elif c == '\"':\n verify(mode in [ROW, TABLE], \"Badly formatted string, unrecognized '\\\"'. Character position [%s]\"%i)\n if mode is ROW:\n mode = STRING\n if mode is TABLE:\n mode = ROWSTRING\n\n elif c == '}':\n verify(mode in [TABLE, ROWNUM], \"Badly formatted string, unrecognized '}'. Character position [%s]\"%i)\n if mode is ROWNUM:\n field = to_number(field,i)\n row.append(field)\n field = ''\n verify(len(row) == len((dict_with_lists[table_name] or [row])[0]),\n \"Inconsistent row lengths found for table %s\" % table_name)\n dict_with_lists[table_name].append(row)\n row = []\n table_name = ''\n mode = NONE\n\n elif c == '>':\n verify(mode in [ROW, FIELD, NUMBER], \"Badly formatted string, unrecognized '>'. \\\n Character position [%s]\"%i)\n if mode is NUMBER:\n field = to_number(field,i)\n mode = FIELD\n if mode is FIELD:\n row.append(field)\n field = ''\n verify(len(row) == len((dict_with_lists[table_name] or [row])[0]),\n \"Inconsistent row lengths found for table %s\"%table_name)\n dict_with_lists[table_name].append(row)\n row = []\n mode = TABLE\n else:\n verify(mode in [NONE, ROW, ROWNUM, FIELD, NUMBER], \"Badly formatted string, \\\n unrecognized '%s'. 
Character position [%s]\"%(c,i))\n if mode is NONE:\n table_name += c\n elif mode is TABLE:\n mode = ROWNUM\n field += c\n else:\n mode = NUMBER\n field += c\n assert not find_duplicates_from_dict_ticdat(tdf, dict_with_lists), \\\n \"duplicates were found - if asserts are disabled, duplicate rows will overwrite\"\n\n return tdf.TicDat(**{k.replace(tdf.opl_prepend,\"\",1):v for k,v in dict_with_lists.items()})", "def create_txt_files(self, op_dir=None):\n for tb_nm, tb_cont in list(self.tables_info['tables'].items()):\n op_fl = '{}_{}.txt'.format(self.report_basename, tb_nm)\n if op_dir:\n op_fl = os.path.join(op_dir, op_fl)\n with open(op_fl, 'w') as TXT:\n TXT.write(tb_cont)", "def expr2truthtable(expr):\n inputs = [ttvar(v.names, v.indices) for v in expr.inputs]\n return truthtable(inputs, expr.iter_image())", "def parse_table_to_tracy_file(latname: str, df: pd.DataFrame, filename: str) -> None:\n save_string(parse_table_to_tracy_string(latname, df), filename)", "def export(self, fname):\n f = open(fname, 'w')\n for ue in self.ue_list:\n line_components = list()\n line_components.append(ue.expression)\n line_components.append(ue.meaning)\n print >>f, '\\t'.join(line_components).encode('utf-8')", "def read_exprs_as_df(fn):\n df = pd.read_table(fn, index_col=0).T\n return df", "def _get_staff_report_tab_delimited(self):\n stringio = io.StringIO()\n writer = csv.writer(stringio, delimiter='\\t',\n quoting=csv.QUOTE_MINIMAL)\n for record in self._get_staff_report_data():\n writer.writerow(record)\n return stringio.getvalue()", "def _get_text_rendering(self, table):\n text_table = ''\n base_variable = table.get_variable().replace(\"'\", '')\n\n if base_variable == self._system.get_settings().user_input:\n text_table += '\\n[user]\\t'\n elif base_variable == self._system.get_settings().system_output:\n text_table += '[system]\\t'\n else:\n text_table += '[' + base_variable + ']\\t'\n\n for value in table.get_values():\n if not isinstance(value, NoneVal):\n content = str(value)\n if table.get_prob(value) < 0.98:\n content += ' (' + StringUtils.get_short_form(table.get_prob(value)) + ')'\n\n text_table += content + '\\n\\t\\t'\n\n if base_variable == self._system.get_settings().user_input:\n text_table += '\\n'\n\n text_table = text_table[0:-3]\n return text_table", "def srt_to_txt(srt_file):\n text = ''\n for index, item in enumerate(srt_file):\n if item.text.startswith(\"[\"):\n continue\n text += \"(%d) \" % index\n text += item.text.replace(\"\\n\", \"\").strip(\"...\").replace(\n \".\", \"\").replace(\"?\", \"\").replace(\"!\", \"\")\n text += \". 
\"\n return text", "def table_maker():\r\n try:\r\n off_copy = off.copy()\r\n man_copy = man.copy()\r\n exe_copy = exe.copy()\r\n ceo_copy = ceo.copy()\r\n list_of_lists = [off_copy, man_copy, exe_copy, ceo_copy]\r\n\r\n for i in list_of_lists:\r\n for j in i:\r\n if type(j) == str:\r\n continue\r\n else:\r\n raise ValueError('All elements must be strings')\r\n\r\n row_num = max(len(off_copy), len(man_copy),\r\n len(exe_copy), len(ceo_copy))\r\n for i in list_of_lists:\r\n if len(i) != row_num:\r\n diff = row_num - len(i)\r\n for j in range(diff):\r\n i.append('')\r\n\r\n t = PrettyTable(\r\n ['Office Workers', 'Managers', 'Executives', 'CEO'])\r\n for i in range(row_num):\r\n t.add_row([off_copy[i], man_copy[i], exe_copy[i], ceo_copy[i]])\r\n\r\n with open('Employee Table.txt', 'w') as f:\r\n f.write(str(t))\r\n\r\n except FileNotFoundError:\r\n print(\"Error: No file entered\")", "def tabular_data(self):\n path = CFG.GRAPHS_DIR\n chdir(path)\n\n if self.experiment_count == 1:\n f = open(self.tablefile, 'w')\n f.write(self.print_border_line())\n f.write(self.table_header())\n f.write(self.print_border_line())\n f.write(self.pretty_string(\"Functions\"))\n f.write(self.pretty_string(\"Batch Size\"))\n f.write(self.pretty_string(\"Training (%)\"))\n f.write(self.pretty_string(\"Testing (%)\", True))\n f.write('\\n')\n f.write(self.print_border_line())\n f.close()\n\n f = open(self.tablefile, 'a')\n f.write(self.pretty_string(self.function_name))\n f.write(self.pretty_string(str(self.batch_size)))\n f.write(self.pretty_string(self.tr_mean_str))\n f.write(self.pretty_string(self.test_mean_str, True))\n f.write('\\n')\n f.close()", "def _symbols_table_file_content(\n rule_set: _RewriteRuleSet) -> Generator[str, None, None]:\n\n def _line(symbol: str, index: int) -> str:\n return f\"{symbol}\\t{index}\\n\"\n\n fst_symbols = []\n\n for rule in rule_set.rule:\n fst_symbols.extend(_symbols_of_input(rule.input))\n fst_symbols.extend(_symbols_of_output(rule.output))\n\n unique_symbols = set(fst_symbols).difference({common.EPSILON})\n complex_symbols = [s for s in unique_symbols if len(s) > 1]\n\n index = 983040 # start of the Unicode private use area.\n\n for symbol in sorted(complex_symbols):\n yield _line(symbol, index)\n index += 1\n\n logging.info(\"generated complex symbols file content\")", "def table(self, text):\n text = text + \"\\n\\n\"\n pattern = re.compile(r'^(?:table(_?%(s)s%(a)s%(c)s)\\.(.*?)\\n)?^(%(a)s%(c)s\\.? ?\\|.*\\|)[\\s]*\\n\\n'\n % {'s': self.table_span_re,\n 'a': self.align_re,\n 'c': self.c},\n re.S | re.M | re.U)\n return pattern.sub(self.fTable, text)", "def issue_text_dump(issue):\n file_path = os.path.join(BIO_REQUESTS_DIR,\n str(issue.id),\n str(issue.id) + '_' + str(issue.subject) + '_redmine_details.txt')\n with open(file_path, 'w+') as file:\n for attr in dir(issue):\n file.write('{}: {}\\n\\n'.format(attr, getattr(issue, attr)))\n return file_path", "def hg_report_txt(self):\n return op.join(self.root_dir, 'human_evaluation_summary.txt')", "def _t_test_results(self):\n t, df, p = self.api.m.math_utils.welchs_t_test(\n self.lkgr.values, self.fkbr.values)\n lines = [\n 'LKGR values: %r' % self.lkgr.values,\n 'FKBR values: %r' % self.fkbr.values,\n 't-statistic: %r' % t,\n 'deg. 
of freedom: %r' % df,\n 'p-value: %r' % p,\n 'Confidence score: %r' % (100 * (1 - p))\n ]\n return '\\n'.join(lines)", "def tab_delim_table(self):\n self.generate()\n\n header = ' \\t '.join([r'{: ^7}'.format(col) for col in self.columns])\n lines = []\n for row in self.rows:\n bits = []\n for col in self.columns:\n if col in self.formatters:\n bits.append(self.formatters[col].format(row[col]))\n else:\n bits.append(self.formatters.get(col, '{: ^7}').format(row[col] if row[col] else ''))\n lines.append(' \\t '.join(bits))\n\n return \"{}\\n{}\".format(header, '\\n'.join(lines))", "def hdf2txt_tmp(fname):\n print 'converting HDF5 -> temporary ASCII ...',\n data, fin = readh5(fname)\n f = tf.NamedTemporaryFile(suffix='') # create temp file\n np.savetxt(f, data, fmt='%f')\n f.seek(0)\n closeh5(fin)\n print 'done'\n return f", "def print_table1(df, eval_dir):\n\n out_file = os.path.join(eval_dir, 'table1.txt')\n\n header_string = ' & '\n line_string = 'METHOD '\n\n\n for s_idx, struc_name in enumerate(['LV', 'RV', 'Myo']):\n for measure in ['dice', 'assd']:\n\n header_string += ' & {} ({}) '.format(measure, struc_name)\n\n dat = df.loc[df['struc'] == struc_name]\n\n if measure == 'dice':\n line_string += ' & {:.3f}\\,({:.3f}) '.format(np.mean(dat[measure]), np.std(dat[measure]))\n else:\n line_string += ' & {:.2f}\\,({:.2f}) '.format(np.mean(dat[measure]), np.std(dat[measure]))\n\n if s_idx < 2:\n header_string += ' & '\n line_string += ' & '\n\n header_string += ' \\\\\\\\ \\n'\n line_string += ' \\\\\\\\ \\n'\n\n with open(out_file, \"w\") as text_file:\n text_file.write(header_string)\n text_file.write(line_string)\n\n return 0", "def latex_table():\n \n t = Table.read('../data/stream_origin.fits')\n N = len(t)\n \n f = open('../paper/stream_origin.tex', 'w')\n for i in range(N):\n t_ = t[i]\n for k in t_.colnames:\n if (t_[k]==np.nan) | (t_[k]=='nan'):\n t_[k] = '\\dots'\n #f.write('{:s} & {:s} & {:s} & {:s} & {:.1f}\\\\\\\\ \\n'.format(t_['Name'], t_['host'], t_['progenitor'], t_['type'], t_['feh']))\n line = '{:s} & {:s} & {:s} & {:s} & {:s}\\\\\\\\ \\n'.format(t_['Name'], t_['host'], t_['progenitor'], t_['friends'], t_['type'])\n f.write(line)\n print(line)\n \n f.close()", "def parse_multeval_results_table(fname, task, testset):\n result = []\n\n def parse_line(line, headers):\n values = list(filter(\n lambda s: s, map(lambda s: s.strip(), line.split(' '))))\n return OrderedDict({k: v for k, v in zip(headers, values)})\n\n def convert_to_link(sys_name):\n if sys_name.startswith('baseline:'):\n return sys_name\n url = url_for('results', task=task, testset=testset,\n system=sys_name)\n return f'<a href=\"{url}\">{sys_name}</a>'\n\n with open(fname) as f:\n header = f.readline().strip()\n n_runs = int(header.split()[4][-1])\n metrics = header.split()[5:]\n # add description of parentheses fields\n metrics = [f'{m} <em>(\\u03C3-sel/\\u03C3-opt/p)</em>' for m in metrics]\n headers = [f'SYSTEM ({n_runs} runs)'] + metrics\n # Skip empty line\n f.readline()\n # Get baseline system\n result.append(parse_line(f.readline().strip(), headers))\n baseline_name = result[0][headers[0]].split()[-1]\n for line in f:\n line = line.strip()\n if line:\n system = parse_line(line, headers)\n # Skip double baseline\n if system[headers[0]] != baseline_name:\n result.append(system)\n\n return pd.DataFrame.from_dict(result).to_html(\n index=False, border=0, justify='left', escape=False,\n classes=\"display compact row-border multeval\",\n formatters={\n # Convert system names to links\n headers[0]: lambda s: 
convert_to_link(s)}), baseline_name", "def write_model_table(n, params, path, filename='MODELS.txt'):\n print('Writing MODEL.txt table')\n runlist = np.arange(1, n + 1, dtype='int')\n\n p = dict(params)\n p['run'] = runlist\n\n cols = ['run', 'z', 'y', 'x', 'accrate', 'qb', 'qnuc',\n 'tshift', 'acc_mult', 'qb_delay', 'mass', 'radius', 'gravity',\n 'accmass', 'accdepth']\n ptable = pd.DataFrame(p)\n ptable = ptable[cols] # Fix column order\n\n table_str = ptable.to_string(index=False, formatters=FORMATTERS)\n\n filepath = os.path.join(path, filename)\n with open(filepath, 'w') as f:\n f.write(table_str)", "def create_tsv(df, filename=None):\n table = df.to_string()\n lines = table.splitlines()\n index_name = lines.pop(1).strip()\n lines[0] = index_name + lines[0][len(index_name):]\n table = '\\n'.join(lines)\n if filename is not None:\n with open(filename, 'w') as f:\n f.write(table)\n else:\n return table", "def sirv_report_txt(self):\n return op.join(self.root_dir, 'SIRV_evaluation_summary.txt')", "def to_tsv(obj: ConfiguredBaseModel, file: str) -> str:\n\n # Extract headers and rows from object\n if isinstance(obj, Entity):\n headers = obj.dict().keys()\n rows = [list(obj.dict().values())]\n elif isinstance(obj, (AssociationCountList, HistoPheno, Results)):\n if not obj.items:\n headers = get_headers_from_obj(obj)\n rows = []\n else:\n headers = obj.items[0].dict().keys()\n rows = [list(item.dict().values()) for item in obj.items]\n else:\n raise TypeError(FMT_INPUT_ERROR_MSG)\n\n fh = open(file, \"w\") if file else sys.stdout\n writer = csv.writer(fh, delimiter=\"\\t\")\n writer.writerow(headers)\n for row in rows:\n writer.writerow(list(row))\n if file:\n fh.close()\n console.print(f\"\\nOutput written to {file}\\n\")\n\n return", "def tsv_generator(file):\n for line in fileinput.input(file):\n article, summary = line.strip().split(\"\\t\")\n yield (article, summary)", "def parse_tcode_outfile(fname,offset=0):\n command = \"cat %s | grep -v '#' | sed '/^$/d' | sed 1d\" % (fname) \n ci,co = popen2(command)\n ci.close()\n out = co.readlines()\n co.close()\n retlist = []\n for line in out:\n line = line.strip()\n while \" \" in line: line = line.replace(\" \",\" \")\n parts = line.split(\" \",3)\n retlist.append( (int(parts[0])+offset, int(parts[1])+offset, float(parts[2]), parts[3] ) )\n return retlist" ]
[ "0.5715523", "0.5585215", "0.5510229", "0.5483904", "0.5433547", "0.539088", "0.53867865", "0.5360147", "0.5359347", "0.5353965", "0.5342304", "0.5327082", "0.5309732", "0.5296553", "0.5263918", "0.52535033", "0.52193636", "0.5209992", "0.5206868", "0.5203945", "0.51973903", "0.51866806", "0.5171204", "0.51601815", "0.5148548", "0.51336265", "0.5126641", "0.51261586", "0.5107367", "0.5085072" ]
0.5600994
1
Checks if the mean expression value in any condition in the plot is higher than the desired cutoff
def low_abundance(self, cutoff=10):
    data = json.loads(self.profile)
    checks = [mean(v) > cutoff for _, v in data["data"].items()]
    return not any(checks)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_equivalence_to_filter_output(self):\n error = np.linalg.norm(self.postmean - self.mean[[-1, -10]])/np.linalg.norm(self.mean[-1])\n self.assertLess(error, 1e-12)", "def test_equivalence_to_filter_output(self):\n error = np.linalg.norm(self.postmean - self.mean[[-1, -10]])/np.linalg.norm(self.mean[-1])\n self.assertLess(error, 1e-12)", "def test_equivalence_to_filter_output(self):\n error = np.linalg.norm(self.postmean - self.mean[[-1, -10]])/np.linalg.norm(self.mean[-1])\n self.assertLess(error, 1e-12)", "def _has_noise(self) -> bool:\n min = self.array.min()\n max = self.array.max()\n near_min, near_max = np.percentile(self.array, [0.5, 99.5])\n max_is_extreme = max > near_max * 1.25\n min_is_extreme = (min < near_min * 0.75) and (\n abs(min - near_min) > 0.1 * (near_max - near_min)\n )\n return max_is_extreme or min_is_extreme", "def test_equivalence_to_filter_output(self):\n error = np.linalg.norm(self.postmean - self.mean[-1])/np.linalg.norm(self.mean[-1])\n self.assertLess(error, 1e-12)", "def test_equivalence_to_filter_output(self):\n error = np.linalg.norm(self.postmean - self.mean[-1])/np.linalg.norm(self.mean[-1])\n self.assertLess(error, 1e-12)", "def test_equivalence_to_filter_output(self):\n error = np.linalg.norm(self.postmean - self.mean[-1])/np.linalg.norm(self.mean[-1])\n self.assertLess(error, 1e-12)", "def is_outlier(hist, value):\n stdev = np.std(hist, axis=0)\n avg = np.average(hist[-15:], axis=0)\n if any(lf for lf, avg, std in zip(value, avg, stdev) if lf > avg + 3 * std) or \\\n any(lf for lf, avg, std in zip(value, avg, stdev) if lf < avg - 3 * std):\n return True\n return False", "def test_signal_threshold(df_phys, signal, threshold):\n df_signal = df_phys[df_phys[\"Signal\"] == signal][\"Physical Value\"]\n\n stats = df_signal.agg([\"count\", \"min\", \"max\", \"mean\", \"std\"])\n delta = stats[\"max\"] - stats[\"min\"]\n\n if delta > threshold:\n print(f\"{signal} exhibits a 'max - min' delta of {delta} exceeding threshold of {threshold}\")", "def __call__(self, x):\n return np.mean(self.observations <= x)", "def close_to_exceeding(self) -> bool:\n mean = self.current / self.num_cuts\n if self.max_frames is not None:\n return self.current + mean > self.max_frames\n if self.max_samples is not None:\n return self.current + mean > self.max_samples\n if self.max_duration is not None:\n return self.current + mean > self.max_duration\n return False", "def do(self, x, typ):\n try:\n mean,stddev = self.mean_stddev()\n except StdDevFilterException:\n self.insert_value(x)\n return x,True\n\n self.insert_value(x)\n\n # limit dispersion, refuse new value when too far away from mean\n e = abs(x-mean)\n if e <= self.alpha*stddev:\n return x,True\n else:\n return mean,False", "def above_threshold(self, value):\n # We use floating point number here so we have to take care\n return finf(value,self.min) or finf(self.max,value)", "def check_difference_with_conditions(ymin,ymax,variable):\n\n if variable.lower() in ['grain type (Swiss Code F1F2F3)', 'grain type']:\n raise ValueError(\"You are trying to difference a categorical variable like grain type!\")\n\n if all([ymin,ymax]):\n return 0\n else:\n raise ValueError('To use the difference method you must specify y limits.')", "def conditional_mean(self, gp):\n raise NotImplementedError", "def aboveThresholdAlarm(self, data):\n\n if(self.calculateAverage(data) > self.threshold and self.aboveThreshold):\n message = \"Average above acceptable amount for \" + self.subjectName + \".\"\n if(self.log):\n 
logging.info(message)\n\n self.sendToAllSubscribers(message, \"Alert: Average performance above threshold.\")", "def should_average(self):\n return self._should_average", "def checkStdDev(df,thr):\n greaterThanThreshold = True\n positions= np.array([])\n for i in range(1,df.shape[0]):\n stdDev = np.std(df.iloc[i,1:].astype(np.longdouble))\n if (stdDev < thr):\n greaterThanThreshold = False\n positions = np.append(positions,i)\n \n return greaterThanThreshold", "def has_value_thres(x, **kwargs):\n thres = kwargs.get('thres', 0.1)\n has_value_percentage = (~np.isnan(x)).mean(axis=0)\n return has_value_percentage > thres", "def belowThresholdAlarm(self, data):\n\n if(self.belowThreshold and self.calculateAverage(data) < self.threshold):\n message = \"Average below acceptable amount for \" + self.subjectName + \".\"\n if(self.log):\n logging.info(message)\n\n self.sendToAllSubscribers(message, \"Alert: Average performance below threshold.\")", "def test_xmax_set(self):\n\t\tdetails = self.watcher.analyze(layers=[17], xmax=-1)\n\t\tactual_alpha = details.alpha.to_numpy()[0]\n\t\texpected_alpha = 3.0\n\t\tself.assertAlmostEqual(actual_alpha,expected_alpha, delta=0.1 )", "def _is_mlc_peak_in_window(\n self, window, height_threshold, edge_threshold, picket_peak_val\n ) -> bool:\n if self.orientation == Orientation.UP_DOWN:\n std = np.std(window, axis=1)\n else:\n std = np.std(window, axis=0)\n is_above_height_threshold = np.max(window) > height_threshold * picket_peak_val\n is_not_at_edge = max(std) < edge_threshold * np.median(std)\n return is_above_height_threshold and is_not_at_edge", "def find_cutoff(self, roi_results):\n int_ravel = roi_results[~np.isnan(roi_results[:, 3]), 3]\n mean = 0\n std = 0\n\n for _ in range(10):\n # for 10 times, fit norm to intensity and throw away outliers\n mean, std = norm.fit(int_ravel)\n int_ravel = int_ravel[int_ravel < mean + std * self.threshold_sigma]\n\n return mean + self.threshold_sigma * std", "def demo_one_filter():\n data = [1.3, 2.7, 0.8, 4.1, 4.3, -0.1]\n avg = np.mean(data)\n print \"average value is:\", avg\n\n # create iterator that filters to keep only above average data\n above_avg_iter = filter(lambda x: x > avg, data) # returns iterator for data above the avg\n\n print \"values strictly above average are:\", list(above_avg_iter)", "def bare_soil(sil):\n sil.get_color_params()\n if ((sil.l_mean < 160) and (sil.a_std > 3)):\n return False\n else:\n return True", "def do_lowzcut_check(cat, subdir):\n lowzcut = cat.lowzcut\n cat.lowzcut = True\n cat.plot_omega_dla(zmax=5,label=\"Cutting\")\n cat.lowzcut = False\n cat.plot_omega_dla(zmax=5,label=\"Not cutting\")\n plt.legend(loc=0)\n save_figure(path.join(subdir,\"omega_gp_lowz\"))\n plt.clf()\n\n cat.lowzcut = True\n cat.plot_line_density(zmax=5,label=\"Cutting\")\n cat.lowzcut = False\n cat.plot_line_density(zmax=5,label=\"Not cutting\")\n plt.ylim(0,0.12)\n plt.legend(loc=0)\n save_figure(path.join(subdir,\"dndx_gp_lowz\"))\n plt.clf()\n cat.lowzcut = lowzcut", "def _find_cutoff(self):\n cutoff = 1\n while ((self.linear_rstar_unnorm(cutoff) -\n self.turing_rstar_unnorm(cutoff))**2\n > self.approx_turing_variance(cutoff)):\n cutoff += 1\n return cutoff", "def check_peak_win(self):\n if self.peak_win[0] < 0.0:\n self.peak_win[0] = 0.0\n if self.logger is not None:\n self.logger.warning(('Start of peak window < 0 sec for cond: {}. 
' +\n 'Setting to 0.').format(self.cond))\n if self.peak_win[1] > self.psc_dur:\n self.peak_win[1] = self.psc_dur\n if self.logger is not None:\n logger.warning(('End of peak window is longer than trial HRF ' +\n 'for cond: {}. Truncating.').format(self.cond))\n return", "def _threshold(data, sigma=2.0):\r\n return np.mean(data)-sigma*np.sqrt(np.var(data))", "def get_strict_valence_flag(self):\n d_min = self.fmodel.f_obs().d_min()\n return (d_min < self.params.d_min_strict_valence)" ]
[ "0.63532436", "0.63532436", "0.63532436", "0.6349902", "0.6218509", "0.6218509", "0.6218509", "0.6137946", "0.60649145", "0.6062961", "0.6055984", "0.59992933", "0.5966027", "0.5953051", "0.58949625", "0.5885765", "0.579604", "0.5673176", "0.56725395", "0.56724554", "0.5664932", "0.5652505", "0.5640736", "0.56392604", "0.56364155", "0.5627149", "0.55939347", "0.5580243", "0.55794096", "0.5574232" ]
0.67541367
0
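As an aside, a tiny self-contained sketch of the cutoff check performed by the low_abundance document in the row above, applied to a made-up profile payload (illustrative only; not a row of this dataset).

import json
from statistics import mean

profile = json.dumps({"data": {"leaf": [2.0, 4.0], "root": [15.0, 20.0]}})
data = json.loads(profile)
# One check per condition: is the mean expression above the cutoff of 10?
checks = [mean(v) > 10 for v in data["data"].values()]
print(not any(checks))  # False: the "root" condition averages above the cutoff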
Applies a conversion to the profile, grouping several conditions into one more general feature (e.g. tissue).
def tissue_profile(self, condition_tissue_id, use_means=True):
    ct = ConditionTissue.query.get(condition_tissue_id)
    condition_to_tissue = json.loads(ct.data)
    profile_data = json.loads(self.profile)
    output = ExpressionProfile.convert_profile(
        condition_to_tissue, profile_data, use_means=use_means
    )
    return output
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_profile(condition_to_tissue, profile_data, use_means=True):\n tissues = list(set(condition_to_tissue[\"conversion\"].values()))\n\n output = {}\n\n for t in tissues:\n valid_conditions = [\n k\n for k in profile_data[\"data\"]\n if k in condition_to_tissue[\"conversion\"]\n and condition_to_tissue[\"conversion\"][k] == t\n ]\n valid_values = []\n for k, v in profile_data[\"data\"].items():\n if k in valid_conditions:\n if use_means:\n valid_values.append(mean(v))\n else:\n valid_values += v\n\n output[t] = valid_values if len(valid_values) > 0 else [0]\n\n return {\n \"order\": condition_to_tissue[\"order\"],\n \"colors\": condition_to_tissue[\"colors\"],\n \"data\": output,\n }", "def _score_to_decision(self, score):", "def mixed_precision_lint(self):\n report = OrderedDict()\n\n df = self.convs\n df = df.loc[df['precision'] == 'INT8'].copy()\n for index, conv in df.iterrows():\n inputs, outputs = create_activations(conv)\n inf = inputs[0].format[:4]\n outf = outputs[0].format[:4]\n found = inf == 'Int8' and outf != 'Int8'\n if found:\n report[conv.Name] = OrderedDict({\n 'name': conv.Name,\n 'tactic': conv.tactic,\n 'subtype': conv.subtype,\n 'hazard': \"Quantized Convolution has float outputs.\",\n 'mitigation': \"Consider adding quantization after the convolution.\",\n 'help': \"Quantized Convolution with float outputs is ill advised \"\n \"for memory-limited convolutions.\"\n })\n return report", "def cast(*args):\n return _itkTernaryAddImageFilterPython.itkTernaryAddImageFilterIUC2IUC2IUC2IUC2_cast(*args)", "def create_profiles(tree, attribute, out_feature='altitude',\n filtering_rule='direct', profiles_name='unknow'):\n data = []\n description = []\n\n # Create Trees\n try:\n if isinstance(tree, trees.Tree):\n # Dual tree\n ndual = False\n thinning_tree = None\n thickening_tree = tree\n else:\n # Non dual trees\n ndual = True\n thinning_tree = tree[0]\n thickening_tree = tree[1]\n except:\n raise TypeError('Parameter tree_type must be a tuple or a single' \\\n ' instance of Tree, not {}'.format(tree))\n\n out_features = (out_feature, ) if isinstance(out_feature, str) else out_feature\n\n iter_count = (sum(len(x) for x in attribute.values()) * (1 + ndual) + \\\n len(attribute)) * len(out_features)\n ttq = tqdm(desc='Total', total=iter_count)\n for att, thresholds in attribute.items():\n tq = tqdm(total=(len(thresholds) * (1 + ndual) + 1) * len(out_features), desc=att)\n\n for out_feature in out_features:\n profiles = []; profiles_description = []\n of = att if out_feature == 'same' else out_feature\n\n if ndual:\n # thinning\n prof, desc = _compute_profiles(thinning_tree, att,\n thresholds[::-1], (ttq, tq), of, filtering_rule)\n profiles += prof\n profiles_description += desc\n\n # Origin\n tq.update(); ttq.update()\n profiles += [thickening_tree.reconstruct(feature=of)]\n profiles_description += [{'operation': 'copy feature {}'.format(of)}]\n\n # thickening\n prof, desc = _compute_profiles(thickening_tree, att, thresholds,\n (ttq, tq), of, filtering_rule)\n profiles += prof\n profiles_description += desc\n\n\n data += [np.stack(profiles)]\n description += [{'tree': thickening_tree.get_params(),\n 'name': profiles_name,\n 'attribute': att,\n 'profiles': profiles_description,\n 'filtering rule': filtering_rule,\n 'out feature': of}]\n tq.close()\n ttq.close()\n\n return Profiles(data, description)", "def do_analyse(args):\n\n pd.set_option('display.max_columns', None)\n pd.set_option('display.width', None)\n\n score = 'score'\n\n # Read in the results, and add a boolean 
target column.\n df = pd.read_csv(args.results, index_col=0)\n df['target'] = df['verify_speaker'] == df['enrol_speaker']\n\n # Calculate ideal 0.01% threshold over the multi-session data.\n nontarget_df = df.loc[df['target'] == False].sort_values(score, ascending=False)\n nontarget_count = nontarget_df[score].count()\n th_calc = nontarget_df.iloc[int(nontarget_count * (1 / 10000))][score]\n\n # Now filter the data so that we only consider mono-session enrolment and verification.\n df = df.loc[df['verify_room'] == df['enrol_room']]\n target_df = df.loc[df['target'] == True].sort_values(score, ascending=False)\n nontarget_df = df.loc[df['target'] == False].sort_values(score, ascending=False)\n target_count = target_df[score].count()\n nontarget_count = nontarget_df[score].count()\n\n # Calculate FA/FR for the user-defined threshold.\n th_user = args.th_user\n fr_user = target_df.loc[target_df[score] < th_user][score].count()\n fa_user = nontarget_df.loc[nontarget_df[score] > th_user][score].count()\n frr_user = fr_user / target_count\n far_user = fa_user / nontarget_count\n label_user = 'User Threshold: th {:.4f}, FR {} ({:.3f}%), FA {} ({:.3f}%)'.format(th_user, fr_user, frr_user * 100,\n fa_user, far_user * 100)\n\n # Calculate the FA/FR for the ideal threshold calculated from the multi-session data.\n fr_calc = target_df.loc[target_df[score] < th_calc][score].count()\n fa_calc = nontarget_df.loc[nontarget_df[score] > th_calc][score].count()\n frr_calc = fr_calc / target_count\n far_calc = fa_calc / nontarget_count\n label_calc = 'Calc Threshold: th {:.4f}, FR {} ({:.3f}%), FA {} ({:.3f}%)'.format(th_calc, fr_calc, frr_calc * 100,\n fa_calc, far_calc * 100)\n\n # Print the stats.\n print('\\nTarget Stats:')\n print(target_df[score].describe())\n print('\\nNon-Target Stats:')\n print(nontarget_df[score].describe())\n print('\\nThresholds:')\n print(label_user)\n print(label_calc)\n\n # Paint the graphs.\n paint_graph(score, 'verify_room', df, th_user, label_user, th_calc, label_calc)\n paint_graph(score, 'enrol_room', df, th_user, label_user, th_calc, label_calc)\n paint_graph(score, 'verify_speaker', df, th_user, label_user, th_calc, label_calc)\n paint_graph(score, 'enrol_speaker', df, th_user, label_user, th_calc, label_calc)", "def preprocess(df, edit = False):\n\tdf['binary_income'] = np.where(df['binary_income'].str.contains('-'), 0, 1)\n\ty = df['binary_income']\n\n\tTRAINING_SET_MEAN = y.mean() # For ensembling w/ linear combination.\n\n\tif edit:\n\t\t# If they make over $100 / hr but have no capital gains and losses, assume the wage entry is a mistake and replace with median.\n\t\tdf['wage'] = np.where(((df['wage'] > 100) & (df['capital_gains'] == 0) & (df['capital_gains'] == 0)), df['wage'].median(), df['wage'])\n\t\t# Total calculated earnings for the year. 
\n\t\tdf['ttl_clcltd_ernings'] = 40*df['wage']*df['weeks']\n\n\t\tdf['professional'] = np.where(df['majoroc'].str.contains('Professional|Executive|Sales|Precision',regex = True), 1,0)\n\t\tdf['white'] = np.where(df['mace'].str.contains('White|Asian',regex = True), 1,0)\n\t\tdf['ad_degree'] = np.where(df['education'].str.contains('Bachelors|Doctorate|Masters|Prof',regex = True), 1,0)\n\t\tdf['younger22'] = np.where(df['age']<22, 1,0)\n\t\tdf['older65'] = np.where(df['age']>65, 1,0)\n\t\tdf['occupation2'] = np.where(df['occupation'] == 2, 1,0)\t\t\n\t\t# df['log_cgs'] = np.log(df['capital_gains'])\n\t\tdf['jointu65_tax'] = np.where(df['tax'] == 'Joint both under 65', 1,0)\n\n\treturn df, y, TRAINING_SET_MEAN", "def itkTernaryAddImageFilterIUC2IUC2IUC2IUC2_cast(*args):\n return _itkTernaryAddImageFilterPython.itkTernaryAddImageFilterIUC2IUC2IUC2IUC2_cast(*args)", "def assign_class_from_feature_annotation(feature_annotation_df):\n print \"============Start assigning and encoding classes=================\"\n print \"================thresholds=====================\"\n walkthreshold = 0.3\n ambithreshold = 0.25\n phonethreshold = 0.3\n computerthreshold = 0.3\n activity_thresholds = [0.3,0.25,0.3,0.3,0.3,0.3,0.3,0.25]\n activity_names = ['walking','eating','phone','computer','talking', 'reading','in car', 'drinking']\n class_codes = [1,2,3,4,5,6,7,8]\n activity_cols = [s_info.walkproportion_col,\n s_info.eatproportion_col,\n s_info.phoneproportion_col,\n s_info.computerproportion_col,\n s_info.talkproportion_col,\n s_info.readproportion_col,\n s_info.carproportion_col,\n s_info.drinkproportion_col]\n puff_with_puff_duration_threshold = 0.3\n puff_with_segment_duration_threshold = 0.3\n print \"walking: \" + str(walkthreshold)\n print \"drinking/eating: \" + str(ambithreshold)\n print \"phone: \" + str(phonethreshold)\n print \"computer: \" + str(computerthreshold)\n print \"puff: \" + str(puff_with_puff_duration_threshold) + \", \" + str(puff_with_segment_duration_threshold)\n class_df = feature_annotation_df.copy(deep=True)\n \n # class assignment rules\n class_df[s_info.classname_col] = 'others'\n class_df[s_info.classnum_col] = 0\n\n for code, name, threshold, col in zip(class_codes, activity_names, activity_thresholds, activity_cols):\n flag = class_df[col] >= threshold\n class_df[s_info.classname_col][flag] = name\n class_df[s_info.classnum_col][flag] = code\n # for short window size\n puff_flag1 = class_df[s_info.puff_with_segment_duration_col] >= puff_with_segment_duration_threshold\n class_df[s_info.classname_col][puff_flag1] = 'puff'\n class_df[s_info.classnum_col][puff_flag1] = len(class_codes) + 1\n # for short puffs\n puff_flag2 = class_df[s_info.puff_with_puff_duration_col] >= puff_with_puff_duration_threshold\n class_df[s_info.classname_col][puff_flag2] = 'puff'\n class_df[s_info.classnum_col][puff_flag2] = len(class_codes) + 1\n return class_df", "def transformation(self):\n for key in self.combination_dict.keys():\n if self.combination_dict[key]['column_count'] == 2:\n if self.data_dict[self.combination_dict[key]['column1']]['data_type'] == 'tem' or self.data_dict[self.combination_dict[key]['column2']]['data_type'] == 'tem':\n self.temporal_transformation(self.combination_dict[key])\n elif self.data_dict[self.combination_dict[key]['column1']]['data_type'] == 'cat' or self.data_dict[self.combination_dict[key]['column2']]['data_type'] == 'cat':\n self.categorical_transformation(self.combination_dict[key])\n elif self.data_dict[self.combination_dict[key]['column1']]['data_type'] == 
'num' and self.data_dict[self.combination_dict[key]['column2']]['data_type'] == 'num':\n self.numerical_transformation(self.combination_dict[key])\n\n elif self.combination_dict[key]['column_count'] == 3:\n num_count = 0\n num_column = []\n if self.data_dict[self.combination_dict[key]['column1']]['data_type'] == 'num':\n num_count += 1\n num_column.append(0)\n elif self.data_dict[self.combination_dict[key]['column2']]['data_type'] == 'num':\n num_count += 1\n num_column.append(1)\n elif self.data_dict[self.combination_dict[key]['column3']]['data_type'] == 'num':\n num_count += 1\n num_column.append(2)\n\n if num_count == 1:\n self.three_column_groupby_logic(self.combination_dict[key], num_column)\n\n m_score_pie = []\n m_score_bar = []\n m_score_line = []\n m_score_scatter = []\n # for key in self.scenario_dict:\n # if self.scenario_dict\n for key in self.scenario_dict:\n if math.isnan(self.scenario_dict[key][\"Scatter_chart_score\"]):\n m_score_scatter.append(0)\n else:\n m_score_scatter.append(self.scenario_dict[key][\"Scatter_chart_score\"])\n m_score_pie.append(self.scenario_dict[key][\"Pie_chart_score\"])\n m_score_bar.append(self.scenario_dict[key][\"Bar_chart_score\"])\n m_score_line.append(self.scenario_dict[key][\"Line_chart_score\"])\n\n m_score_pie /= np.max(m_score_pie)\n m_score_bar /= np.max(m_score_bar)\n m_score_line /= np.max(m_score_line)\n m_score_scatter /= np.max(m_score_scatter)\n m_score = [m_score_pie, m_score_bar, m_score_line, m_score_scatter]\n match_index = np.argmax(m_score, axis = 0)\n i = 0\n for key in self.scenario_dict:\n if match_index[i] == 0:\n self.scenario_dict[key][\"Chart_Type\"] = \"pie\"\n if match_index[i] == 1:\n self.scenario_dict[key][\"Chart_Type\"] = \"bar\"\n if match_index[i] == 2:\n self.scenario_dict[key][\"Chart_Type\"] = \"line\"\n if match_index[i] == 3:\n self.scenario_dict[key][\"Chart_Type\"] = \"scatter\"\n self.scenario_dict[key][\"m_score\"] = m_score[match_index[i]][i]\n i += 1\n\n return self.scenario_dict", "def main(threshold):\n print(\"Processing\", threshold)\n subprocess.run([\"rm\", \"-rf\", \"data/tanzania_flood/threshold_{}\".format(threshold)])\n subprocess.run([\"mkdir\", \"data/tanzania_flood/threshold_{}\".format(threshold)])\n for arg_set in generate_args(threshold):\n convert(threshold=threshold, **arg_set)", "def convert_assignments(self, exprs):\n boolean = self.model.get_units_by_name('cellml:boolean')\n for expr in exprs:\n if isinstance(expr, mathml_apply):\n# print 'Converting? 
assignment', element_xpath(expr)\n if self.special_conversions:\n self.try_convert(self._check_special_conversion, expr)\n self.try_convert(expr._set_in_units, boolean)", "def cleanup_dataset(euctr_cond):\n euctr_cond['date_of_the_global_end_of_the_trial'] = pd.to_datetime(euctr_cond['date_of_the_global_end_of_the_trial'])\n euctr_cond['trial_is_part_of_a_paediatric_investigation_plan'] = (euctr_cond['trial_is_part_of_a_paediatric_investigation_plan'] == True).astype(int)\n euctr_cond['trial_human_pharmacology_phase_i'] = (euctr_cond['trial_human_pharmacology_phase_i']== True).astype(int)\n euctr_cond['trial_therapeutic_exploratory_phase_ii'] = (euctr_cond['trial_therapeutic_exploratory_phase_ii']== True).astype(int)\n euctr_cond['trial_therapeutic_confirmatory_phase_iii'] = (euctr_cond['trial_therapeutic_confirmatory_phase_iii']== True).astype(int)\n euctr_cond['trial_therapeutic_use_phase_iv'] = (euctr_cond['trial_therapeutic_use_phase_iv']== True).astype(int)\n euctr_cond['not_bioequivalence_study'] = (euctr_cond['trial_bioequivalence_study']== False).astype(int)\n euctr_cond['trial_bioequivalence_study'] = (euctr_cond['trial_bioequivalence_study']== True).astype(int)\n euctr_cond['rare_disease_blank'] = (euctr_cond['trial_condition_being_studied_is_a_rare_disease'] == 'Information not present in EudraCT').astype(int)\n euctr_cond['not_rare_disease'] = (euctr_cond['trial_condition_being_studied_is_a_rare_disease'] == 'No').astype(int)\n euctr_cond['trial_condition_being_studied_is_a_rare_disease'] = (euctr_cond['trial_condition_being_studied_is_a_rare_disease'] == 'Yes').astype(int)\n euctr_cond['not_single_blind'] = (euctr_cond['trial_single_blind']== False).astype(int)\n euctr_cond['trial_single_blind'] = (euctr_cond['trial_single_blind']== True).astype(int)\n euctr_cond['not_healthy_volunteers'] = (euctr_cond['subject_healthy_volunteers']== False).astype(int)\n euctr_cond['subject_healthy_volunteers'] = (euctr_cond['subject_healthy_volunteers']== True).astype(int)\n\n # Nick's notebook used pandas.notna, we reimplement a simplified version\n # here for compatibility with pandas 0.19\n def euctr_notna(x):\n return not (x is None or x is np.nan)\n euctr_cond['trial_results'] = (euctr_cond['trial_results'].apply(euctr_notna)).astype(int)\n\n euctr_cond.rename(columns={'full_title_of_the_trial':'full_title', 'name_or_abbreviated_title_of_the_trial_where_available': 'abbreviated_title'}, inplace=True)\n euctr_cond['non_eu'] = euctr_cond.eudract_number_with_country.str.contains('-3rd').astype(int)", "def convert(self, function=pointwise_mi):\n self.normalise()\n feat_prob = Counter()\n for feat_set in self.itervalues():\n for feat in feat_set:\n feat_prob[feat] += feat_set[feat]\n \n for feat_set in self.itervalues():\n code_prob = sum(feat_set.values())\n for feat in feat_set:\n feat_set[feat] = function(code_prob, feat_prob[feat], feat_set[feat])\n return self", "def cast(*args):\n return _itkTernaryAddImageFilterPython.itkTernaryAddImageFilterIUC3IUC3IUC3IUC3_cast(*args)", "def create_features_customer(profile, transcript_training):\n\n # create avg/min/max amount features. 
Need to calculate amount features from transcript\n # because transcript_training only contains transactions for offer received and viewed.\n # such transactions do not have amount associated\n\n query = \"\"\"\n SELECT a.person, min(amount) as min_amount, max(amount) as max_amount, avg(amount) as avg_amount\n FROM transcript a\n JOIN transcript_quantile b\n ON a.person = b.person \n WHERE a.time <= b.received_time\n GROUP BY a.person\n \"\"\"\n\n profile_amount = u.read_dataframe_from_sql(query).set_index('person')\n\n # create avg/min/max amount duration_view\n profile_duration = create_features_using_groupby(transcript_training\\\n \t, 'profile', 'duration_view')\n\n # create view rate (average of label)\n profile_view_rate = create_features_using_groupby(transcript_training, 'profile', 'label'\\\n \t, minimum=False, maximum=False)\n profile_view_rate.columns=['view_rate_profile']\n\n # create trx rate (count of transactions per person/(max received time - min received time))\n profile_trx_rate = (transcript_training.groupby('person').size()*100\\\n /(transcript_training.groupby('person')['received_time'].max() \\\n - transcript_training.groupby('person')['received_time'].min())).reset_index()\n profile_trx_rate.columns = ['person', 'avg_trx_cnt']\n # set trx rate = 1 if max received time == min received time\n profile_trx_rate.loc[profile_trx_rate['avg_trx_cnt']==np.inf, 'avg_trx_cnt'] = 1\n profile_trx_rate = profile_trx_rate.set_index('person')\n\n profile_feat = profile_amount.join(profile_duration)\\\n .join(profile_view_rate).join(profile_trx_rate)\n\n assert pd.merge(profile, profile_feat, how='left', left_index=True, right_index=True).shape[0] == profile.shape[0]\\\n , \"rows do not match with original data (profile)\"\n\n profile = pd.merge(profile, profile_feat, how='left', left_index=True, right_index=True)\n\n return profile", "def preprocess_mf3(bd, var1, var2, var3):\n filepath_mvf = f\"team67-ptp/data/{var1}_{var2}_{var3}.ftr\"\n filepath = bd\n data = feather.read_dataframe(filepath)\n df = data.copy()\n df2 = df[[var1, var2, var3]]\n if (\n df2[var1].dtype is \"category\"\n and df2[var2].dtype is \"category\"\n and df2[var3].dtype is \"category\"\n ):\n df2[var1] = df2[var1].astype(\"category\").cat.codes\n df2[var2] = df2[var2].astype(\"category\").cat.codes\n df2[var3] = df2[var3].astype(\"category\").cat.codes\n filename = filepath_mvf\n df2.to_csv(filename)\n print(\"Succesfully exported to feather\")\n elif (\n df2[var1].dtype is \"category\"\n and df2[var2].dtype is not \"category\"\n and df2[var3].dtype is not \"category\"\n ):\n df2[var1] = df2[var1].astype(\"category\").cat.codes\n filename = filepath_mvf\n df2.to_csv(filename)\n print(\"Succesfully exported to feather\")\n elif (\n df2[var1].dtype is not \"category\"\n and df2[var2].dtype is \"category\"\n and df2[var3].dtype is \"category\"\n ):\n df2[var2] = df2[var2].astype(\"category\").cat.codes\n filename = filepath_mvf\n df2.to_feather(filename)\n print(\"Succesfully exported to feather\")\n else:\n filename = filepath_mvf\n df2.to_feather(filename)\n print(\"Succesfully exported to feather\")", "def itkTernaryAddImageFilterIUC3IUC3IUC3IUC3_cast(*args):\n return _itkTernaryAddImageFilterPython.itkTernaryAddImageFilterIUC3IUC3IUC3IUC3_cast(*args)", "def _process_profiles(profiles):\n # keep_attributes = str.split(\"user_id public completion_percentage gender region last_login registration age\")\n # p2=profiles[keep_attributes]\n p2 = profiles\n p2['region'] = p2['region'].astype('category')\n 
p2['public'] = p2['public'].astype('category')\n p2['gender'] = p2['gender'].astype('category')\n p2['last_login'] = pd.to_datetime(p2['last_login'])\n p2['registration'] = pd.to_datetime(p2['registration'])\n p2.loc[p2.age == 0, 'age'] = np.nan\n\n return p2", "def preprocess_mf(bd, var1, var2):\n filepath_mvf = f\"team67-ptp/data/{var1}_{var2}.ftr\"\n filepath = bd\n data = feather.read_dataframe(filepath)\n df = data.copy()\n df2 = df[[var1, var2]]\n if df2[var1].dtype is \"category\" and df2[var2].dtype is \"category\":\n df2[var1] = df2[var1].astype(\"category\").cat.codes\n df2[var2] = df2[var2].astype(\"category\").cat.codes\n filename = filepath_mvf\n df2.to_csv(filename)\n print(\"Succesfully exported to feather\")\n elif df2[var1].dtype is \"category\" and df2[var2].dtype is not \"category\":\n df2[var1] = df2[var1].astype(\"category\").cat.codes\n filename = filepath_mvf\n df2.to_csv(filename)\n print(\"Succesfully exported to feather\")\n elif df2[var1].dtype is not \"category\" and df2[var2].dtype is \"category\":\n df2[var2] = df2[var2].astype(\"category\").cat.codes\n filename = filepath_mvf\n df2.to_feather(filename)\n print(\"Succesfully exported to feather\")\n else:\n filename = filepath_mvf\n df2.to_feather(filename)\n print(\"Succesfully exported to feather\")", "def filter_and_correct_expression_and_image_features(tissue, model, aggregation, patch_size, M, k, pc_correction=False, tf_correction=False):\n\n\n\n\n # Filter expression\n Y, X, dIDs, tIDs, tfs, ths, t_idx = extract_final_layer_data(tissue, model, aggregation, patch_size)\n filt_X, filt_tIDs, final_exp_idx = filter_expression(X, tIDs, M, k)\n\n\n\n if pc_correction:\n print ('Correcting with {} expression PCs'.format(pc_correction))\n pca = PCA(n_components=pc_correction)\n\n\n pca_predictors = pca.fit_transform(filt_X)\n\n # Correct Y\n lr = LinearRegression()\n lr.fit(pca_predictors, Y)\n predicted_Y = lr.predict(pca_predictors)\n corrected_Y = Y - predicted_Y\n\n # Correct X\n projected_filt_X = np.dot(pca_predictors,pca.components_)\n corrected_filt_X = filt_X - projected_filt_X\n\n # Set as return variables\n final_X = corrected_filt_X\n final_Y = corrected_Y\n\n elif tf_correction:\n print('Correcting with all technical factors')\n tf_Y = Y[t_idx,:]\n tf_filt_X = filt_X[t_idx,:]\n\n tfs[list(ths).index('SMTSISCH')] = np.log2(tfs[list(ths).index('SMTSISCH')] + 1)\n tf_predictors = tfs\n\n #Correct Y\n lr_Y = LinearRegression()\n lr_Y.fit(tf_predictors, tf_Y)\n tf_Y_predicted = lr_Y.predict(tf_predictors)\n corrected_tf_Y = tf_Y - tf_Y_predicted\n\n #Correct X\n lr_X = LinearRegression()\n lr_X.fit(tf_predictors, tf_filt_X)\n tf_filt_X_predicted = lr_X.predict(tf_predictors)\n corrected_tf_filt_X = tf_filt_X - tf_filt_X_predicted\n\n # Set as return variables\n final_X = corrected_tf_filt_X\n final_Y = corrected_tf_Y\n else:\n # Set unmodified values as return variables\n final_X = filt_X\n final_Y = Y\n\n return final_Y, final_X, dIDs, filt_tIDs, tfs, ths, t_idx", "def _another_traffic_source_preprocessing(self, df):\n # For 'campaign' & 'keyword'\n train_df = df.copy(deep=False)\n le = preprocessing.LabelEncoder()\n to_encode = ['campaign', 'keyword']\n for item in to_encode:\n item_key = 'trafficSource.' 
+ item\n encoding_key = 'encoding_' + item\n train_df[item_key] = train_df[item_key].fillna(\"missing\")\n fitting_label = train_df[item_key].unique()\n le.fit(fitting_label)\n train_df[encoding_key] = le.transform(train_df[item_key])\n # Now for 'isTrueDirect'\n item_key = 'trafficSource.isTrueDirect'\n encoding_key = 'encoding_isTrueDirect'\n train_df[encoding_key] = train_df[item_key].fillna(False)\n\n train_gdf = train_df.groupby('fullVisitorId')\n return train_gdf['encoding_campaign'].sum(), train_gdf['encoding_isTrueDirect'].sum(), train_gdf['encoding_keyword'].sum()", "def check_cti(image, CTI, verbose=0):\n\n#\n# Initialize ctiDict\n#\n ctiDict = {'isCTI': False}\n ctiDict['expnum'] = image['EXPNUM']\n\n # Also create the BAND and NITE keywords if they are not present\n try:\n image['BAND']\n except:\n image['BAND'] = decaminfo.get_band(image['FILTER'])\n try:\n image['NITE']\n except:\n image['NITE'] = decaminfo.get_nite(image['DATE-OBS'])\n\n band = image['BAND'].strip()\n sec = section2slice(image['DATASEC' + CTI['amp']])\n#\n# This could become useful if it is necessary to start examining the opposite amplifier in\n# conjunction with the amplifier that is having a problem\n#\n# if (CTI['amp']==\"A\"):\n# osec = section2slice(image['DATASEC'+'B'])\n# else:\n# osec = section2slice(image['DATASEC'+'A'])\n\n maxiter = 10\n converge_num = 0.0001\n clipsig = 3.0\n\n clip_avg, clip_med, clip_std = lb.medclip(image.data[sec], clipsig, maxiter, converge_num, verbose=0)\n logger.info(' CTI: Global(clipped): median = {:.3f}, stddev = {:.3f} '.format(clip_med, clip_std))\n ctiDict['cmed'] = float(clip_med)\n ctiDict['cstd'] = float(clip_std)\n clow = clip_med - (3.0 * clip_std)\n ctiDict['clow'] = float(clow)\n\n# oclip_avg,oclip_med,oclip_std=medclip(image.data[osec],clipsig,maxiter,converge_num,verbose)\n# print(\" Global(oclipped): median = {:.3f}, stddev = {:.3f} \".format(oclip_med,oclip_std))\n# oclow=oclip_med-(3.0*oclip_std)\n\n#\n# Obtain row-by-row median to look for horizontal striping (also needed to check/reject edgebleeds)\n#\n row_med = np.median(image.data[sec], axis=1)\n wsm = np.where(row_med < clow)\n nrow_low = row_med[wsm].size\n#\n# Hacky attempt to check for edge-bleed\n#\n iedge = [4, 4091]\n while row_med[iedge[0]] < clow:\n iedge[0] = iedge[0] + 1\n while row_med[iedge[1]] < clow:\n iedge[1] = iedge[1] - 1\n if iedge[0] == 4:\n iedge[0] = 0\n if iedge[1] == 4091:\n iedge[1] = 4095\n nrow_edge = 4096 - (iedge[1] - iedge[0] + 1)\n logger.info(' CTI: Number of low rows: {:d} (nrow_edge={:d}) '.format(nrow_low, nrow_edge))\n\n#\n# Blank out pixels that are below the 3-sigma level with respect to median\n# This removes power from vertical stripes\n#\n wsm = np.where(image.data[sec] < clow)\n npix_low = image.data[sec][wsm].size\n logger.info(' CTI: Number of low pixels: {:d} '.format(npix_low))\n u = image.data[sec] - clip_med\n u[wsm] = 0.0\n#\n# Harder cut currently not needed. 
If used this would get rid of all pixels below the median level\n# (effectively this reduces the amount that noise suppresses contrast of the auto-correlation signal from CTI)\n#\n# wsm=np.where(u<0.)\n# npix_zero=u[wsm].size\n# logger.info(' CTI: Number of sub-zero pixels: {:d} '.format(npix_zero))\n# u[wsm]=0.0\n\n#\n# Calculate a set of auto-correlations by sampling lags in the x-direction and\n# then two diaganol sets at PA=+/-45 degrees\n# Note: y-direction lags would be succeptible to both bad columns and bleeds.\n# These are normalized by the auto-correlation with lag 0 (defined as 'a' below).\n# Take a maximum lag that will be calculated and use that to trim the image.\n# Note: This both gets rid of most edge-effects automatically but also removes the need to calculate an effective normalization for higher lags\n#\n maxlag = 100\n lagList = [0, 1, 3, 5, 7, 11, 15, 19, 23, 31, 37, 45]\n\n a = np.sum(u[maxlag:-maxlag, maxlag:-maxlag] * u[maxlag:-maxlag, maxlag:-maxlag])\n# b=np.sum(v[maxlag:-maxlag,maxlag:-maxlag]*v[maxlag:-maxlag,maxlag:-maxlag])\n x = [1.0]\n d1 = [1.0]\n d2 = [1.0]\n# vx=[1.0]\n# vd1=[1.0]\n# vd2=[1.0]\n#\n# More lags than those sampled are needed because the diagonal (PA=+/-45) measures will need to be interpolated\n# for comaparison to lags in the x-direction.\n#\n\n for lag in lagList:\n if lag != 0:\n x.append(np.sum(u[maxlag:-maxlag, maxlag:-maxlag] * u[maxlag:-maxlag, maxlag - lag:-maxlag - lag]) / a)\n d1.append(np.sum(u[maxlag:-maxlag, maxlag:-maxlag] * u[maxlag-lag:-maxlag - lag, maxlag - lag:-maxlag - lag]) / a)\n d2.append(np.sum(u[maxlag:-maxlag, maxlag:-maxlag] * u[maxlag-lag:-maxlag - lag, maxlag + lag:-maxlag + lag]) / a)\n# vx.append(np.sum(v[maxlag:-maxlag,maxlag:-maxlag]*v[maxlag:-maxlag,maxlag-lag:-maxlag-lag])/b)\n# vd1.append(np.sum(v[maxlag:-maxlag,maxlag:-maxlag]*v[maxlag-lag:-maxlag-lag,maxlag-lag:-maxlag-lag])/b)\n# vd2.append(np.sum(v[maxlag:-maxlag,maxlag:-maxlag]*v[maxlag-lag:-maxlag-lag,maxlag+lag:-maxlag+lag])/b)\n\n data = {'lag': np.array(lagList),\n 'x': np.array(x),\n 'd1': np.array(d1),\n 'd2': np.array(d2)\n# 'vx':np.array(vx),\n# 'vd1':np.array(vd1),\n# 'vd2':np.array(vd2)\n }\n\n r2 = np.sqrt(2.0)\n l1 = data['lag']\n l2 = data['lag'] * r2\n x1 = data['x']\n d1i = np.interp(data['lag'], l2, data['d1'])\n d2i = np.interp(data['lag'], l2, data['d2'])\n rd1 = data['x'] / d1i\n rd2 = data['x'] / d2i\n\n# vx1=data['vx']\n# vd1i=np.interp(data['lag'],l2,data['vd1'])\n# vd2i=np.interp(data['lag'],l2,data['vd2'])\n# vrd1=data['vx']/vd1i\n# vrd2=data['vx']/vd2i\n## vdx=data['x']/data['vx']\n# vdx=(rd1+rd2)/(vrd1+vrd2)\n\n logger.info(' CTI: lags {:8.2f} {:8.2f} {:8.2f} {:8.2f} {:8.2f} '.format(l1[3], l1[4], l1[6], l1[8], l1[10]))\n logger.info(' CTI: lx {:8.2f} {:8.2f} {:8.2f} {:8.2f} {:8.2f} '.format(x1[3], x1[4], x1[6], x1[8], x1[10]))\n logger.info(' CTI: d1i {:8.2f} {:8.2f} {:8.2f} {:8.2f} {:8.2f} '.format(d1i[3], d1i[4], d1i[6], d1i[8], d1i[10]))\n logger.info(' CTI: d2i {:8.2f} {:8.2f} {:8.2f} {:8.2f} {:8.2f} '.format(d2i[3], d2i[4], d2i[6], d2i[8], d2i[10]))\n logger.info(' CTI: ld1 {:8.2f} {:8.2f} {:8.2f} {:8.2f} {:8.2f} '.format(rd1[3], rd1[4], rd1[6], rd1[8], rd1[10]))\n logger.info(' CTI: ld2 {:8.2f} {:8.2f} {:8.2f} {:8.2f} {:8.2f} '.format(rd2[3], rd2[4], rd2[6], rd2[8], rd2[10]))\n# logger.info(' CTI: lvx {:8.2f} {:8.2f} {:8.2f} {:8.2f} {:8.2f} '.format(vx1[3],vx1[4],vx1[6],vx1[8],vx1[10]))\n# logger.info(' CTI:vd1i {:8.2f} {:8.2f} {:8.2f} {:8.2f} {:8.2f} '.format(vd1i[3],vd1i[4],vd1i[6],vd1i[8],vd1i[10]))\n# logger.info(' 
CTI:vd2i {:8.2f} {:8.2f} {:8.2f} {:8.2f} {:8.2f} '.format(vd2i[3],vd2i[4],vd2i[6],vd2i[8],vd2i[10]))\n# logger.info(' CTI:vld1 {:8.2f} {:8.2f} {:8.2f} {:8.2f} {:8.2f} '.format(vrd1[3],vrd1[4],vrd1[6],vrd1[8],vrd1[10]))\n# logger.info(' CTI:vld2 {:8.2f} {:8.2f} {:8.2f} {:8.2f} {:8.2f} '.format(vrd2[3],vrd2[4],vrd2[6],vrd2[8],vrd2[10]))\n# logger.info(' CTI:vdx0 {:8.2f} {:8.2f} {:8.2f} {:8.2f} {:8.2f} '.format(vdx[3],vdx[4],vdx[6],vdx[8],vdx[10]))\n\n#\n# Set band dependent thresholds...\n# Note the criteria used are based on an empirical study of the one example we currently have (CCD=41, Y6)\n#\n nrow_lim = 5\n if band != \"Y\":\n cclim = 0.9\n else:\n cclim = 1.15\n#\n# Now check and set flag based on empirical critera.\n# First are the horizontal streaks that can appear...\n# Second are the comparison of the auto-correlation in the x and average of the diaganol directrions\n#\n flag_cti = False\n if nrow_low - nrow_edge >= nrow_lim:\n flag_cti = True\n\n avg_rd = (rd1 + rd2) / 2.0\n if avg_rd[3] > cclim and avg_rd[4] > cclim and avg_rd[6] > cclim:\n flag_cti = True\n\n if flag_cti:\n ctiDict['isCTI'] = True\n\n return ctiDict", "def transform(self, dataset, number_of_thresholds=1,\n enable_valley_emphasis=False):\n\n # Initial progress\n self.progress.value = 0\n self.progress.maximum = 100\n\n # Approximate percentage of work completed after each step in the\n # transform\n STEP_PCT = [10, 20, 70, 90, 100]\n\n try:\n import itk\n import itkExtras\n import itkTypes\n from tomviz import itkutils\n except Exception as exc:\n print(\"Could not import necessary module(s)\")\n raise exc\n\n # Return values\n returnValues = None\n\n # Add a try/except around the ITK portion. ITK exceptions are\n # passed up to the Python layer, so we can at least report what\n # went wrong with the script, e.g,, unsupported image type.\n try:\n self.progress.value = STEP_PCT[0]\n self.progress.message = \"Converting data to ITK image\"\n\n # Get the ITK image\n itk_image = itkutils.dataset_to_itk_image(dataset)\n itk_input_image_type = type(itk_image)\n\n # OtsuMultipleThresholdsImageFilter's wrapping requires that the\n # input and output image types be the same.\n itk_threshold_image_type = itk_input_image_type\n\n # Otsu multiple threshold filter\n otsu_filter = itk.OtsuMultipleThresholdsImageFilter[\n itk_input_image_type, itk_threshold_image_type].New()\n otsu_filter.SetNumberOfThresholds(number_of_thresholds)\n otsu_filter.SetValleyEmphasis(enable_valley_emphasis)\n otsu_filter.SetInput(itk_image)\n itkutils.observe_filter_progress(self, otsu_filter,\n STEP_PCT[1], STEP_PCT[2])\n\n try:\n otsu_filter.Update()\n except RuntimeError:\n return\n\n print(\"Otsu threshold(s): %s\" % (otsu_filter.GetThresholds(),))\n\n itk_image_data = otsu_filter.GetOutput()\n\n # Cast threshold output to an integral type if needed.\n py_buffer_type = itk_threshold_image_type\n voxel_type = itkExtras.template(itk_threshold_image_type)[1][0]\n if voxel_type is itkTypes.F or voxel_type is itkTypes.D:\n self.progress.message = \"Casting output to integral type\"\n\n # Unsigned char supports 256 labels, or 255 threshold levels.\n # This should be sufficient for all but the most unusual use\n # cases.\n py_buffer_type = itk.Image.UC3\n caster = itk.CastImageFilter[itk_threshold_image_type,\n py_buffer_type].New()\n caster.SetInput(itk_image_data)\n itkutils.observe_filter_progress(self, caster,\n STEP_PCT[2], STEP_PCT[3])\n\n try:\n caster.Update()\n except RuntimeError:\n return\n\n itk_image_data = caster.GetOutput()\n\n 
self.progress.value = STEP_PCT[3]\n self.progress.message = \"Saving results\"\n\n label_map_dataset = dataset.create_child_dataset()\n itkutils.set_itk_image_on_dataset(itk_image_data, label_map_dataset,\n dtype=py_buffer_type)\n\n self.progress.value = STEP_PCT[4]\n\n # Set up dictionary to return operator results\n returnValues = {}\n returnValues[\"label_map\"] = label_map_dataset\n\n except Exception as exc:\n print(\"Problem encountered while running %s\" %\n self.__class__.__name__)\n raise exc\n\n return returnValues", "def cast(*args):\n return _itkTernaryAddImageFilterPython.itkTernaryAddImageFilterIF2IF2IF2IF2_cast(*args)", "def enrichment_factor(y_true, y_score, percentage=..., pos_label=..., kind=...):\n ...", "def preprocess_sf(bd, var):\n filepath_svf = f\"team67-ptp/data/{var}.ftr\"\n filepath = bd\n data = feather.read_dataframe(filepath)\n df = data.copy()\n df2 = df[var]\n df2 = df2.to_frame()\n if df2[var].dtype is \"category\":\n df2[var] = df2[var].astype(\"category\").cat.codes\n filename = filepath_svf\n df2.to_feather(filename)\n print(\"Succesfully exported to feather\")\n else:\n filename = filepath_svf\n df2.to_feather(filename)\n print(\"Succesfully exported to feather\")", "def add_conversions_for_component(self, comp):\n model = self.model\n if self.special_conversions:\n self.model._cml_special_units_converter = self._apply_special_conversion_for_nested_expr\n assignments = model.search_for_assignments(comp)\n self.convert_assignments(assignments)\n if self.special_conversions:\n del self.model._cml_special_units_converter\n for conn in getattr(model, u'connection', []):\n cname1 = conn.map_components.component_1\n cname2 = conn.map_components.component_2\n if comp.name in [cname1, cname2]:\n comp1 = model.get_component_by_name(cname1)\n comp2 = model.get_component_by_name(cname2)\n for mapping in conn.map_variables:\n var1 = model.get_variable_by_name(cname1, mapping.variable_1)\n var2 = model.get_variable_by_name(cname2, mapping.variable_2)\n self.convert_mapping(mapping, comp1, comp2, var1, var2)", "def convert_attribute(df):\n\n df['Popularity'] = ['Oversold' if x == '(1.0, 2.0]' else 'Unpopular' if x == '(-1.0, 0.0]' or x == '(0.0, 0.5]' else 'Popular' for x in df['percent_of_cap_binRange1']]\n\n return df", "def metrics_evaluation(y_true, y_prob, threshold, df_type='train'):\n\n y_pred = (y_prob>=threshold).astype(int)\n \n tn = metrics.confusion_matrix(y_true, y_pred)[0][0]\n fp = metrics.confusion_matrix(y_true, y_pred)[0][1]\n fn = metrics.confusion_matrix(y_true, y_pred)[1][0]\n tp = metrics.confusion_matrix(y_true, y_pred)[1][1]\n\n accuracy_scr = metrics.accuracy_score(y_true, y_pred)\n precision_scr = metrics.precision_score(y_true, y_pred)\n recall_scr = metrics.recall_score(y_true, y_pred)\n f1_scr = metrics.f1_score(y_true, y_pred)\n roc_auc_scr = metrics.roc_auc_score(y_true, y_pred)\n\n result = {'Dataset': df_type, 'No obs': len(y_true), 'Threshold': threshold,\n 'TP':tp, 'FP': fp, 'TN': tn, 'FN':fn , \n 'Accuracy Score':accuracy_scr, 'Precision Score':precision_scr, \n 'Recall Score':recall_scr, 'F1 Score':f1_scr, 'ROC AUC Score':roc_auc_scr}\n\n return result" ]
[ "0.66733557", "0.49194333", "0.49031988", "0.4838306", "0.48149535", "0.47954047", "0.478979", "0.4768422", "0.47311518", "0.46969774", "0.46876988", "0.4677883", "0.4670147", "0.46662682", "0.4616618", "0.46068928", "0.46041748", "0.45904663", "0.45898837", "0.45892733", "0.45309654", "0.4506158", "0.45045283", "0.45014715", "0.44987324", "0.4496679", "0.44645092", "0.44582638", "0.44518247", "0.4451442" ]
0.57825917
1
Returns a heatmap for a given species (species_id) and a list of probes. It returns a dict with 'order' the order of the experiments and 'heatmap' another dict with the actual data. Data is zlog transformed
def get_heatmap(species_id, probes, zlog=True, raw=False): profiles = ( ExpressionProfile.query.options(undefer("profile")) .filter_by(species_id=species_id) .filter(ExpressionProfile.probe.in_(probes)) .all() ) order = [] output = [] not_found = [p.lower() for p in probes] for profile in profiles: name = profile.probe data = json.loads(profile.profile) order = data["order"] experiments = data["data"] with contextlib.suppress(ValueError): not_found.remove(profile.probe.lower()) with contextlib.suppress(ValueError): not_found.remove(profile.sequence.name.lower()) values = {} for o in order: values[o] = mean(experiments[o]) row_mean = mean(values.values()) row_max = max(values.values()) for o in order: if zlog: if row_mean == 0 or values[o] == 0: values[o] = "-" else: try: values[o] = log(values[o] / row_mean, 2) except ValueError as _: print("Unable to calculate log()", values[o], row_mean) values[o] = "-" else: if row_max != 0 and not raw: values[o] = values[o] / row_max output.append( { "name": name, "values": values, "sequence_id": profile.sequence_id, "shortest_alias": profile.sequence.shortest_alias, } ) if len(not_found) > 0: flash("Couldn't find profile for: %s" % ", ".join(not_found), "warning") return {"order": order, "heatmap_data": output}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_heatmap(data, labels_dict, file_title, plot_title):\n\n fig = plt.figure()\n ax = sn.heatmap(data,\n linewidths=0.3)\n figure = ax.get_figure()\n\n if labels_dict:\n ax.set_xlabel(labels_dict[\"x\"])\n ax.set_ylabel(labels_dict[\"y\"])\n if plot_title:\n ax.set_title(plot_title)\n\n figure.savefig(file_title)", "def get_di_heatmap(data_dir, save_dir=None, figsize=[10, 8]):\n # load up (ordered) DI scores, sans chromosome column.\n rho_df = load_di_scores(data_dir\n , order=True)\n\n # make DI score heatmap.\n fig, ax = plt.subplots(1, 1, figsize=figsize)\n fig.suptitle('DI score heatmap')\n\n sns.heatmap(rho_df\n , cmap='RdBu'\n , cbar_kws={\"shrink\": .5})\n\n ax.set_xticklabels(ax.get_xticklabels()\n , rotation=45)\n fig.tight_layout(rect=[0, 0, 1, 0.95])\n\n if save_dir:\n save_path = os.path.abspath(os.path.join(save_dir, 'di_heatmap.png'))\n fig.savefig(save_path\n , dpi=200)\n plt.close(fig)\n\n return save_path\n\n return fig", "def timeit_heatmap(data, xlabel='xlabel', ylabel='ylabel', **kwargs):\n dataT = {}\n figs = []\n series = kwargs.get('series', (0,1))\n cmap = kwargs.get('cmap', cm.coolwarm)\n for k, v in data.items():\n dataT[k] = zip(*v)\n X, Y, Z = dataT[k][series[0]], dataT[k][series[1]], dataT[k][-1]\n left, right = min(X), max(X)\n bottom, top = min(Y), max(Y)\n extent = [left, right, bottom, top]\n wide, tall = (max(X)-min(X)+1), (max(Y)-min(Y)+1)\n intervalX = max(X) - min(heapq.nlargest(2,set(X)))\n intervalY = max(Y) - min(heapq.nlargest(2,set(Y)))\n if intervalX > 1: \n wide = 1 + wide/intervalX\n else:\n wide = 1\n if intervalY > 1: \n tall = 1 + tall/intervalY\n else: \n tall = 1\n # TODO: BUG: fix so that Z transposes with x & y series reversed\n Z = np.reshape(Z, [wide, tall])\n Z = list(zip(*Z)) # Z is transposed\n Z = [i for i in Z[::-1]] # Z is upside down\n fig, ax = plt.subplots()\n hmap = ax.imshow(Z, extent=extent, cmap=cmap, interpolation='nearest')\n fig.colorbar(hmap).set_label(\"time\")\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n ax.set_title(substitute_titles(k,series))\n figs.append(fig)\n return figs", "def plot_heatmap():\n dir = \"log/heatmap\"\n pattern = r'Ligne (\\d{1,2}).csv$'\n utils.construct_heatmap_set(dir, pattern)\n df_heat = pd.read_csv('data/heat.csv', sep=';')\n heatmap_dir = os.path.join('figures', 'heatmaps')\n for beacon in colums:\n utils.plot_heatmap(df_heat, beacon)\n plt.title(beacon)\n if not os.path.isdir(heatmap_dir):\n os.makedirs(heatmap_dir)\n plt.savefig(os.path.join(heatmap_dir, utils.find_beacon_name(beacon) + '.png'))", "def heatmap_plot(savefigure=False):\n treenumber,depth = [1, 5, 10, 50, 100, 500, 1000], list(range(1,11))\n iter_list = list(product(treenumber, depth))\n result_list = np.load('npy-data/result_big_run.npy')\n heat_arr = result_list.reshape((len(treenumber), len(depth)))\n\n fig, ax = plt.subplots(figsize=(8,8))\n im = ax.matshow(heat_arr, cmap=plt.cm.coolwarm, vmin=.8, vmax=1)\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.2)\n\n ax.set_xticks(np.arange(heat_arr.shape[1]))\n ax.set_xticklabels(depth)\n ax.set_yticks(np.arange(heat_arr.shape[0]))\n ax.set_yticklabels(treenumber)\n ax.set_xlim(-.5,heat_arr.shape[1]-.5)\n ax.set_ylim(heat_arr.shape[0]-.5,-.5)\n ax.set_xlabel(\"Maximum depth\")\n ax.set_ylabel(\"Number of trees\")\n ax.xaxis.set_ticks_position('bottom')\n plt.colorbar(im, cax=cax)\n for i, z in enumerate(heat_arr):\n for j in range(len(z)):\n ax.text(j, i, '{:.3f}'.format(z[j]), ha='center', va='center')\n if 
savefigure:\n plt.savefig('img/xgb_heatmap_maxdepth_n_estimators.pdf', bbox_inches='tight')\n plt.show()", "def generate_heatmap(self):\n data = []\n for j in range(len(self.generations[0])):\n row = []\n for i in range(len(self.generations)):\n row.append(self.generations[i][j].fitness)\n data.append(row)\n data = np.array(data)\n\n # Display log error in colorbar.\n tick_range = range(\n math.floor(math.log10(data.min().min())),\n 1 + math.ceil(math.log10(data.max().max())))\n cbar_ticks = [math.pow(10, i) for i in tick_range]\n log_norm = LogNorm(vmin=data.min().min(), vmax=data.max().max())\n\n plt.figure(figsize=(10, 5))\n ax = sns.heatmap(\n data,\n cmap='viridis',\n xticklabels=2,\n yticklabels=2,\n norm=log_norm,\n cbar_kws={'ticks': cbar_ticks, 'aspect': 15})\n\n hfont = {'fontname': 'Helvetica'}\n plt.xlabel('Generation', **hfont)\n plt.ylabel('Individual', **hfont)\n plt.xticks(\n [i for i in range(0, self.config.max_generations, 5)],\n [i for i in range(0, self.config.max_generations, 5)])\n plt.yticks(\n [i for i in range(0, self.config.population_size, 5)],\n [i for i in range(0, self.config.population_size, 5)])\n\n ax.invert_yaxis()\n ax.collections[0].colorbar.set_label('Error')\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n plt.savefig('figures/Parameter Tuning Figure/heatmap.svg')", "def heatmap(args):\n p = OptionParser(heatmap.__doc__)\n p.add_option(\"--stacks\",\n default=\"Exons,Introns,DNA_transposons,Retrotransposons\",\n help=\"Features to plot in stackplot [default: %default]\")\n p.add_option(\"--heatmaps\",\n default=\"Copia,Gypsy,hAT,Helitron,Introns,Exons\",\n help=\"Features to plot in heatmaps [default: %default]\")\n p.add_option(\"--meres\", default=None,\n help=\"Extra centromere / telomere features [default: %default]\")\n add_window_options(p)\n opts, args, iopts = p.set_image_options(args, figsize=\"8x5\")\n\n if len(args) != 2:\n sys.exit(not p.print_help())\n\n fastafile, chr = args\n window, shift, subtract = check_window_options(opts)\n\n stacks = opts.stacks.split(\",\")\n heatmaps = opts.heatmaps.split(\",\")\n stackbeds = get_beds(stacks)\n heatmapbeds = get_beds(heatmaps)\n stackbins = get_binfiles(stackbeds, fastafile, shift, subtract=subtract)\n heatmapbins = get_binfiles(heatmapbeds, fastafile, shift, subtract=subtract)\n\n margin = .06\n inner = .015\n clen = Sizes(fastafile).mapping[chr]\n\n fig = plt.figure(1, (iopts.w, iopts.h))\n root = fig.add_axes([0, 0, 1, 1])\n\n # Gauge\n ratio = draw_gauge(root, margin, clen, rightmargin=4 * margin)\n yinterval = .3\n xx = margin\n yy = 1 - margin\n yy -= yinterval\n xlen = clen / ratio\n cc = chr\n if \"_\" in chr:\n ca, cb = chr.split(\"_\")\n cc = ca[0].upper() + cb\n\n root.add_patch(Rectangle((xx, yy), xlen, yinterval - inner, color=gray))\n ax = fig.add_axes([xx, yy, xlen, yinterval - inner])\n\n nbins = get_nbins(clen, shift)\n\n owindow = clen / 100\n if owindow > window:\n window = owindow / shift * shift\n\n stackplot(ax, stackbins, nbins, palette, chr, window, shift)\n ax.text(.1, .9, cc, va=\"top\", zorder=100, transform=ax.transAxes,\n bbox=dict(boxstyle=\"round\", fc=\"w\", alpha=.5))\n\n # Legends\n xx += xlen + .01\n yspace = (yinterval - inner) / (len(stackbins) + 1)\n yy = 1 - margin - yinterval\n for s, p in zip(stacks, palette):\n s = s.replace(\"_\", \" \")\n s = Registration.get(s, s)\n\n yy += yspace\n root.add_patch(Rectangle((xx, yy), inner, inner, color=p, lw=0))\n root.text(xx + 1.5 * inner, yy, s, size=10)\n\n yh = .05 # Heatmap 
height\n # Heatmaps\n xx = margin\n yy = 1 - margin - yinterval - inner\n for s, p in zip(heatmaps, heatmapbins):\n s = s.replace(\"_\", \" \")\n s = Registration.get(s, s)\n\n yy -= yh\n m = stackarray(p, chr, window, shift)\n\n Y = np.array([m, m])\n root.imshow(Y, extent=(xx, xx + xlen, yy, yy + yh - inner),\n interpolation=\"nearest\", aspect=\"auto\")\n root.text(xx + xlen + .01, yy, s, size=10)\n\n yy -= yh\n\n meres = opts.meres\n if meres:\n bed = Bed(meres)\n for b in bed:\n if b.seqid != chr:\n continue\n pos = (b.start + b.end) / 2\n cpos = pos / ratio\n xx = margin + cpos\n accn = b.accn.capitalize()\n root.add_patch(CirclePolygon((xx, yy), radius=.01, fc=\"m\", ec=\"m\"))\n root.text(xx + .014, yy, accn, va=\"center\", color=\"m\")\n\n root.set_xlim(0, 1)\n root.set_ylim(0, 1)\n root.set_axis_off()\n\n image_name = chr + \".\" + iopts.format\n savefig(image_name, dpi=iopts.dpi, iopts=iopts)", "def heatmap(island_results):\n kart_herb = []\n kart_carn = []\n for row in island_results:\n h_row = []\n c_row = []\n for cell in row:\n h_row.append(cell[\"herbivores\"])\n c_row.append(cell[\"carnivores\"])\n kart_herb.append(h_row)\n kart_carn.append(c_row)\n return kart_herb, kart_carn", "def _cmd_heatmap(args):\n cnarrs = []\n for fname in args.filenames:\n cnarr = read_cna(fname)\n if args.adjust_xy:\n is_sample_female = verify_sample_sex(\n cnarr, args.sample_sex, args.male_reference, args.diploid_parx_genome\n )\n cnarr = cnarr.shift_xx(args.male_reference, is_sample_female, args.diploid_parx_genome)\n cnarrs.append(cnarr)\n heatmap.do_heatmap(\n cnarrs,\n args.chromosome,\n args.desaturate,\n args.by_bin,\n args.delim_sampl,\n args.vertical,\n args.title,\n )\n if args.output:\n oformat = os.path.splitext(args.output)[-1].replace(\".\", \"\")\n pyplot.savefig(args.output, format=oformat, bbox_inches=\"tight\")\n logging.info(\"Wrote %s\", args.output)\n else:\n pyplot.show()", "def heatmap(df, cmap ='RdBu' ):\n\n # TODO: mpld3 does not display axis labels properly\n\n # TODO: Replace with an interactive plot, see bokeh:\n # http://bokeh.pydata.org/docs/gallery/les_mis.html\n\n fig, ax = plt.subplots()\n data = df.as_matrix()\n if isinstance(cmap, str):\n cmap = plt.get_cmap(cmap)\n\n ax.pcolor(data, cmap = cmap)\n ax.set_xticks(np.arange(data.shape[1])+0.5, minor = False)\n ax.set_xticklabels(df.columns)\n \n ax.set_yticks(np.arange(data.shape[0])+0.5, minor = False)\n ax.set_yticklabels(df.index)\n ax.invert_yaxis()\n ax.xaxis.tick_top()\n\n return fig", "def generate_heatmap(self):\n data = []\n for j in range(len(self.generations[0])):\n row = []\n for i in range(len(self.generations)):\n row.append(self.generations[i][j].fitness)\n data.append(row)\n data = np.array(data)\n\n plt.figure()\n ax = sns.heatmap(\n data,\n cmap='RdBu',\n xticklabels=2,\n yticklabels=2)\n\n hfont = {'fontname': 'Helvetica'}\n plt.xlabel('Generation', **hfont)\n plt.ylabel('Individual', **hfont)\n ax.invert_yaxis()\n ax.axhline(linewidth=4, color='black')\n ax.axvline(linewidth=4, color='black')\n ax.collections[0].colorbar.set_label('Fitness')\n plt.savefig('figures/Voltage Clamp Figure/Single VC Optimization/'\n 'heatmap.svg')", "def heatmap(filename, data):\n\n fig, ax = ppl.subplots(1)\n ppl.pcolormesh(fig, ax, data, vmin=-0.0016, vmax=0.0016)\n fig.savefig(filename + \".png\")", "def heatmap(data, row_labels, col_labels, ax=None,\r\n cbar_kw={}, cbarlabel=\"\", title = \"Default\", x_title=\" \",y_title=\" \",saveFile = None, **kwargs):", "def heatmap():\n\n team = 
int(request.form.get('teamname'))\n player = int(request.form.get('player'))\n attacks = request.form.get('attacks')\n data = request.form.get('datafiles')\n\n print 'Making heat map for player #%d, team #%d, attacks %s' % (player, team, attacks)\n\n if attacks.lower() == 'all':\n attacks = ['ALL']\n else:\n attacks = attacks.split(',')\n\n only_kills = request.form.get('kills')\n\n files = []\n if data == 'uploads':\n data = 'uploads/%d' % session['user_id']\n folder = 'data/' + data\n\n data_path = os.path.join(os.getcwd(), folder)\n if not os.path.isdir(data_path):\n os.makedirs(data_path)\n\n for f in os.listdir(data_path):\n if f.endswith('.dvw'):\n files.append(folder + '/' + f)\n\n locations = []\n for file_name in files:\n parser = Parser(file_name)\n new_locations = parser.get_attack_info(team, player, attacks, only_kills=only_kills)\n locations.extend(new_locations)\n\n top_caption, bottom_caption = generate_caption(team, player, attacks, only_kills)\n output_url = generate_output_filename(team, player, attacks, only_kills, session['user_id'])\n output_dict = heat_map.draw_arcs_pillow(locations, output_url, top_caption=top_caption, bottom_caption=bottom_caption)\n\n result_dict = {\n 'output_url': output_url,\n 'width': output_dict['width'],\n 'height': output_dict['height'],\n }\n print locations\n print 'Rendering finished image', output_url\n return render_template('heatmap.html', result_dict=result_dict)", "def make_heatmap(self):\n\n self.get_selected_categories_and_codes()\n codes = deepcopy(self.codes)\n if len(codes) > 40:\n codes = codes[:40]\n Message(self.app, _(\"Too many codes\"), _(\"Too many codes for display. Restricted to 40\")).exec()\n # Filters\n heatmap_type = self.ui.comboBox_heatmap.currentText()\n if heatmap_type == \"\":\n return\n title = heatmap_type + \" \" + _(\"Heatmap\")\n self.get_selected_categories_and_codes()\n y_labels = []\n for c in codes:\n y_labels.append(c['name'])\n category = self.ui.comboBox_category.currentText()\n self.ui.lineEdit_filter.setText(\"\")\n self.ui.comboBox_case.setCurrentIndex(0)\n self.ui.comboBox_file.setCurrentIndex(0)\n owner, subtitle = self.owner_and_subtitle_helper()\n\n # Get all the coded data\n data = []\n x_labels = []\n cur = self.app.conn.cursor()\n if heatmap_type == \"File\":\n if not self.attribute_file_ids:\n sql = \"select id, name from source order by name\"\n cur.execute(sql)\n files = cur.fetchall()\n else:\n attr_msg, file_ids_txt = self.get_file_ids()\n subtitle += attr_msg\n sql = \"select id, name from source where id \" + file_ids_txt + \" order by name\"\n cur.execute(sql)\n files = cur.fetchall()\n if len(files) > 40:\n files = files[:40]\n Message(self.app, _(\"Too many files\"), _(\"Too many files for display. Restricted to 40\")).exec()\n for f in files:\n x_labels.append(f[1])\n # Calculate the frequency of each code in each file\n # Each row is a code, each column is a file\n for code_ in codes:\n code_counts = []\n for f in files:\n code_counts.append(self.heatmap_counter_by_file_and_code(owner, f[0], code_['cid']))\n data.append(code_counts)\n if heatmap_type == \"Case\":\n if not self.attribute_case_ids_and_names: # self.attribute_file_ids:\n sql = \"select caseid, name from cases order by name\"\n cur.execute(sql)\n cases = cur.fetchall()\n if len(cases) > 40:\n cases = cases[:40]\n Message(self.app, _(\"Too many cases\"), _(\"Too many cases for display. 
Restricted to 40\")).exec()\n for c in cases:\n x_labels.append(c[1])\n # Calculate the frequency of each code in each file\n # Each row is a code, each column is a file\n for code_ in codes:\n code_counts = []\n for c in cases:\n cur.execute(\"SELECT fid FROM case_text where caseid=?\", [c[0]])\n fids = cur.fetchall()\n case_counts = 0\n for fid in fids:\n case_counts += self.heatmap_counter_by_file_and_code(owner, fid[0], code_['cid'])\n code_counts.append(case_counts)\n data.append(code_counts)\n else:\n attr_msg, file_ids_txt = self.get_file_ids()\n print(self.attribute_case_ids_and_names)\n for c in self.attribute_case_ids_and_names:\n x_labels.append(c[1])\n subtitle += attr_msg\n # Calculate the frequency of each code in each file\n # Each row is a code, each column is a file\n for code_ in codes:\n code_counts = []\n for c in self.attribute_case_ids_and_names:\n cur.execute(\"SELECT fid FROM case_text where caseid=?\", [c[0]])\n fids = cur.fetchall()\n # TODO revise fids if file parameters selected\n case_counts = 0\n for fid in fids:\n case_counts += self.heatmap_counter_by_file_and_code(owner, fid[0], code_['cid'])\n code_counts.append(case_counts)\n data.append(code_counts)\n # Create the plot\n fig = px.imshow(data,\n labels=dict(x=heatmap_type, y=\"Codes\", color=\"Count\"),\n x=x_labels,\n y=y_labels,\n title=title+subtitle\n )\n fig.update_xaxes(side=\"top\")\n fig.show()\n self.helper_export_html(fig)\n self.ui.comboBox_heatmap.blockSignals(True)\n self.ui.comboBox_heatmap.setCurrentIndex(0)\n self.ui.comboBox_heatmap.blockSignals(False)", "def heatmap(mat, x_label=None, y_label=None, axes=None,\n title=None, save=False):\n sns.heatmap(mat)\n plt.show()", "def plot_HDres_histos_vs_z(\n df,\n nameout,\n threshold_var=\"class0\",\n threshold_list=[0.5, 0.7, 0.9],\n threshold_sign=\">\",\n):\n\n P = df[df[\"class0\"] > 0.5]\n Ias = df[df[\"target\"] == 0]\n\n TP = P[P[\"target\"] == 0]\n FP = P[P[\"target\"] != 0]\n\n sel_TP_dic = {}\n sel_FP_dic = {}\n for t in threshold_list:\n if threshold_sign == \">\":\n sel_TP_dic[t] = TP[TP[threshold_var] > t]\n sel_FP_dic[t] = FP[FP[threshold_var] > t]\n else:\n sel_TP_dic[t] = TP[TP[threshold_var] < t]\n sel_FP_dic[t] = FP[FP[threshold_var] < t]\n\n plt.clf()\n cm = CMAP\n fig = plt.figure(figsize=(14, 14))\n # gs = gridspec.GridSpec(4, 2, width_ratios=[3, 1], height_ratios=[2, 2, 1, 1])\n # gs.update(wspace=0.1, hspace=0.3)\n\n # # gridspec init\n # ax00 = plt.subplot(gs[0, 0]) # Hres Ia\n # ax10 = plt.subplot(gs[1, 0], sharex=ax00) # Hres CC\n # ax20 = plt.subplot(gs[2:, 0], sharex=ax00) # efficiency\n # ax01 = plt.subplot(gs[0, 1], sharey=ax00) # histo Ia\n # ax11 = plt.subplot(gs[1, 1], sharey=ax10) # histo CC\n # ax21 = plt.subplot(gs[2, 1]) # histo x1\n # ax31 = plt.subplot(gs[3, 1]) # histo c\n gs = gridspec.GridSpec(3, 3, height_ratios=[2, 2, 1])\n # gs.update(wspace=0.2, hspace=0.1)\n\n # gridspec init\n ax00 = plt.subplot(gs[0, 0:2]) # Hres Ia\n ax10 = plt.subplot(gs[1, 0:2], sharex=ax00) # Hres CC\n ax20 = plt.subplot(gs[2, 0]) # redshift dist\n ax01 = plt.subplot(gs[0, 2], sharey=ax00) # histo Ia\n ax11 = plt.subplot(gs[1, 2], sharey=ax10) # histo CC\n ax21 = plt.subplot(gs[2, 1]) # histo x1\n ax31 = plt.subplot(gs[2, 2]) # histo c\n\n # lines\n ax00.plot([0, 1.2], np.zeros(len([0, 1.2])), \"k:\")\n ax10.plot([0, 1.2], np.zeros(len([0, 1.2])), \"k:\")\n\n mubins = np.arange(-2, 2 + 0.1, 0.1)\n\n # Hres w. 
histogram\n def HRwhisto(\n df, sel_dic, ax_left, ax_right, threshold_sign, ylabel=\"TP\", visible=False\n ):\n if ylabel == \"TP\":\n sntyp = \"Ia\"\n else:\n sntyp = \"CC\"\n ax_left.scatter(\n df[\"SIM_REDSHIFT_CMB\"],\n df[\"delmu\"],\n c=df[\"class0\"],\n cmap=CMAP,\n vmin=0.5,\n vmax=1,\n s=8,\n )\n ax_left.errorbar(\n df[\"SIM_REDSHIFT_CMB\"],\n df[\"delmu\"],\n yerr=df[\"delmu_err\"],\n color=\"gray\",\n zorder=0,\n fmt=\"none\",\n marker=\"none\",\n )\n\n ax_left.set_ylim(-2, 2)\n ax_left.set_xlim(0, 1.2)\n ax_left.set_ylabel(f\"{ylabel} residual\", fontsize=18)\n ax_left.tick_params(labelsize=14)\n plt.setp(ax_left.get_xticklabels(), visible=visible)\n if visible is True:\n ax_left.set_xlabel(\"simulated redshift\", fontsize=18)\n for t in threshold_list:\n sel = sel_dic[t]\n n_SNe = len(sel)\n ax_right.hist(\n sel[\"delmu\"],\n orientation=\"horizontal\",\n histtype=\"step\",\n color=cm(t),\n bins=mubins,\n density=True,\n label=f\"{n_SNe} {sntyp} {threshold_sign} {t}\",\n lw=2,\n )\n ax_right.legend(loc=\"lower center\", prop={\"size\": 13})\n plt.setp(ax_right.get_yticklabels(), visible=False)\n plt.setp(ax_right.get_xticklabels(), visible=False)\n ax_right.plot(\n [ax_right.get_xlim()[0], ax_right.get_xlim()[1]],\n np.zeros(len([ax_right.get_xlim()[0], ax_right.get_xlim()[1]])),\n \"k:\",\n )\n\n HRwhisto(TP, sel_TP_dic, ax00, ax01, threshold_sign, ylabel=\"TP\", visible=False)\n HRwhisto(FP, sel_FP_dic, ax10, ax11, threshold_sign, ylabel=\"FP\", visible=True)\n\n # z histos\n n, bins_to_use, tmp = ax20.hist(\n Ias[\"SIM_REDSHIFT_CMB\"], histtype=\"step\", color=\"black\", bins=15, lw=3\n )\n\n for t in threshold_list:\n sel_TP = sel_TP_dic[t]\n sel_FP = sel_FP_dic[t]\n ax20.hist(\n sel_TP[\"SIM_REDSHIFT_CMB\"], histtype=\"step\", color=cm(t), bins=bins_to_use\n )\n ax20.hist(\n sel_FP[\"SIM_REDSHIFT_CMB\"],\n histtype=\"step\",\n color=cm(t),\n linestyle=\"--\",\n bins=bins_to_use,\n )\n ax20.set_xlim(0, 1.2)\n ax20.tick_params(labelsize=14)\n ax20.set_xlabel(\"simulated redshift\", fontsize=18)\n\n # hist stretch\n n, bins_to_use, tmp = ax21.hist(Ias[\"x1\"], color=\"black\", histtype=\"step\", lw=3)\n for t in threshold_list:\n sel_TP = sel_TP_dic[t]\n ax21.hist(\n sel_TP[\"x1\"],\n orientation=\"vertical\",\n histtype=\"step\",\n color=cm(t),\n bins=bins_to_use,\n lw=2,\n )\n ax21.set_xlabel(\"x1\", fontsize=18)\n ax21.yaxis.set_label_position(\"right\")\n ax21.set_xlim(-3, 3)\n ax21.tick_params(labelsize=14)\n # color histo\n n, bins_to_use, tmp = ax31.hist(Ias[\"c\"], color=\"black\", histtype=\"step\", lw=3)\n for t in threshold_list:\n sel_TP = sel_TP_dic[t]\n ax31.hist(\n sel_TP[\"c\"],\n orientation=\"vertical\",\n histtype=\"step\",\n color=cm(t),\n bins=bins_to_use,\n lw=2,\n )\n ax31.set_xlabel(\"c\", fontsize=18)\n ax31.set_xlim(-1, 1)\n ax31.tick_params(labelsize=14)\n ax31.yaxis.set_label_position(\"right\")\n\n gs.tight_layout(fig)\n plt.savefig(nameout)\n plt.close()\n del fig", "def matplotlib_heatmap_chart() -> Tuple:\n df = read_dataset(Path('..', '..', 'iris.csv'))\n df.drop(\"species\", axis=1, inplace=True)\n # Default is pearson's correlation coefficient\n corr_df = df.corr()\n\n fig, ax = a_libraries.matplotlib_heatmap_chart(corr_df.values)\n\n return fig, ax", "def create_heatmap(num_maps, height, width, all_joints, sigma, stride):\n heatmap = np.zeros((height, width, num_maps), dtype=np.float64)\n\n for joints in all_joints:\n for plane_idx, joint in enumerate(joints):\n if joint:\n _put_heatmap_on_plane(heatmap, plane_idx, joint, sigma, height, 
width, stride)\n\n # background\n heatmap[:, :, -1] = np.clip(1.0 - np.amax(heatmap, axis=2), 0.0, 1.0)\n\n return heatmap", "def get_mch_heatmap(ensemble, methylation_type, grouping, clustering, level, ptile_start, ptile_end, normalize_row, query):\n\ttsne_type = 'mCH_ndim2_perp20'\n\n\tif normalize_row:\n\t\tnormal_or_original = '(normalized by row)'\n\telse:\n\t\tnormal_or_original = ''\n\n\ttitle = level.title() + \" gene body \" + methylation_type + \" by cluster \" + normal_or_original + \": <br>\"\n\tgenes = query.split()\n\n\ts=''\n\tfor i in genes:\n\t\ts=s+','+i\n\n\tgene_labels = list()\n\tgene_info_df = pd.DataFrame()\n\tgene_infos = get_gene_by_id(genes)\n\tfor i, gene in enumerate(gene_infos):\n\t\tgene_name = gene['gene_name']\n\t\tgene_labels.append(gene_name)\n\t\tif i > 0 and i % 10 == 0:\n\t\t\ttitle += \"<br>\"\n\t\ttitle += gene_name + \"+\"\n\t\tgene_info_df[gene_name] = median_cluster_mch(get_gene_methylation(ensemble, methylation_type, gene['gene_id'], grouping, clustering, level, True), grouping, clustering)\n\t\tif gene_info_df[gene_name].empty:\n\t\t\traise FailToGraphException\n\n\ttitle = title[:-1] # Gets rid of last '+'\n\n\tgene_info_df.reset_index(inplace=True)\n\tif grouping == 'annotation':\n\t\tgene_info_df['annotation_cat'] = pd.Categorical(gene_info_df['annotation_'+clustering], cluster_annotation_order)\n\t\tgene_info_df.sort_values(by='annotation_cat', inplace=True)\n\t\tgene_info_df.drop('annotation_cat', axis=1, inplace=True)\n\t\tgene_info_df.set_index(grouping+'_'+clustering, inplace=True)\n\telif grouping == 'cluster':\n\t\tgene_info_df.sort_values(by='cluster_'+clustering, inplace=True)\n\t\tgene_info_df.set_index(grouping+'_'+clustering, inplace=True)\n\telif grouping == 'dataset' or grouping == 'target_region' or grouping == 'slice' or grouping == 'sex':\n\t\tgene_info_df.sort_values(by=grouping, inplace=True)\n\t\tgene_info_df.set_index(grouping, inplace=True)\n\telse:\n\t\tgrouping = 'cluster'\n\t\tgene_info_df.sort_values(by='cluster_'+clustering, inplace=True)\n\t\tgene_info_df.set_index(grouping+'_'+clustering, inplace=True)\n\n\t# For some reason, Plotly doesn't allow 'None' as a group on the x-axis for heatmaps.\n\tif gene_info_df.index.tolist() == ['None']:\n\t\tgene_info_df.index = ['N/A']\n\n\tclusters_labels = gene_info_df.index.tolist()\n\tif grouping == 'cluster':\n\t\tclusters_labels = ['Cluster '+str(i) for i in clusters_labels]\n\n\tnormal_or_original = 'Original'\n\tif normalize_row:\n\t\tfor gene in gene_info_df:\n\t\t\t# z-score\n\t\t\t# gene_info_df[gene] = (gene_info_df[gene] - gene_info_df[gene].mean()) / gene_info_df[gene].std()\n\t\t\t# min-max\n\t\t\tgene_range = gene_info_df[gene].max() - gene_info_df[gene].min()\n\t\t\tif (gene_range==0):\n\t\t\t\tgene_range = 1\n\t\t\tgene_info_df[gene] = (gene_info_df[gene] - gene_info_df[gene].min()) / gene_range\n\t\tnormal_or_original = 'Normalized'\n\n\tgene_info_dict = gene_info_df.to_dict(into=OrderedDict)\n\n\tx, y, text, hover, mch = list(), list(), list(), list(), list()\n\ti = 0\n\tname_prepend = \"\"\n\tif grouping == 'cluster':\n\t\tname_prepend = 'cluster_'\n\tfor key in list(gene_info_dict.keys()):\n\t\tj = 0\n\t\ty.append(key)\n\t\tmch.append(list(gene_info_dict[key].values()))\n\t\tfor cluster in list(gene_info_dict[key].keys()):\n\t\t\tx.append(name_prepend+str(cluster))\n\t\t\ttext.append(build_hover_text(OrderedDict([('Gene', key),\n\t\t\t\t\t\t\t\t\t\t\t\t\t (grouping.title(), x[j]),\n\t\t\t\t\t\t\t\t\t\t\t\t\t (methylation_type, 
mch[i][j])\n\t\t\t\t\t\t\t\t\t\t\t\t\t ])))\n\t\t\tj += 1\n\t\thover.append(text)\n\t\ttext = []\n\t\ti += 1\n\n\tflat_mch = list(chain.from_iterable(mch))\n\tmch_dataframe = pd.DataFrame(flat_mch).dropna()\n\n\t# Hierarchical clustering and dendrogram\n\tmch = np.array(mch)\n\tfigure = ff.create_dendrogram(mch, orientation=\"right\", labels=tuple([i for i in range(len(genes))])) # TODO: Figure out how to set the colorscale\n\tfor i in range(len(figure['data'])):\n\t\tfigure['data'][i]['xaxis'] = 'x2'\n\tdendro_leaves = figure['layout']['yaxis']['ticktext']\n\tdendro_leaves = list(map(int, dendro_leaves))\n\tmch = mch[dendro_leaves,:] # Reorder the genes according to the clustering\n\tgenes_labels = [gene_labels[i] for i in dendro_leaves]\n\thover_old = hover\n\t# hover = [hover_old[i] for i in dendro_leaves]\n\thover = [str(i) for i in dendro_leaves]\n\n\n\tdendro_top = ff.create_dendrogram(mch.transpose(), orientation=\"bottom\", labels=tuple([i for i in range(mch.shape[1])]))\n\tfor i in range(len(dendro_top['data'])):\n\t\tdendro_top['data'][i]['yaxis'] = 'y2'\n\tdendro_top_leaves = dendro_top['layout']['xaxis']['ticktext']\n\tdendro_top_leaves = list(map(int, dendro_top_leaves))\n\tmch = mch[:,dendro_top_leaves] # Reorder the genes according to the clustering\n\tclusters_labels = [clusters_labels[i] for i in dendro_top_leaves]\n\tmch = list(mch)\n\tfigure.add_traces(dendro_top['data'])\n\n\t# Set color scale limits\n\tstart = mch_dataframe.quantile(ptile_start).values[0].tolist()\n\tend = mch_dataframe.quantile(ptile_end).values[0].tolist()\n\tend = max(end,start+0.01)\n\n\tcolorbar_tickval = list(arange(start, end, (end - start) / 4))\n\tcolorbar_tickval[0] = start\n\tcolorbar_tickval.append(end)\n\tcolorbar_ticktext = [\n\t\tstr(round(x, num_sigfigs_ticklabels)) for x in arange(start, end, (end - start) / 4)\n\t]\n\tif normalize_row == True:\n\t\tcolorbar_ticktext[0] = str(round(start, num_sigfigs_ticklabels))\n\telse:\n\t\tif (round(start,num_sigfigs_ticklabels)) == 0:\n\t\t\tcolorbar_ticktext[0] = str(round(start,num_sigfigs_ticklabels))\n\t\telse:\n\t\t\tcolorbar_ticktext[0] = '<' + str(round(start, num_sigfigs_ticklabels))\n\tcolorbar_ticktext.append('>' + str(round(end, num_sigfigs_ticklabels)))\n\n\t# Due to a weird bug(?) in plotly, the number of elements in tickvals and ticktext\n\t# must be greater than or equal to number of genes in query. Else, javascript throws\n\t# Uncaught Typeerrors when trying to hover over genes. 
(Tomo 12/11/17)\n\twhile len(colorbar_tickval) < len(genes):\n\t\tcolorbar_tickval.insert(0,start)\n\t\tif normalize_row == True:\n\t\t\tcolorbar_ticktext.insert(0, str(round(start, num_sigfigs_ticklabels)))\n\t\telse:\n\t\t\tcolorbar_ticktext.insert(0, '<' + str(round(start, num_sigfigs_ticklabels)))\n\n\ttrace = Heatmap(\n\t\tx=dendro_top_leaves,\n\t\ty=dendro_leaves,\n\t\tz=mch,\n\t\txtype=\"array\", ytype=\"array\",\n\t\ttext=hover,\n\t\tcolorscale='Viridis',\n\t\tcolorbar={\n\t\t\t'x': 1.0,\n\t\t\t'len': 0.5,\n\t\t\t'title': level.capitalize() + ' ' + methylation_type,\n\t\t\t'titleside': 'right',\n\t\t\t'tickmode': 'array',\n\t\t\t'tickvals': colorbar_tickval,\n\t\t\t'ticktext': colorbar_ticktext,\n\t\t\t'thickness': 10,\n\t\t\t'tickfont': {'size': 10}\n\t\t\t},\n\t\thoverinfo='text',\n\t\tzmin=start,zmax=end,zauto=False, # Clip the extreme edges of the colorscale\n\t\t)\n\ttrace['y'] = figure['layout']['yaxis']['tickvals']\n\ttrace['x'] = dendro_top['layout']['xaxis']['tickvals']\n\tfigure.add_traces([trace])\n\n\tlayout = Layout(\n\t\theight=max(600*len(genes)/20,550), # EAM Adjust the height of the heatmap according to the number of genes displayed\n\t\twidth=1000,\n\t\tpaper_bgcolor='rgba(0,0,0,0)',\n\t\tplot_bgcolor='rgba(0,0,0,0)',\n\t\tshowlegend=False,\n\t\thovermode='closest',\n\t\ttitle=title,\n\t\t# titlefont={'color': 'rgba(1,2,2,1)',\n\t\t# 'size': 16},\n\t\tmargin={'l': 0,\n\t\t\t\t'r': 0,\n\t\t\t\t'b': 100,\n\t\t\t\t't': 150,},\n\t\txaxis={\n\t\t\t'side': 'bottom',\n\t\t\t'tickangle': -45,\n\t\t\t'title': 'Clusters',\n\t\t\t'tickfont': {'size': 12},\n\t\t\t'showticklabels': True,\n\t\t\t'tickmode': 'array',\n\t\t\t'tickvals':trace['x'],\n\t\t\t'ticktext':clusters_labels,\n\t\t\t},\n\t\tyaxis={\n\t\t\t# 'tickangle': 15,\n\t\t\t'tickfont': {'size': 12},\n\t\t\t'showticklabels': True,\n\t\t\t'ticks':\"outside\",\n\t\t\t'tickmode': 'array',\n\t\t\t'tickvals':trace['y'],\n\t\t\t'ticktext':genes_labels,\n\t\t\t},\n\t\t)\n\tlayout['yaxis'].update({'domain': [0, .85]})\n\tlayout['xaxis'].update({'domain': [0.2, 1]})\n\tlayout.update({'hovermode': 'closest'})\n\tlayout.update({'xaxis2': {\n\t\t\t'showticklabels': False\n\t\t\t}})\n\tlayout.update({'yaxis2': {\n\t\t\t'showticklabels': False\n\t\t\t}})\n\tlayout['xaxis2'].update({'domain': [0, 0.1]})\n\tlayout['yaxis2'].update({'domain': [0.86, 1]})\n\tfor xx in ['xaxis','yaxis','xaxis2','yaxis2']:\n\t\tlayout[xx].update({'mirror': False,\n\t\t\t\t\t\t 'showgrid': False,\n\t\t\t\t\t\t 'showline': False,\n\t\t\t\t\t\t 'zeroline': False})\n\n\t# Available colorscales:\n\t# https://community.plot.ly/t/what-colorscales-are-available-in-plotly-and-which-are-the-default/2079\n\tupdatemenus = list([\n\t\tdict(\n\t\t\tbuttons=list([\n\t\t\t\tdict(\n\t\t\t\t\targs=['colorscale', 'Viridis'],\n\t\t\t\t\tlabel='Viridis',\n\t\t\t\t\tmethod='restyle'\n\t\t\t\t),\n\t\t\t\tdict(\n\t\t\t\t\targs=['colorscale', 'Bluered'],\n\t\t\t\t\tlabel='Bluered',\n\t\t\t\t\tmethod='restyle'\n\t\t\t\t),\n\t\t\t\tdict(\n\t\t\t\t\targs=['colorscale', 'Blackbody'],\n\t\t\t\t\tlabel='Blackbody',\n\t\t\t\t\tmethod='restyle'\n\t\t\t\t),\n\t\t\t\tdict(\n\t\t\t\t\targs=['colorscale', 'Electric'],\n\t\t\t\t\tlabel='Electric',\n\t\t\t\t\tmethod='restyle'\n\t\t\t\t),\n\t\t\t\tdict(\n\t\t\t\t\targs=['colorscale', 'Earth'],\n\t\t\t\t\tlabel='Earth',\n\t\t\t\t\tmethod='restyle'\n\t\t\t\t),\n\t\t\t\tdict(\n\t\t\t\t\targs=['colorscale', 'Jet'],\n\t\t\t\t\tlabel='Jet',\n\t\t\t\t\tmethod='restyle'\n\t\t\t\t),\n\t\t\t\tdict(\n\t\t\t\t\targs=['colorscale', 
'Rainbow'],\n\t\t\t\t\tlabel='Rainbow',\n\t\t\t\t\tmethod='restyle'\n\t\t\t\t),\n\t\t\t\tdict(\n\t\t\t\t\targs=['colorscale', 'Picnic'],\n\t\t\t\t\tlabel='Picnic',\n\t\t\t\t\tmethod='restyle'\n\t\t\t\t),\n\t\t\t\tdict(\n\t\t\t\t\targs=['colorscale', 'Portland'],\n\t\t\t\t\tlabel='Portland',\n\t\t\t\t\tmethod='restyle'\n\t\t\t\t),\n\t\t\t\tdict(\n\t\t\t\t\targs=['colorscale', 'YlGnBu'],\n\t\t\t\t\tlabel='YlGnBu',\n\t\t\t\t\tmethod='restyle'\n\t\t\t\t)\n\t\t\t]),\n\t\t\tdirection='down',\n\t\t\tshowactive=True,\n\t\t\tx=-0.1,\n\t\t\txanchor='left',\n\t\t\ty=1.43,\n\t\t\tyanchor='top'\n\t\t)\n\t])\n\n\tlayout['updatemenus'] = updatemenus\n\n\t# layout['annotations'].extend([Annotation(text=title,\n\t# x=0.5,\n\t# y=1.3,\n\t# xanchor=\"center\",\n\t# yanchor=\"top\",\n\t# showarrow=False,\n\t# xref=\"paper\",\n\t# yref=\"paper\",\n\t# font={'size': 16,\n\t# 'color': 'black',})])\n\n\tfigure['layout'] = layout\n\n\treturn plotly.offline.plot(figure,\n\t\toutput_type='div',\n\t\tshow_link=False,\n\t\tinclude_plotlyjs=False)", "def make_complex_heatmap(df_data, heatmap_cmap='coolwarm',\n vmax=4,\n vmin=-4,\n figsize=(16, 9),\n row_metadata=None,\n col_metadata=None,\n col_colorbar_anchor=[0.12, 0.1, 0.7, 0.05],\n row_colorbar_anchor=[0.85, 0.15, 0.02, 0.7],\n figname=None):\n # Initialize subplots.\n row_metadata = pd.DataFrame(row_metadata)\n col_metadata = pd.DataFrame(col_metadata)\n n_row = row_metadata.shape[1] + 1\n n_col = col_metadata.shape[1] + 1\n height_ratios = [15] + [1] * (n_col - 1)\n width_ratios = [15] + [1] * (n_row - 1)\n fig, axes = plt.subplots(n_col, n_row, sharex=False, sharey=False, figsize=figsize, gridspec_kw={'height_ratios': height_ratios,\n 'width_ratios': width_ratios,\n 'wspace': 0.1,\n 'hspace': 0})\n if n_row * n_col > 1:\n # Axes are flattened for easier indexing\n axes = axes.ravel()\n main_fig = sns.heatmap(df_data, vmax=vmax, vmin=vmin, ax=axes[\n 0], cbar=False, cmap=heatmap_cmap, robust=True)\n else:\n main_fig = sns.heatmap(\n df_data, vmax=vmax, vmin=vmin, cbar=False, cmap=heatmap_cmap, robust=True)\n # Make the main heatmap as the first subplot\n main_fig_axes = fig.add_axes([0.13, 0.95, 0.7, 0.05])\n main_fig_cb = plt.colorbar(main_fig.get_children()[\n 0], orientation='horizontal', cax=main_fig_axes)\n main_fig_cb.ax.set_title(\"Heatmap\", position=(1.06, 0.1), fontsize=16)\n main_fig.set_xticks([])\n main_fig.set_yticks([])\n main_fig.set_ylabel(\n 'logFC change compared with corresponding DMSO', fontsize=14)\n # Iterate through each metadata dataframe and start ploting the color bar\n # and heatmaps row-wise or column-wise\n for metadata, base_anchor, anchor_offset_location in zip([row_metadata, col_metadata], [row_colorbar_anchor, col_colorbar_anchor], [0, 1]):\n axes_offset = 1\n if metadata is None:\n continue\n # Iterate through each metadata colorbar\n for col in metadata.columns:\n metadata_vector = metadata[col]\n # Handling continuous heatmap sidebar values\n try:\n metadata_vector = metadata_vector.astype(float)\n metadata_vector = pd.DataFrame(metadata_vector, columns=[col])\n levels = metadata_vector[col].sort_values().unique()\n cmap = 'Blues'\n cb_type = 'continuous'\n # Handling descrete heatmap sidebar values, which are factorized.\n except ValueError:\n levels = metadata_vector.factorize()[1]\n metadata_vector = pd.DataFrame(\n metadata_vector.factorize()[0], columns=[col])\n cmap = sns.color_palette(\"cubehelix_r\", levels.shape[0])\n cb_type = 'discreet'\n\n # Calculate the axes index and location of the \"legend\" of the\n # 
sidebar, which are actually colorbar objects.\n if anchor_offset_location == 0:\n offset = 0.1\n # Column side bar offsets.\n ax = axes[axes_offset]\n cbar_label_orientation = 'vertical'\n cbar_title_location = (1.03, 1)\n else:\n offset = -0.1\n # Row side bar offsets.\n ax = axes[axes_offset * n_row]\n cbar_label_orientation = 'horizontal'\n cbar_title_location = (1.03, 0.1)\n metadata_vector = metadata_vector.transpose()\n\n # Plotting the sidebar and its colorbar\n anchor = base_anchor\n anchor[anchor_offset_location] = anchor[\n anchor_offset_location] + offset\n colorbar_ax = fig.add_axes(anchor)\n g = sns.heatmap(metadata_vector, ax=ax, cbar=False, xticklabels=False,\n yticklabels=False, cmap=cmap, vmax=metadata_vector.values.max() + 1)\n# g.set_title(col)\n if cb_type != 'continuous':\n cb = plt.colorbar(\n g.get_children()[0], orientation=cbar_label_orientation, cax=colorbar_ax)\n # Make correct ticks and tick labels, need to offset the lenth\n # to fix the miss-by-one problem.\n cb.set_ticks(np.arange(0.5, 0.5 + len(levels), 1))\n if anchor_offset_location == 0:\n cb.ax.set_yticklabels(levels.values, fontsize=14)\n else:\n cb.ax.set_xticklabels(levels.values, fontsize=14)\n else:\n cb = plt.colorbar(\n g.get_children()[0], orientation=cbar_label_orientation, cax=colorbar_ax)\n cb.ax.set_title(col, position=cbar_title_location, fontsize=14)\n cb.ax.invert_yaxis()\n # To the next subplot axes\n axes_offset += 1\n # Get rid of empty subplots not used in the figure.\n valid_axes_id = [x for x in range(\n n_col)] + [x * n_row for x in range(n_col)]\n for axes_id in range(len(axes)):\n if axes_id not in valid_axes_id:\n fig.delaxes(axes[axes_id])\n\n # This is a hack in order to make the correct X axis label\n axes[n_row * (n_col - 1)].set_xlabel('Treatments', fontsize=14)\n if figname is not None:\n plt.savefig(figname, bbox_inches='tight')\n plt.close()", "def make_hp_map(data, hp_indices, Nside = 2048, nest = True):\n \n print(\"len hp_indices: {}\".format(len(hp_indices)))\n hp_indices = np.array(hp_indices)\n print(\"shape of hp_indices: {}\".format(hp_indices.shape))\n hp_indices = np.squeeze(hp_indices)\n \n Npix = 12*Nside**2\n map_data = np.zeros(Npix, np.float_)\n map_data[hp_indices] = data\n \n return map_data", "def drawHeatMap(xdat, name=None, colors=pylab.cm.Reds, dendro=False, protColors=None, cIndex=None, km=None, \r\n nameDict={}, scale=None, saveName=None, colorBar=False, figSize=(6,6), topDendro=False, fig=None, axData=None):\r\n\r\n data = xdat['data']\r\n if nameDict is None:\r\n nameList = [i for i in xdat['fractions']]\r\n else:\r\n nameList = [nameDict[i] for i in xdat['fractions']]\r\n\r\n proteins = [i for i in xdat['proteins']]\r\n if fig is None:\r\n fig = pylab.figure(figsize=figSize)\r\n if not (name is None):\r\n fig.suptitle(name)\r\n ##Draw heatmap\r\n xOffset = 0.05\r\n if colorBar:\r\n xLess = 0.10\r\n else:\r\n xLess = 0.00\r\n if dendro:\r\n xStart = 0.375\r\n xLength = 0.55-xLess\r\n else:\r\n xStart = 0.125\r\n xLength = 0.85-xLess\r\n if (km is None) and (topDendro is False):\r\n yStart = 0.05\r\n yLength = 0.9\r\n else:\r\n yStart = 0.05\r\n yLength = 0.8\r\n figAxes = heatMapAxes(data, dims = [xStart, yStart, xLength, yLength], columns=nameList, rows=proteins, protColors=protColors, cIndex=cIndex, fig=fig, colors=colors, axData=axData)\r\n ##Draw colorbar\r\n if colorBar:\r\n fig.colorbar(figAxes)\r\n\r\n if dendro:\r\n ax2Data = fig.add_axes([xOffset, yStart, xLength-0.3, yLength])\r\n sch.dendrogram(xdat['rightDendro'], orientation='right', 
color_threshold=0.0)\r\n ax2Data.set_xticks([])\r\n ax2Data.set_yticks([])\r\n \r\n if topDendro:\r\n ax4Data = fig.add_axes([xStart, yStart+yLength, xLength, 0.1])\r\n sch.dendrogram(xdat['topDendro'], orientation='down', color_threshold=0.0)\r\n ax4Data.set_xticks([])\r\n ax4Data.set_yticks([])\r\n \r\n if not km is None:\r\n small = data.min()\r\n big = data.max()\r\n if math.fabs(small) > math.fabs(big):\r\n big = 0-small\r\n else:\r\n small = 0-big\r\n offset=0.0\r\n ax3Data = fig.add_axes([xStart, yLength+offset, xLength-0.1, 0.1])\r\n ax3Data.matshow(km, aspect='auto', origin='lower', cmap=colors, vmin=small, vmax=big)\r\n for i in range(len(km)):\r\n ax3Data.text(-0.75, i, 'clus'+str(i), verticalalignment=\"center\", horizontalalignment=\"right\", fontsize=10, color=cIndex(float(i)/(protColors.max()+1)))\r\n ax3Data.set_xticks([])\r\n ax3Data.set_yticks([])\r\n #fig.tight_layout()\r\n if not (saveName is None):\r\n pylab.savefig(saveName)\r\n \r\n return fig", "def getHeatMap(size_w, size_h, boxes):\n kernel = np.array([[0, 0, 1, 0, 0], [0, 1, 1, 1, 0], [1, 1, 1, 1, 1], [0, 1, 1, 1, 0], [0, 0, 1, 0, 0]]).astype(np.uint8)\n heat = torch.zeros(size=(size_h, size_w))\n for box in boxes:\n sx, sy, ex, ey = box\n sx, sy, ex, ey = int(sx), int(sy), int(ex), int(ey)\n heat[sy:ey, sx:ex] += 1\n heat = cv2.dilate(np.float32(heat), kernel= kernel, iterations=1)\n heat = cv2.GaussianBlur(np.array(heat), (5, 5), sigmaX=0)\n return heat", "def draw_heatmap_of_subset(cnts, info, genes, names, col_id='case',\n cases= [\"Case12\", \"Case7\", \"Case11\", \"Case8\"], subset_name=\"test\",\n draw=False, fs=(4,4), my_cmap=''):\n assert col_id in info.columns\n info = info[info[col_id].isin(cases)]\n data = info.merge(cnts.T, left_index=True, right_index=True)\n subset_means = []\n gene_means = []\n groups = data.groupby(col_id)\n for case in cases:\n df = groups.get_group(case)\n gene_means.append(pd.Series(df[genes].mean(), name=names[case]))\n subset_means.append((names[case], df[genes].mean().mean()))\n subset_df = pd.DataFrame.from_records(subset_means, columns=[\"Sample\", subset_name], index=\"Sample\")\n subset_df.index.name=\"\"\n gene_df = pd.concat(gene_means, axis=1)\n if draw:\n fig = plt.figure(figsize=fs)\n s = sns.heatmap(np.log2(gene_df + 1), cmap=my_cmap, linewidths=0.5, linecolor='black',\n cbar_kws={'label': 'Log2 TPMs'})\n return fig, ''\n return gene_df, subset_df", "def _heatmap_summary(pvals, coefs, plot_width=1200, plot_height=400):\n warnings.warn(\"This visualization are deprecated.\", DeprecationWarning)\n\n c = coefs.reset_index()\n c = c.rename(columns={'index': 'balance'})\n\n # fix alpha in fdr to account for the number of covariates\n def fdr(x):\n return multipletests(x, method='fdr_bh',\n alpha=0.05 / pvals.shape[1])[1]\n cpvals = pvals.apply(fdr, axis=0)\n\n # log scale for coloring\n log_p = -np.log10(cpvals + 1e-200)\n log_p = log_p.reset_index()\n log_p = log_p.rename(columns={'index': 'balance'})\n p = pvals.reset_index()\n p = p.rename(columns={'index': 'balance'})\n\n cp = cpvals.reset_index()\n cp = cp.rename(columns={'index': 'balance'})\n\n cm = pd.melt(c, id_vars='balance', var_name='Covariate',\n value_name='Coefficient')\n pm = pd.melt(p, id_vars='balance', var_name='Covariate',\n value_name='Pvalue')\n cpm = pd.melt(cp, id_vars='balance', var_name='Covariate',\n value_name='Corrected_Pvalue')\n logpm = pd.melt(log_p, id_vars='balance', var_name='Covariate',\n value_name='log_Pvalue')\n m = pd.merge(cm, pm,\n left_on=['balance', 'Covariate'],\n 
right_on=['balance', 'Covariate'])\n m = pd.merge(m, logpm,\n left_on=['balance', 'Covariate'],\n right_on=['balance', 'Covariate'])\n m = pd.merge(m, cpm,\n left_on=['balance', 'Covariate'],\n right_on=['balance', 'Covariate'])\n\n hover = HoverTool(\n tooltips=[(\"Pvalue\", \"@Pvalue\"),\n (\"Corrected Pvalue\", \"@Corrected_Pvalue\"),\n (\"Coefficient\", \"@Coefficient\")]\n )\n\n N, _min, _max = len(palette), m.log_Pvalue.min(), m.log_Pvalue.max()\n X = pd.Series(np.arange(len(pvals.index)), index=pvals.index)\n Y = pd.Series(np.arange(len(pvals.columns)), index=pvals.columns)\n m['X'] = [X.loc[i] for i in m.balance]\n m['Y'] = [Y.loc[i] for i in m.Covariate]\n\n # fill in nans with zero. Sometimes the pvalue calculation fails.\n m = m.fillna(0)\n for i in m.index:\n x = m.loc[i, 'log_Pvalue']\n ind = int(np.floor((x - _min) / (_max - _min) * (N - 1)))\n m.loc[i, 'color'] = palette[ind]\n source = ColumnDataSource(ColumnDataSource.from_df(m))\n hm = figure(title='Regression Coefficients Summary',\n plot_width=1200, plot_height=400,\n tools=[hover, PanTool(), BoxZoomTool(),\n WheelZoomTool(), ResetTool(),\n SaveTool()])\n hm.rect(x='X', y='Y', width=1, height=1,\n fill_color='color', line_color=\"white\", source=source)\n Xlabels = pd.Series(pvals.index, index=np.arange(len(pvals.index)))\n Ylabels = pd.Series(pvals.columns, index=np.arange(len(pvals.columns)), )\n\n hm.xaxis[0].ticker = FixedTicker(ticks=Xlabels.index)\n hm.xaxis.formatter = FuncTickFormatter(code=\"\"\"\n var labels = %s;\n return labels[tick];\n \"\"\" % Xlabels.to_dict())\n\n hm.yaxis[0].ticker = FixedTicker(ticks=Ylabels.index)\n hm.yaxis.formatter = FuncTickFormatter(code=\"\"\"\n var labels = %s;\n return labels[tick];\n \"\"\" % Ylabels.to_dict())\n\n return hm", "def group_data(simulation_reports: List[SimulationReport]) -> Dict[float, SimulationTable]:\n heat_maps: OrderedDict[float, SimulationTable] = OrderedDict()\n for report in simulation_reports:\n if report.param not in heat_maps:\n param_name = \"alpha\" if report.growth_type == GrowthType.Polynomial else \"gamma2\"\n simulation_table = heat_maps.setdefault(\n report.param,\n SimulationTable(report.growth_type, param_name, report.param, OrderedDict()),\n )\n else:\n simulation_table = heat_maps[report.param]\n errors_by_prefix = simulation_table.errors.setdefault(report.prefix_length, [])\n errors_by_prefix.append((report.b0, report.error))\n\n return heat_maps", "def heatmap(self, name=\"\", display=True, saveFile = False, saveFig = False, fileLocation=\"\", fullscreen=False, normalization='refbasal'):\n if fileLocation == '':\n fileLocation = self.fileLocation\n # try:\n modules.heatmap(self.experimentFullIntersection.copy(), self.cellLines, self.timePoints, name, display, saveFile, saveFig, fileLocation, fullscreen, normalization)\n # except AttributeError:\n # print(\"ERROR: Combine replicates first.\")", "def plot_heatmap(mi):\n\tfig = plt.figure()\n\tdata = np.array(mi)\n\tfig, ax = plt.subplots()\n\theatmap = ax.pcolor(data, cmap=plt.cm.jet)\n\n\tax.tick_params(direction='out')\n\n\tmajorLocator = MultipleLocator(20)\n\tmajorFormatter = FormatStrFormatter('%d')\n\tminorLocator = MultipleLocator(1)\n\n\tax.xaxis.set_major_locator(majorLocator)\n\tax.xaxis.set_major_formatter(majorFormatter)\n\tax.xaxis.set_minor_locator(minorLocator)\n\n\tax.yaxis.set_major_locator(majorLocator)\n\tax.yaxis.set_major_formatter(majorFormatter)\n\tax.yaxis.set_minor_locator(minorLocator)\n\n\tax.invert_yaxis()\n\tax.xaxis.tick_top()\n\n\t###check which seq belongs 
to each axe\n\tax.set_xlabel('Seq 2')\n\tax.set_ylabel('Seq 1')\n\n\tax.set_xlim(0, len(mi[1]))\n\tax.set_ylim(len(mi), 0)\n\n\tplt.xticks(rotation=90)\n\n\tcb = plt.colorbar(heatmap)\n\tcb.set_label('MI value')\n\n\t#pdf = PdfPages('heatmap.pdf')\n\t#pdf.savefig(fig)\n\tfig.savefig('heatmap.png')\n\t#pdf.close()", "def create_heatmap(df):\n\n fig = go.Figure(data=go.Heatmap(\n z=df.values.tolist(),\n x=df.columns,\n #y=[classifier for classifier in df.index.values.tolist()],\n y = df.index.values.tolist(),\n hoverongaps = False,\n xgap = 3,\n ygap = 3,\n colorscale=[[0.0, 'rgb(165,0,38)'], [0.1111111111111111, 'rgb(215,48,39)'], [0.2222222222222222, 'rgb(244,109,67)'], [0.3333333333333333, 'rgb(253,174,97)'], [0.4444444444444444, 'rgb(254,224,144)'], [0.5555555555555556, 'rgb(224,243,248)'], [0.6666666666666666, 'rgb(171,217,233)'], [0.7777777777777778, 'rgb(116,173,209)'], [0.8888888888888888, 'rgb(69,117,180)'], [1.0, 'rgb(49,54,149)']]\n ),\n )\n return fig" ]
[ "0.57747865", "0.5651769", "0.56350327", "0.55808216", "0.5573853", "0.5566096", "0.55594", "0.5546572", "0.5511376", "0.54935324", "0.5465989", "0.5449651", "0.5437825", "0.5425046", "0.5419889", "0.53996754", "0.53830385", "0.53745747", "0.534903", "0.52979654", "0.52845615", "0.5252452", "0.5234388", "0.52152795", "0.521238", "0.5189566", "0.5184429", "0.5154875", "0.5145357", "0.5131827" ]
0.7889706
0
Gets the data for a set of probes (including the full profiles), a limit can be provided to avoid overly long queries
def get_profiles(species_id, probes, limit=1000): profiles = ( ExpressionProfile.query.options(undefer("profile")) .filter(ExpressionProfile.probe.in_(probes)) .filter_by(species_id=species_id) .options(joinedload("sequence").load_only("name").noload("xrefs")) .limit(limit) .all() ) return profiles
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _retrieve_data(keyw, limit, page=1):\n # Max results per page is 100\n per_page = limit if limit < 100 else 100\n url = BASE_URL + QUALIFIERS % (keyw, per_page, page)\n\n req = requests.get(url)\n r_json = req.json()\n\n if limit > 100:\n r_json['items'].extend(_retrieve_data(keyw, limit - 100, page + 1).\n get('items', []))\n\n return r_json", "def getSamples(self,limit=None):\n theseReadings = self.readings\n if limit:\n theseReadings = theseReadings[:limit]\n return [x.asJSON() for x in theseReadings]", "def get_data_from_profile(self, profile_to_use):\r\n self.cursor.execute(\"SELECT * FROM \" + profile_to_use)\r\n\r\n for row in self.cursor.fetchall():\r\n if row[0] == \"PvNa_UNITS\":\r\n self.output_zakladki.append(row[1])\r\n\r\n a = [elem.strip() for elem in row[2].split(\",\")]\r\n self.output_leki.append(a)\r\n\r\n elif row[0] == \"CEGLY\":\r\n self.output_leki_cegly = [elem.strip() for elem in row[2].split(\",\")]\r\n\r\n elif row[0] == \"lista_cegiel\":\r\n self.output_lista_cegiel = [elem.strip() for elem in row[2].split(\",\")]", "def get(self, base_url, observable, limit, credentials):\n\n url = url_join(base_url, self.filter(observable)) + f'&$top={limit}'\n\n response = get_data(url, credentials)\n\n return [\n self.sighting(observable, x) for x in response.get('value', [])\n ]", "def query_all():\n all_nutrients = {}\n offset_counter = 0\n\n while offset_counter <= 7500:\n query_string = 'https://api.nal.usda.gov/ndb/nutrients/?format=json&api_key={}&nutrients=205&nutrients=204&nutrients=208&nutrients=269&offset={}'.format(api_key, offset_counter)\n results = requests.get(query_string)\n all_nutrients[offset_counter] = results.json()\n offset_counter += 150\n\n print all_nutrients", "def get_brapi_trials(endpoint):\n page = 0\n pagesize = 10\n maxcount = None\n while maxcount is None or page*pagesize < maxcount:\n params = {'page': page, 'pageSize': pagesize}\n r = requests.get(endpoint+'trials', params=params)\n if r.status_code != requests.codes.ok:\n raise RuntimeError(\"Non-200 status code\")\n maxcount = int(r.json()['metadata']['pagination']['totalCount'])\n for trial in r.json()['result']['data']:\n yield trial\n page += 1", "def fetch_all(profile):\n params = {}\n params[\"profile\"] = profile\n response = utils.do_request(instanceprofile, \"get\", params)\n data = utils.get_data(\"InstanceProfiles\", response)\n return data", "async def all(self):\n log('retrieving profile cards..')\n start = time.monotonic()\n profiles = await asyncio.gather(*list(\n map(lambda profile: self.retrieve(str(profile['id'])), await self.list())\n ))\n elapsed = \"%0.2fs\" % (time.monotonic() - start,)\n log(\"retrieved {} profile cards in {}\".format(len(profiles), elapsed))\n return {\n \"hits\": len(profiles),\n \"updated\": time.time() * 1000,\n \"time\": elapsed,\n \"applicants\": list(filter(None, map(self.parse, profiles)))\n }", "def profiles(self):\n with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:\n return list(filter(lambda x: x is not None, executor.map(self.profile_details, self.profiles_names())))", "def getmeta(personId,limit=1000,showTagged=True,lastKeyTS=0):\n db = boto3.resource('dynamodb', region_name='us-east-1')\n table = db.Table(cfg['person_recognition_table'])\n\n if limit is None:\n limit = 500\n\n if lastKeyTS is None:\n lastKeyTS = 0\n\n if showTagged is None:\n showTagged = True\n\n limit = int(limit)\n lastKeyTS = int(lastKeyTS)\n showTagged = bool(int(showTagged))\n\n if personId:\n\n resp = 
table.query(ProjectionExpression='cameraId,ts,personId,videos,matches',\n IndexName='personId-index',\n KeyConditionExpression=Key('personId').eq(personId)\n )\n\n if resp['Count'] > 0:\n item = resp['Items'][0]\n\n faces = []\n if 'faces' in item:\n for f in item['faces']:\n tlx = int(f['bbox'][0])\n tly = int(f['bbox'][1])\n blx = int(f['bbox'][2])\n bly = int(f['bbox'][3])\n faces.append([tlx, tly, blx, bly])\n\n matches = []\n if 'matches' in item:\n for m in item['matches']:\n matches.append(str(m))\n\n known = False\n if 'known' in item:\n known = bool(item['known'])\n\n tagged = False\n if 'tagged' in item:\n tagged = bool(item['tagged'])\n\n ts = int(item['ts'])\n\n return [{'cameraId' : item['cameraId'],\n 'personId': personId,\n 'ts': ts,\n 'matches':matches,\n 'videos': item['videos']\n }]\n\n return {'info': 'personId {} not found'.format(personId)}\n\n else:\n\n items = []\n continue_scan = True\n last_evaluated_key = None\n\n while continue_scan:\n\n if last_evaluated_key:\n # grab x number of non-tagged items\n if showTagged:\n\n resp = table.query(ProjectionExpression='cameraId,ts,personId,videos,known,tagged',\n KeyConditionExpression=Key('cameraId').eq('1') & Key('ts').lt(lastKeyTS),\n Limit=limit,\n ScanIndexForward=False,\n ExclusiveStartKey=last_evaluated_key\n )\n else:\n resp = table.query(ProjectionExpression='cameraId,ts,personId,videos,known,tagged',\n KeyConditionExpression=Key('cameraId').eq('1') & Key('ts').lt(lastKeyTS),\n FilterExpression=Key('tagged').eq(False),\n Limit=limit,\n ScanIndexForward=False,\n ExclusiveStartKey=last_evaluated_key\n )\n else:\n if showTagged:\n\n resp = table.query(ProjectionExpression='cameraId,ts,personId,videos,known,tagged',\n KeyConditionExpression=Key('cameraId').eq('1') & Key('ts').lt(lastKeyTS),\n Limit=limit,\n ScanIndexForward=False\n )\n else:\n resp = table.query(ProjectionExpression='cameraId,ts,personId,videos,known,tagged',\n KeyConditionExpression=Key('cameraId').eq('1') & Key('ts').lt(lastKeyTS),\n FilterExpression=Key('tagged').eq(False),\n Limit=limit,\n ScanIndexForward=False\n )\n if 'LastEvaluatedKey' in resp:\n continue_scan = True\n else:\n continue_scan = False\n\n if continue_scan:\n last_evaluated_key = resp['LastEvaluatedKey']\n\n if 'Count' not in resp:\n return {'error': 'table query failed'}\n\n if resp['Count'] > 0:\n for i in resp['Items']:\n j = {'cameraId' : i['cameraId'],\n 'personId': str(i['personId']),\n 'ts': int(i['ts']),\n 'videos': i['videos'],\n 'known' : bool(i['known']),\n 'tagged': bool(i['tagged'])\n }\n items.append(j)\n\n return items", "def test_get_risk_profile_all_using_get(self):\n pass", "def __test_profile(self, bk):\n for arg in self.args['profile']:\n ds = ArgoDataFetcher(backend=bk).profile(*arg).to_xarray()\n assert isinstance(ds, xr.Dataset) == True", "def gather_all_profiles(year, month):\n page = 1\n urls = []\n\n print(\"{}-{} : Begin indexing.\".format(year, month))\n\n while (page > 0):\n urlstring = \"http://scamdigger.com/{}/{}/page/{}\".format(year,month,page) \n jitter = random.choice([0,1])\n try:\n urlhandle = urlopen(urlstring)\n urls += enumerate_profiles(urlhandle, page)\n # time.sleep(1+jitter)\n page += 1\n except:\n page = 0\n\n print(\"{}-{} : {} profiles\".format(year,month,len(urls)))\n\n for url in urls:\n uid = url[30:-1]\n outfile=PROFILES+os.sep+uid+'.json'\n jitter = random.choice([0,1])\n try:\n urlhandle = urlopen(url)\n scrape_profile(urlhandle, outfile, year, month)\n # time.sleep(1+jitter)\n except Exception as e:\n print(\"Exception when 
handling {}\".format(url))\n print(e)\n \n print(\"{}-{} : complete.\".format(year,month))", "def get_all(granularity: str, limit: int = 0) -> list:\n if granularity == \"province\":\n table = \"area_province\"\n deserilize_func = deserilize_edpidemic_province\n else:\n table = \"area_city\"\n deserilize_func = deserilize_edpidemic_city\n\n if limit <= 0:\n limit = None\n\n # table name is not transormbale by psycopg2\n SQL = \"\"\"SELECT * FROM {table} LIMIT %(limit)s\"\"\".format(table=table)\n\n results = database.query(sql=SQL, table=table, limit=limit)\n return list(map(deserilize_func, results))", "def info_search(profiles: list, session: requests.Session):\n counter = 0\n results = []\n for profile in profiles:\n info = {}\n sleep(0.5)\n soup = BeautifulSoup(session.get(profile).text, \"lxml\")\n details = soup.find(\"div\", class_=\"page-content seaman-page-content\")\n info['name'] = details.h1.text\n try:\n for row in details.find_all(\"div\", class_=\"colmn3\"):\n row_content = row.text.split(\":\")\n if row_content[0] == \"Personal mobile number\":\n info['phone'] = row_content[1]\n if row_content[0] == \"E-Mail\":\n info['email'] = row_content[1]\n if 'phone' in info and duplicates_checker(results, info['phone']):\n results.append(info)\n vcard_handler(info)\n except AttributeError:\n continue\n if len(results) % 10 == 0:\n counter += 10\n logger.info(f'{counter} profiles added out of {len(profiles)}')\n logger.info(f'Finished adding {len(profiles)} profiles')\n return results", "def load_presn_runs(offset, limit):\n now_local = datetime.now()\n timelimit = timedelta(hours=100)\n server = couchdb.Server(\"http://snoplus:\"+app.config[\"COUCHDB_PASSWORD\"]+\"@\"+app.config[\"COUCHDB_HOSTNAME\"])\n db = server[\"pre-supernova\"]\n results = []\n skip = offset\n all = db.view('_design/presn/_view/presn_by_date_run', descending=True, skip=skip)\n total = all.total_rows\n offset = all.offset\n for row in db.view('_design/presn/_view/presn_by_date_run', descending=True, limit=limit, skip=skip):\n year = row.key[0]\n mon = row.key[1]\n day = row.key[2]\n hour = row.key[3]\n minute = row.key[4]\n sec = row.key[5]\n run = row.value\n run_id = row.id\n runtime=datetime(year, mon, day, hour, minute, sec)\n timediff = now_local - runtime\n if timediff<timelimit:\n try:\n results.append(dict(db.get(run_id).items()))\n except KeyError:\n app.logger.warning(\"Code returned KeyError searching for presn information in the couchDB. 
Run Number: %d\" % run)\n results.sort(reverse=True)\n return results, total, offset, limit", "async def test_retrieve_many(self):\n expected = [{\n '_id': 'id',\n 'name': 'name',\n 'version': 4,\n 'status': 'active'\n }]\n rsps = respx.get(f'{PROVISIONING_API_URL}/users/current/provisioning-profiles') \\\n .mock(return_value=Response(200, json=expected))\n profiles = await provisioning_client.get_provisioning_profiles(5, 'active')\n assert rsps.calls[0].request.url == \\\n f'{PROVISIONING_API_URL}/users/current/provisioning-profiles?version=5&status=active'\n assert rsps.calls[0].request.headers['auth-token'] == 'header.payload.sign'\n assert profiles == expected", "def get_all(self, q=None, limit=None):\r\n q = q or []\r\n if limit and limit < 0:\r\n raise ClientSideError(_(\"Limit must be positive\"))\r\n kwargs = _query_to_kwargs(q, storage.SampleFilter.__init__)\r\n kwargs['meter'] = self.meter_name\r\n f = storage.SampleFilter(**kwargs)\r\n return [OldSample.from_db_model(e)\r\n for e in pecan.request.storage_conn.get_samples(f, limit=limit)\r\n ]", "def getProbes(self):\n probes = \"\"\n try:\n p = self.fetchProbes()\n for probe in p:\n probes += \"{id};{ip};{status};\\n\".format(id = probe.getId(), ip = probe.getIp(), status = probe.getStatus())\n except ProbeConnectionFailed:\n probes = self.error\n finally:\n return probes", "def getUniverses(limit=None):\n url = f\"https://develop.roblox.com/v1/user/universes?limit={limit}&sortOrder=Desc\"\n if limit in (10, 25, 50):\n r = requests.get(url, cookies=cookie)\n j = json.loads(r.text)\n return j\n else:\n limit = 50\n r = requests.get(url, cookies=cookie)\n j = json.loads(r.text)\n return j", "def get_all(self, q=None, limit=None):\r\n q = q or []\r\n\r\n if limit and limit < 0:\r\n raise ClientSideError(_(\"Limit must be positive\"))\r\n kwargs = _query_to_kwargs(q, storage.SampleFilter.__init__)\r\n f = storage.SampleFilter(**kwargs)\r\n return map(Sample.from_db_model,\r\n pecan.request.storage_conn.get_samples(f, limit=limit))", "def get_table_data(table_name, query, pages, table_columns, headers, base_url, maxpagesize):\n\n\n logging.info(\"Running get_table_data() . . . \")\n table_data = []\n for p in range(pages):\n page_number = p + 1\n\n #print('\\tGetting page number {}'.format(page_number))\n #print(\"Running TEST MESSAGE . . . 
\")\n\n endpoint = '{0}/ws/schema/table/{1}?{2}page={3}&pagesize={4}&projection={5}'.format(base_url, table_name, query, page_number, maxpagesize, table_columns)\n r_data = requests.get(endpoint, headers=headers)\n\n if r_data.ok:\n data_json = r_data.json()\n records = data_json['record']\n for r in records:\n table_data.append(r['tables'][table_name])\n else:\n logging.info(r_data.text)\n raise Exception(r_data.text)\n\n return table_data", "def test_get_many(self):\r\n\r\n with open(os.path.join(RESOURCE_PATH, 'ND072023.PD0'), 'rb') as stream_handle:\r\n\r\n parser = AdcpPd0Parser(self.config_recov, stream_handle, self.exception_callback)\r\n\r\n particles = parser.get_records(54)\r\n log.debug('got back %d records', len(particles))\r\n\r\n self.assert_particles(particles, 'ND072023_recov.yml', RESOURCE_PATH)", "def get_results(job, limit):\n reader = results.ResultsReader(job.results(count=limit))\n return {\"results\": [row for row in reader]}", "def aggregated_results(self, limit=2000) -> List[dict]:\n stored_events = []\n for events in self._iter_events():\n stored_events.extend(events)\n if len(stored_events) >= limit:\n return stored_events[:limit]\n return stored_events", "def make_data(self, limit: int):", "def _granule_samples(found_collections, filters, limit, config):\n found_granules = []\n for concept in found_collections:\n query = {\"concept_id\": concept}\n granules = search(query, filters=filters, limit=limit, config=config)\n found_granules.extend(granules)\n return found_granules[:len(found_collections)*limit]", "def dashboard():\n features = request.args.get('features', None)\n interval = request.args.get('interval', \"all_time\")\n features = DeviceData.features() if features is None else features.split(\",\")\n\n if interval == \"past_minute\":\n cutoff_time = dt.now() - timedelta(minutes=1)\n elif interval == \"past_hour\":\n cutoff_time = dt.now() - timedelta(minutes=60)\n else:\n cutoff_time = None\n\n response = {}\n for feature in features:\n key = \"_\".join([feature, interval])\n cache = json.loads(memcached.get(key))\n\n # cache hit based on timestamp of oldest record\n cache_timestamp = dt.fromisoformat(cache[\"timestamp\"])\n if cutoff_time and cache_timestamp >= cutoff_time:\n top_readings = [dd_dict for _, _, dd_dict in cache[\"minmaxes\"]]\n\n # cache miss, query and update memcached\n else:\n num_top_devices = int(environ.get('NUM_TOP_DEVICES'))\n # get the records by max feature value, distinct on deviceId\n # max value over all time\n if cutoff_time is None:\n tr_query = db.session.query(\n DeviceData.deviceId,\n func.max(getattr(DeviceData, feature))\n ).group_by(DeviceData.deviceId)\n else:\n tr_query = db.session.query(\n DeviceData.deviceId,\n func.max(getattr(DeviceData, feature))\n ).group_by(\n DeviceData.deviceId\n ).filter(DeviceData.timestamp >= cutoff_time)\n\n # get the full records to return\n top_records = tr_query.limit(num_top_devices).all()\n device_data = DeviceData.query.filter(\n tuple_(DeviceData.deviceId, getattr(DeviceData, feature)).in_(top_records)\n )\n top_readings = sorted(\n [dd.to_dict() for dd in device_data],\n reverse=True,\n key=(lambda x: x[feature])\n )\n # update memcached values with db query results\n timestamp, array = dt.now().isoformat(), []\n if len(top_readings) > 0:\n timestamp = min(x[\"timestamp\"] for x in top_readings)\n array = [\n [-dd_dict[feature], dd_dict[\"deviceId\"], dd_dict]\n for dd_dict in top_readings]\n memcached.set(\n key,\n json.dumps({\"timestamp\": timestamp, \"minmaxes\": array})\n 
)\n response[feature] = top_readings\n\n return jsonify(response)", "def _fetch_sample_data_from_user_query(self) -> TableData:\n rnd = self.session.execute(f\"{self._profile_sample_query}\")\n try:\n columns = [col.name for col in rnd.cursor.description]\n except AttributeError:\n columns = list(rnd.keys())\n return TableData(\n columns=columns,\n rows=[list(row) for row in rnd.fetchmany(100)],\n )", "def fetch_sample_data(self, table) -> TableData:\n sampler = DatalakeSampler(\n session=self.client,\n table=self.data_frame_list,\n profile_sample_config=self.profile_sample_config,\n partition_details=self.partition_details,\n profile_sample_query=self.profile_query,\n )\n return sampler.fetch_dl_sample_data()" ]
[ "0.60100675", "0.58483875", "0.5483683", "0.5466175", "0.5350069", "0.53199553", "0.5314926", "0.53077745", "0.5280295", "0.52494043", "0.5166348", "0.5158148", "0.51288855", "0.5115098", "0.5088638", "0.50777245", "0.5070165", "0.5061214", "0.50397336", "0.50375605", "0.50293726", "0.5025832", "0.5022995", "0.5019288", "0.5011114", "0.50023395", "0.4979552", "0.49556515", "0.49368206", "0.49260306" ]
0.67031264
0
Fill all pixels of the surface with color, preserve transparency.
def fill(surface, color): w, h = surface.get_size() r, g, b, _ = color for x in range(w): for y in range(h): a = surface.get_at((x, y))[3] surface.set_at((x, y), pygame.Color(r, g, b, a))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fill_color(self, _col):\n for x in xrange(0, self.__resolution[0], 1):\n for y in xrange(0, self.__resolution[1], 1):\n self.__framebuffer[(x, y)] = _col", "def main_background():\n surface.fill(COLOR_GRAY)", "def fill(self, color):", "def fill(self, colour: int, /) -> None:", "def fill(framebuf, color):\n if color:\n fill = 0xFF\n else:\n fill = 0x00\n for i in range(len(framebuf.buf)): # pylint: disable=consider-using-enumerate\n framebuf.buf[i] = fill", "def fill(framebuf, color):\n if color:\n fill = 0xFF\n else:\n fill = 0x00\n for i in range(len(framebuf.buf)): # pylint: disable=consider-using-enumerate\n framebuf.buf[i] = fill", "def fill(framebuf, color):\n fill = (color >> 16) & 255, (color >> 8) & 255, color & 255\n for i in range(0, len(framebuf.buf), 3):\n framebuf.buf[i : i + 3] = bytes(fill)", "def fill(self, color):\n color = spyral.color._determine(color)\n self._surf.fill(color)", "def solid_color(color):\n surface = pygame.Surface((1,1))\n surface.fill(color)\n return surface", "def fill(self, rgb, alpha=100):\n self.call('fill', rgb, alpha)", "def main_background():\n surface.fill(COLOR_BACKGROUND)", "def glclear(self):\n self.pixels = [\n [color(self.r, self.g, self.b) for x in range(self.width)]\n for y in range(self.height)\n ]", "def __fill_lip_solid(self, outer, inner):\n inner[0].reverse()\n inner[1].reverse()\n outer_curve = zip(outer[0], outer[1])\n inner_curve = zip(inner[0], inner[1])\n points = []\n for point in outer_curve:\n points.append(np.array(point, dtype=np.int32))\n for point in inner_curve:\n points.append(np.array(point, dtype=np.int32))\n points = np.array(points, dtype=np.int32)\n self.red_l = int(self.red_l)\n self.green_l = int(self.green_l)\n self.blue_l = int(self.blue_l)\n cv2.fillPoly(self.image, [points], (self.red_l, self.green_l, self.blue_l))", "def fill(framebuf, color):\n if color:\n bits = color & 0b11\n fill = (bits << 6) | (bits << 4) | (bits << 2) | (bits << 0)\n else:\n fill = 0x00\n\n framebuf.buf = [fill for i in range(len(framebuf.buf))]", "def fill(self, color):\n self._surf.fill(color)\n self._version += 1\n spyral.util.scale_surface.clear(self._surf)\n return self", "def fill_rect(framebuf, x, y, width, height, color):\n # pylint: disable=too-many-arguments\n fill = (color >> 16) & 255, (color >> 8) & 255, color & 255\n for _x in range(x, x + width):\n for _y in range(y, y + height):\n index = (_y * framebuf.stride + _x) * 3\n framebuf.buf[index : index + 3] = bytes(fill)", "def fill():\n # Switch in edit mode\n bpy.ops.object.mode_set(mode = 'EDIT')\n \n # Fill hole\n bpy.ops.mesh.fill()", "def change_colour_surface(surface, r, g, b):\n arr = pg.surfarray.pixels3d(surface)\n arr[:, :, 0] = r\n arr[:, :, 1] = g\n arr[:, :, 2] = b", "def paint_fill(image, col, row, color, orig_color):\n\n if image[row][col] != orig_color:\n return\n if row < 0 or row >= len(image) or col < 0 or col >= len(image[0]):\n return\n\n image[row][col] = color\n\n paint_fill(image, col - 1, row, color, orig_color)\n paint_fill(image, col + 1, row, color, orig_color)\n paint_fill(image, col, row - 1, color, orig_color)\n paint_fill(image, col, row + 1, color, orig_color)\n\n return", "def pixel( self, x, y, c = '#ffffff' ):\n self.raster.put( c, ( x, y ) )", "def setSurfaceColors(topcolor=-1,bottomcolor=-1):\n dislin.surclr(topcolor, bottomcolor)", "def fill(self, color: int) -> None:\n red = (color >> 16) & 0xFF\n green = (color >> 8) & 0xFF\n blue = color & 0xFF\n for x in range(24):\n offset = unpack_from(\">HHH\", self.ledmap_bytes, x * 
6)\n self._is31[offset[self.r_offset]] = red\n self._is31[offset[self.g_offset]] = green\n self._is31[offset[self.b_offset]] = blue", "def fill(self, color: int) -> None:\n red = (color >> 16) & 0xFF\n green = (color >> 8) & 0xFF\n blue = color & 0xFF\n for x in range(24):\n offset = unpack_from(\">HHH\", self.ledmap_bytes, x * 6)\n self._is31[offset[self.r_offset]] = red\n self._is31[offset[self.g_offset]] = green\n self._is31[offset[self.b_offset]] = blue", "def fill(self, arr, color=None):\n\n for point in self.points:\n arr[point.x][point.y] = color if color is not None else point.color", "def fill_rect(framebuf, x, y, width, height, color):\n # pylint: disable=too-many-arguments\n for _x in range(x, x + width):\n offset = 7 - _x & 0x07\n for _y in range(y, y + height):\n index = (_y * framebuf.stride + _x) // 8\n framebuf.buf[index] = (framebuf.buf[index] & ~(0x01 << offset)) | (\n (color != 0) << offset\n )", "def fill(self, framebuf, color):\n rgb565_color = self.color_to_rgb565(color)\n for i in range(0, len(framebuf.buf), 2):\n framebuf.buf[i : i + 2] = rgb565_color", "def pixel(self, x: int, y: int, colour: int, /) -> None:", "def set_transparent(img):\n assert img.shape[-1] == 4\n white_pix = np.all(img == [255, 255, 255, 255], axis=-1)\n # print(white_pix)\n img[white_pix, -1] = 0\n # return img", "def clear(self, fill = 0x00):\n self._buffer = [ fill ] * ( self.width * self.height )", "def fill_rect(self, framebuf, x, y, width, height, color):\n # pylint: disable=too-many-arguments\n rgb565_color = self.color_to_rgb565(color)\n for _y in range(2 * y, 2 * (y + height), 2):\n offset2 = _y * framebuf.stride\n for _x in range(2 * x, 2 * (x + width), 2):\n index = offset2 + _x\n framebuf.buf[index : index + 2] = rgb565_color" ]
[ "0.6754669", "0.6575468", "0.6551719", "0.65116376", "0.6489565", "0.6489565", "0.646568", "0.64328253", "0.63633025", "0.6341166", "0.6229023", "0.62250054", "0.6200527", "0.619609", "0.6117054", "0.6111317", "0.60966897", "0.6091011", "0.6048498", "0.5998635", "0.5975528", "0.5929514", "0.5929514", "0.5901753", "0.5899583", "0.58935267", "0.585263", "0.58494246", "0.58174276", "0.5805012" ]
0.77070856
0
Returns the absolute url for this plan_proposal for preview purposes.
def get_absolute_url(self): return reverse('plan_proposal', kwargs = {'project_name': self.project.slug, 'proposal_name': self.slug})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_absolute_url(self) -> str:\n return self.proposition.get_absolute_url()", "def get_absolute_url(self) -> str:\n return self.cagnotte.get_absolute_url()", "def get_url(self):\n return self.resource.url", "def get_absolute_url(self) -> str:\n return reverse(\"cv_detail\", kwargs={\"pk\": self.pk})", "def get_absolute_url(self):\n return get_front_end_url(self)", "def url(self):\n url = self.url\n return url", "def url(self):\n return self.full()", "def url(self) -> str:\n return self._url", "def url(self) -> str:\n return self._url", "def url(self) -> str:\n return self._url", "def get_absolute_url(self):\n return reverse('project-detail', kwargs={'pk': self.pk})", "def display_url(self) -> Optional[str]:\n return pulumi.get(self, \"display_url\")", "def get_absolute_url(self):\n if self.kind == \"persona_profile\":\n p = Persona.query.filter(Persona.profile_id == self.id).first()\n return url_for(\"persona\", id=p.id)\n elif self.kind == \"group_profile\":\n g = Group.query.filter(Group.profile_id == self.id).first()\n return url_for(\"group\", id=g.id)\n elif self.kind == \"index\":\n p = Persona.query.filter(Persona.index_id == self.id).first()\n return url_for(\"persona\", id=p.id)", "def getUrl(self):\n\n return self.toUrlForm()", "def url(self) -> str:\n return self.url_as()", "def get_url(self):\n\n return self.url", "def get_url(self):\n\n return self.url", "def get_url(self):\n return self.url", "def get_url(self):\n return self.url", "def get_absolute_url(self) -> str:\n return reverse(\n \"cagnottesolidaire:proposition\",\n kwargs={\"slug\": self.slug, \"p_slug\": self.cagnotte.slug},\n )", "def get_absolute_url(self):\n return ('')", "def get_absolute_url(self):\n return ('project_detail', (), {\n 'name': self.title\n })", "def get_url(self):\n return self.url.format(\n base_url=self.base_url,\n description=urllib.quote_plus(self.description),\n location=urllib.quote_plus(self.location),\n )", "def url(self) -> str:\n return f\"{self._get_base_url()}{self.path_extension}\"", "def get_url(self):\n return self._url", "def url(self):\n\n return self._url", "def url(self):\n\n return self._url", "def url(self):\n\n return self._url", "def url(self):\n return self._url", "def url(self):\n return self._url" ]
[ "0.7132922", "0.68332034", "0.67404026", "0.6709807", "0.67093635", "0.66793925", "0.6630603", "0.65069014", "0.65069014", "0.65069014", "0.64967704", "0.64927083", "0.649157", "0.6489108", "0.6484969", "0.64831454", "0.64831454", "0.6449211", "0.6449211", "0.6435623", "0.6399839", "0.6395059", "0.63787764", "0.63712984", "0.6364208", "0.63551635", "0.63551635", "0.63551635", "0.6354352", "0.6354352" ]
0.819664
0
Sent by clients when they enter a room. A status message is broadcast to all people in the room.
def joined(message): #room = session.get('room') room='abc' join_room(room) #emit('status', {'msg': session.get('name') + ' has entered the room.' + message['msg']}, room=room) emit('status', {'msg': 'Yao has entered the room.'}, room=room) #emit('status', {'msg': 'Yao has entered the room.'}, room='room1')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def joined(message):\n room = session.get('room')\n join_room(room)\n emit('status', {'msg': session.get('name') + ' has entered the room.'}, room=room)", "def joined(message):\n room = session.get('room')\n join_room(room)\n emit('status', {'msg': session.get('name') + ' has entered the room.'}, room=room)", "def joined(message):\n global list_messages\n room = session.get('room')\n join_room(room)\n print ('joined session list_messages ' + str(len(list_messages)) + ' , session ' + str(session) +'\\n')\n emit('status', {'msg': str(clients)})\n for x in list_messages:\n emit('status', {'msg': x})\n emit('status', {'msg': session.get('name') + ' has entered the room.'}, room=room)", "def handle_enter_room_session(self, lobby_command, client_socket):\n words = lobby_command.split()\n sent_name = words[1]\n user = self.clients[client_socket]['data'].decode('utf-8')\n for room in self.rooms:\n if room.name == sent_name and user in room.room_attrbts['members']:\n room.room_attrbts['active'].add(user)\n msg = f'User {user} is a member of room {sent_name}. Entering user into active mode for this room. ACTIVE'\n print(msg)\n return\n msg = f'Room {sent_name} not found or user {user} is not yet a member. NONACTIVE'\n self.log_and_send(client_socket, msg)\n return", "async def status(self, ctx:utils.Context, status:str):\n\n status_o = getattr(discord.Status, status.lower())\n await self.bot.change_presence(activity=self.bot.guilds[0].me.activity, status=status_o)", "def joined(message):\n room = session.get('room')\n join_room(room)\n emit('status', {'msg': session.get('name') + ' joined'}, room=room)", "def on_status(self, status):\n log.debug(\"Received status: %d\", status.id)", "def on_join(data):\n username = request.sid\n room = data\n join_room(room)\n logging.info(username + ' has entered the room.')\n send(username + ' has entered the room.', room=room)", "def start(self, event):\n self.send_presence()\n self.get_roster()\n self.send_message(mto=self.recipient, mbody=self.msg, mtype='chat')\n self.disconnect(wait=True)", "def send_game_status(ok, msg, client_key, from_name, send_message_func ):\n\n status_type = messageActions.Action_status.TYPE_GAME\n\n new_client_message = message.Message( client_key, 's' )\n new_message = new_client_message.new_message( from_name, status_type, ok, msg )\n new_client_message.message = new_message\n new_client_message.to_clients = [ client_key ]\n\n send_message_func( new_client_message )", "def send_status_update(self):\n command = {\n \"light_id\": self.id,\n \"status\": self.status,\n \"created\": self.created.strftime(\"%a %d %b %Y %H:%M\"),\n }\n Group('lights').send({\n # WebSocket text frame, with JSON content\n \"text\": json.dumps(command),\n })", "def on_invite(self, room_id, state):\n logger.info('got invite for room %s', room_id)\n self.client.join_room(room_id)", "def start(self, event):\n self.send_presence()\n self.get_roster()", "def start(self, event):\n\t\tself.get_roster()\n\t\tself.send_presence()", "def join_room(self, client, room):\n if room.verify_if_is_invited(client):\n room.add_member(client)\n self.send_message('Te has unido a la sala {}'.format(room.get_name()), client)\n else:\n self.send_message('No estas invitado a la sala.', client)", "def on_join(data):\r\n\r\n username = data[\"username\"]\r\n room = data[\"room\"]\r\n join_room(room)\r\n\r\n # Broadcast that new user has joined\r\n send({\"msg\": username + \" has joined the \" + room + \" room.\"}, room=room)", "def update_status(self) -> None:\n try:\n (rc, mid) = 
self.mqttc.publish(\n self.config.status_topic, json.dumps(self.status), qos=0, retain=False\n )\n if rc == mqtt.MQTT_ERR_SUCCESS:\n logging.info(\n f\"The request for a status update has been successfully accepted: mid={mid}\"\n )\n else:\n logging.warning(\"The request for a status update has been rejected\")\n except ValueError as e:\n logging.warning(f\"Cannot send status update: {e}\")", "async def status(self, context):\n await self.send_message(context, await self.status_msg_packed(context))", "def broadcast():\n # global receiving_message\n # if not receiving_message:\n router.broadcast(clients.copy(), json.dumps(current_state))", "async def status(self, ctx, *, status=None):\n # [p]set status <status>\n\n statuses = {\n \"online\": discord.Status.online,\n \"idle\": discord.Status.idle,\n \"dnd\": discord.Status.dnd,\n \"invisible\": discord.Status.invisible\n }\n\n server = ctx.message.server\n\n current_game = server.me.game if server is not None else None\n\n if status is None:\n await self.bot.change_presence(status=discord.Status.online,\n game=current_game)\n await self.bot.say(\"Status reset.\")\n else:\n status = statuses.get(status.lower(), None)\n if status:\n await self.bot.change_presence(status=status,\n game=current_game)\n await self.bot.say(\"Status changed.\")\n else:\n await send_command_help(ctx)", "def on_status_update(self, data):\n # TODO: Update User/Client object with this info\n print ('Status Update: %s' % data)", "def monitor(bot):\n\n timestamp = datetime.now()\n \n # If the monitor status file exists, clubroom is open.\n if os.path.exists(STATUSFILE):\n if not get_state():\n print(timestamp.strftime(\"[%d %b, %H:%M]\") + \" -- Open!\")\n set_state(OPEN)\n \n # Randomly choose and send alert message to channel\n alert = ALERT[random.randint(0, len(ALERTS) - 1)]\n for channel in bot.channels:\n bot.msg(channel, alert)\n else:\n if get_state():\n print(timestamp.strftime(\"[%d %b, %H:%M]\") + \" -- Closed!\")\n for channel in bot.channels:\n bot.msg(channel, \"Activity ended.\"))\n clear_topic()\n set_state(CLOSED)\n update_report(\"\", \"\", \"\")", "def status(self, msg):\n oscid = self.app.global_osc_id()\n print(\"STATUS : /Llia/%s : %s\" % (oscid, msg))", "def handle_list_room(self, lobby_command, client_socket):\n print(\"Handling list command...\")\n msg = ''\n words = lobby_command.split()\n # List all rooms\n if len(words) == 1:\n msg = 'Available Rooms:\\n'\n for room in self.rooms:\n msg += f'\\t\\t{room.name}\\n'\n \n self.just_send(client_socket, msg)\n return\n else:\n # List all rooms and members\n roomname = words[1]\n if roomname == \"all\":\n user = self.clients[client_socket]['data'].decode('utf-8')\n msg = f'All rooms and users:\\n'\n for room in self.rooms:\n msg += f'Room: {room.name}\\nUsers: '\n for user in room.room_attrbts['members']:\n msg += f'\\t{user}'\n if user in room.room_attrbts['admins']:\n msg += ' - Admin'\n msg += '\\n'\n msg += '\\n'\n self.just_send(client_socket, msg)\n return\n\n # List user's room membership\n if roomname == \"mine\":\n user = self.clients[client_socket]['data'].decode('utf-8')\n msg = f'Rooms user {user} has joined:\\n'\n for room in self.rooms:\n if user in room.room_attrbts['members']:\n msg += f'\\t\\t{room.name}'\n if user in room.room_attrbts['admins']:\n msg += ' - Admin'\n msg += '\\n'\n self.just_send(client_socket, msg)\n return\n \n # List membership and active users of a room\n for _room in self.rooms:\n if _room.name == roomname:\n print(\"Request roomname found..\")\n msg = f'User 
members of room {roomname}:\\n'\n for member in _room.room_attrbts['members']:\n msg += f'\\t\\t{member}\\n'\n msg+= '\\n'\n self.just_send(client_socket, msg)\n \n msg = 'Users active in room:\\n'\n for active_user in _room.room_attrbts['active']:\n msg += f'\\t\\t{active_user}\\n'\n self.just_send(client_socket, msg)\n return\n if msg == '':\n msg = f'Client passed an invalid room to list members of {roomname}\\n'\n self.log_and_send(client_socket, msg)\n return", "def handle_send_to_room(self, lobby_command, client_socket):\n words = lobby_command.split()\n sent_name = words[1]\n sending_user = self.clients[client_socket]['data'].decode('utf-8')\n for room in self.rooms:\n if room.name == sent_name:\n actual_words = words[2:]\n actual_words = ' '.join(actual_words)\n actual_words += '\\n'\n msg = f\"[{sent_name}] {sending_user}: {actual_words}\"\n for client in self.clients:\n found_user = self.clients[client]['data'].decode('utf-8')\n if found_user in room.room_attrbts['members'] and found_user != sending_user:\n self.log_and_send(client, msg)\n print(f\"Successfully sent message to all members of {sent_name}\")\n return\n msg = f\"Could not find room {sent_name} requested by {sending_user}\"\n self.log_and_send(client_socket, msg)\n msg = f\"format for command is $$send [roomname] message\"\n self.log_and_send(client_socket, msg)\n return", "def send_status(self):\n self.data = {\n 'value': '',\n 'state': self.state,\n }\n event_manager.device_changed(self)", "def query_member_status():\n notify_member_status()\n logger.info('signal sent for status report')", "def send_robot_status(self, robot_status):\n self.robot_status_sender.send(robot_status)", "async def rndactivity_add_listening(self, ctx: commands.Context, *, status: str):\n await self._add_status(ctx, status, game_type=2)", "async def status(self, msg, *args):\n content = self.get_status()\n await msg.channel.send(**{\n 'content': content,\n 'reference': msg.to_reference(),\n 'mention_author': True,\n })" ]
[ "0.678886", "0.678886", "0.67852986", "0.64390427", "0.6437565", "0.6368543", "0.6348403", "0.6347864", "0.6286434", "0.621969", "0.6199029", "0.61885846", "0.6173507", "0.61667264", "0.61572355", "0.61443347", "0.6096311", "0.6088395", "0.60681", "0.6010695", "0.6003259", "0.60005915", "0.5959819", "0.5950728", "0.5895274", "0.58687425", "0.58558476", "0.58471894", "0.5837419", "0.58292586" ]
0.6819369
0
print all customers with the current time and id in CSV format.
def print_customers(self): output = '' for i in range(len(self.customers)): output += f'Customer no. {self.customers[i].id} is in {self.customers[i].state[0]} section\n' #print(output) with open('oneday.csv','a') as outfile: for i in range(len(self.customers)): outfile.write(f'{self.get_time()};{self.customers[i].id};{self.customers[i].state[0]}\n')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_customers(self):\n self.current_time = self.get_time()\n return f'Supermarket(\"{self.customers}\", \"{self.current_time}\")'", "def table_info(self):\n for customer in self.customers:\n print(customer.get_name())", "def customerReport(self):\n self._setFormat()\n for cust in self.getCustomerAccountData():\n self.output.write(self.form_line(cust))", "def show_all_customers():\n return cr.show_all_customers()", "def printCsv(self):\n self.printCsvHeader()\n for r in self._records:\n r.printCsv()", "def list_apiscout_csv(self, destination):\n res = self.__make_api_call('list/apiscout/csv', raw=True)\n with open(destination, \"wb\") as csvfile:\n csvfile.write(res)", "def get_all_customer_ids():\n\n # your code", "def get_customers(cls, api, id='', **params):\n return api.get_customers(id, **params)", "def csv(self):\n result = \"\"\n for cpu in self.cpus_current:\n result += str(cpu) + \",\"\n return result", "def fetch_customer_info_full(self, client_id):\n\n try:\n return self._make_private_api_request(\n method=PyttributionIo.GET_REQUEST,\n endpoint='customers',\n subject_id=client_id,\n show_all='true'\n ).get('customer')\n except RequestException as e:\n logger.error('Pyttribution.io: Retrieval of full customer info failed with HTTP status {exception}'.format(\n exception=e))", "def get_all_customers():\n data = user_obj.get_all_customers()\n return data", "def make_csv(user_id, fobj):\n data = show_history(user_id)\n report = csv.writer(fobj)\n report.writerow([\n 'Status',\n 'Date',\n 'Amount',\n 'From Curr',\n 'To Curr',\n 'To Address',\n ])\n for row in data:\n report.writerow([\n row.exchange_status.capitalize(),\n row.created_at.strftime('%Y-%m-%d %H:%I:%M'),\n row.amount,\n row.from_curr,\n row.to_curr,\n row.address_out\n ])", "def get_customer_info(self, customer_id):\n #requested_customer_RDD = self.sc.parallelize(movie_ids).map(lambda x: (user_id, x))\n # Get predicted ratings\n customers = self.__get_customers(customer_id).collect()\n\n return customers", "def get_all_customer_ids():\n table = data_manager.get_table_from_file(\"sales/sales.csv\")\n return get_all_customer_ids_from_table(table)", "def getCustomerAccountData(self):\n self.logger.debug(\"\")\n #Process each entry returned by getCustomersInfo through getAccountsInfo.\n customersInfoResponse = self.getCustomersInfo()\n if customersInfoResponse is None:\n self.logger.debug(\"did not get data from self.getCustomersInfo()\")\n raise RuntimeError()\n first = True\n cInfos = self.parseCustomerInfo(customersInfoResponse)\n self.logger.debug(\"%d cInfos\", len(cInfos))\n data = {}\n for cInfo in cInfos:\n if first:\n first = False\n else: # Adds a newline separator for text output.\n self.output.write(self.format({}))\n data['CustomerId'] = cInfo['Id']\n accountsInfoResponse = self.getAccountsInfo(cInfo['Id'], \"true\")\n if accountsInfoResponse is not None:\n data['accounts'] = self.parseAccountInfo(accountsInfoResponse)\n else:\n data['accounts'] = []\n self.logger.debug(\"yield %r\", data)\n yield data", "def print(self):\n df = self.gen_test()\n # print(df)\n df.to_csv('some_dated_file.csv', index=False)\n return df", "def get(self):\n return get_all_customers()", "def show_table(table, has_customer_id=True):\n titles = [\"ID\", \"Title\", \"Price\", \"Date\"]\n if has_customer_id:\n titles.append(\"Customer ID\")\n output_table = [[row[ID], row[TITLE], row[PRICE],\n '/'.join((str(row[YEAR]), str(row[MONTH]), str(row[DAY]))), row[CUSTOMER_ID]] for row in table]\n else:\n output_table = [[row[ID], 
row[TITLE], row[PRICE],\n '/'.join((str(row[YEAR]), str(row[MONTH]), str(row[DAY])))] for row in table]\n\n ui.clear_scr()\n ui.print_table(output_table, titles, TITLE)", "def PrintAllCosts():\n\n logs.logger.debug(\"After queries of all costs from database start to print\"\n \" the details (id, reg.date, date of payment, amount,\"\n \" description, category) of the costs to the consol.\")\n try:\n printedCosts = session.query(Cost.Cost).all()\n for cost in printedCosts:\n print(\n f'{cost.id}\\t{cost.registrationDate}\\t{cost.dateOfPayment} \\\n \\t{cost.amount}\\t{cost.description}\\t{cost.category.value}')\n logs.logger.info(\"After queries of all costs from database print\"\n \" the details (id, reg.date, date of payment, amount,\"\n \" description, category) of the costs to the consol.\")\n except Exception as e:\n logs.logger.error(e, exc_info=True)", "def generate_customer_info(self):\n consecutive = check_consecutive(self.customerIds)\n print(\"consecutive\" + str(consecutive))\n if consecutive:\n for customer_id in self.customerIds:\n # next we need to 1) decide if the customer has insurance, and if yes, generate the EOB that gets sent to the customer\n insured = np.random.choice(\n self.distributions[\"insurance_status\"],\n 1,\n p=self.distributions[\"insurance_distribution\"],\n )[0]\n dob = self.dobs[customer_id - 1]\n customer = pd.DataFrame(\n {\n \"customer_id\": [customer_id],\n \"dob\": str(dob),\n \"insurance\": insured,\n \"experiment_id\": random.randint(1, 2),\n }\n )\n self.Customers = self.Customers.append(customer)\n else:\n print(\"Error generating customer info: customerIds aren't consecutive\")\n return True", "def get_all_customers(connection):\n connection.command_path = \"customers\"\n extra_headers = {connection.header_key: connection.token}\n url = connection.build_url()\n verify_ssl = connection.verify_ssl\n res = requests.get(url=url, headers=extra_headers, verify=verify_ssl)\n if res.status_code > 210:\n return\n body = res.content\n return customers.parse_all_customers(body)", "def list_customers():\n customers = db_helper.get_all_customers()\n return jsonify({\"customers\": customers})", "def return_customers():\n with MY_CONNECTION as connection:\n cursor = connection.cursor()\n cursor.execute(\"SELECT id_customer, login, customer_name, phone, email, perm FROM Customers\")\n return cursor.fetchall()", "def csvprint(self, data):\n import csv\n import sys\n # self._assert(data) CSV data row lenght can vary\n data = self._render(data) # make elements ascii\n writer = csv.writer(sys.stdout, delimiter=',',\n quotechar='\"', \n quoting=csv.QUOTE_MINIMAL,\n lineterminator=\"\\n\")\n for row in data: writer.writerow(row)", "def export_to_csv(self, log):\n if os.path.isfile(self.GENERATE_FILE):\n os.remove(self.GENERATE_FILE)\n\n with open(self.GENERATE_FILE, \"w\") as f:\n f.write(\"date, time, username, succes, label\\n\")\n\n for entry in log:\n f.write(str(entry[0].date()) + \", \"\n + str(self.hms_to_seconds(entry[0])) + \", \"\n + str(entry[1]) + \", \"\n + str(entry[2]) + \", \"\n + str(entry[3])\n + \"\\n\")", "def get_customer_list(self):\n return self._customer_repo.get_customer_list()", "def print_csv(self, items, fields):\r\n writer = csv.writer(sys.stdout)\r\n writer.writerow(fields)\r\n for i in items:\r\n i_fields = [self.string(getattr(i, f)) for f in fields]\r\n writer.writerow(i_fields)", "def customer_list(h):\n global html\n html = h\n\n common_elements = customer_common_elements()\n\n css_list = common_elements[\"css_list\"]\n\n 
javascript_list = common_elements[\"javascript_list\"]\n\n all_btn = common_elements[\"all_btn\"]\n\n html.new_header(\"Customers\", \"customer_management.py\", all_btn, css_list, javascript_list)\n customer_string = \"\"\"\n <div>\n <table id=\"customers\" cellpadding=\"0\" cellspacing=\"0\" border=\"0\" class=\"display\" style=\"text-align:center\">\n <thead>\n <tr>\n <th>\n Customer Name\n </th>\n <th>\n Customer Group\n </th>\n <th>\n Customer Company\n </th>\n <th>\n Email\n </th>\n <th>\n Mobile\n </th>\n <th>\n Usage\n </th>\n <th>\n Actions\n </th>\n </tr>\n </thead>\n </table>\n </div>\n \"\"\"\n customer_string += \"\"\"\n <script>\n get_customers();\n </script>\n \"\"\"\n html.write(customer_string)\n html.new_footer()", "def write_csv(self, file):\n # Write header row\n file.write('Timestamp,MessageType,Queue,Price,Volume,OrderID\\n')\n # Write content\n for x in self.records:\n row = (str(x[0]) + ',' + x[1][\"MessageType\"] + ',' +\n x[1][\"Queue\"] + ',' + str(x[1][\"Price\"]) + ',' +\n str(x[1][\"Volume\"]) + ',' + str(x[1][\"OrderID\"]) + '\\n')\n file.write(row)", "def fetch_customer_info_events(self, client_id):\n\n try:\n return self._make_private_api_request(\n method=PyttributionIo.GET_REQUEST,\n endpoint='customers',\n subject_id=client_id,\n show_events='true'\n ).get('customer')\n except RequestException as e:\n logger.error(\n 'Pyttribution.io: Retrieval of customer events failed with HTTP status {exception}'.format(exception=e))" ]
[ "0.7302453", "0.6655823", "0.6353772", "0.62918216", "0.6062786", "0.5880602", "0.5728009", "0.572303", "0.57119894", "0.5707167", "0.57021517", "0.570062", "0.5667141", "0.56458884", "0.55236256", "0.5516203", "0.54992557", "0.54658407", "0.5455488", "0.54491806", "0.54466695", "0.54387426", "0.5430722", "0.54126936", "0.5402844", "0.53880274", "0.53790414", "0.5375428", "0.53612816", "0.5355738" ]
0.8107446
0
removes every customer that is not active any more.
def remove_existing_customers(self): for i in range(len(self.customers)): if self.customers[i].is_active() == False: self.customers[i]= 'out' self.customers = [item for item in self.customers if item!='out' ]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_existing_customers(self):\n # remove the customers which are not active (.is_active )\n self.to_move = False\n #for cust in self.customers:\n # print(cust.state)\n self.customers = [cust for cust in self.customers if cust.state != 'checkout']\n #if cust.to_move():\n # self.to_move = True", "def test_list_active_users(_list_active_customers):\n for customer in _list_active_customers:\n bo.add_customer(\n customer[0],\n customer[1],\n customer[2],\n customer[3],\n customer[4],\n customer[5],\n customer[6],\n customer[7]\n )\n\n assert bo.list_active_customers() == 4\n\n for customer in _list_active_customers:\n bo.delete_customer(customer[0])\n \n assert bo.list_active_customers() == 0", "def del_all_records():\n delete_alles = Customer.delete().where(Customer.name >= '')\n delete_alles.execute()", "def delete_customer(self, customer_to_del):\n customer_list = self._customer_repo.get_customer_list()\n for customer in customer_list:\n if customer.get_customer_id() == customer_to_del: #Maybe need to find a more efficient way\n customer_list.remove(customer)\n self._customer_repo.overwrite_customer_list(customer_list)\n credit_card_list = self._customer_repo.get_credit_card_list()\n for credit_card in credit_card_list:\n if credit_card.get_customer_id() == customer_to_del: #Maybe need to find a more efficient way\n credit_card_list.remove(credit_card)\n self._customer_repo.overwrite_credit_card_list(credit_card_list)", "def prune(cls):\n keep_ids = cls.objects.distinct(\"channel_id\", \"action\").order_by(\"channel_id\", \"action\", \"-performed\").values_list(\"id\", flat=True)\n cls.objects.exclude(id__in=keep_ids).delete()", "def clean_customer_df(customer_df: pd.DataFrame) -> pd.DataFrame:\n \n # remove customers with more than 20 purchases\n if 'frequency' in customer_df.columns:\n customer_df = customer_df[customer_df.frequency < 20]\n \n return customer_df", "def delete_expired_users(self):\n for profile in self.all():\n if profile.activation_key_expired():\n user = profile.user\n if not user.is_active:\n user.delete()", "def delete_expired_users(self):\r\n for profile in self.all():\r\n if profile.activation_key_expired():\r\n user = profile.user\r\n if not user.is_active:\r\n user.delete()", "def remove_customer(self, loginID):\n try:\n self.cursor.execute(\"\"\"SELECT COUNT(*) FROM customercredentials WHERE loginID=%s\"\"\", (loginID,))\n if not self.cursor.fetchone()[0]:\n return False\n self.cursor.execute(\"\"\"DELETE FROM customercredentials WHERE loginID=%s\"\"\", (loginID,))\n self.db.commit()\n self.cursor.execute(\"\"\"DELETE FROM customerpersonal WHERE phone NOT IN \n (SELECT phone FROM customercredentials)\"\"\")\n self.db.commit()\n self.update_book_scores()\n self.update_comment_usefulness()\n return True\n except Exception as e:\n return False", "def remove_inactive_consumers():\n\n THRESHOLD_MINUTES = 5\n\n schema = get_schema()\n for subscription in schema.subscription_type.fields.keys():\n to_remove = []\n for consumer in frappe.cache().hkeys(get_subscription_redis_key(subscription)):\n subscription_info = frappe.cache().hget(\n get_subscription_redis_key(subscription), consumer)\n\n should_remove = True\n if subscription_info.last_ping:\n last_ping = get_datetime(subscription_info.last_ping)\n if last_ping + timedelta(minutes=THRESHOLD_MINUTES) >= now_datetime():\n should_remove = False\n\n if should_remove:\n to_remove.append(consumer)\n\n if len(to_remove):\n frappe.cache().hdel(\n get_subscription_redis_key(subscription), *to_remove)", "def 
non_activated_account(delete=False):\r\n test_date = datetime.utcnow() - NON_ACTIVATION_AGE\r\n query = DBSession.query(Activation.id).\\\r\n filter(Activation.valid_until < test_date).\\\r\n subquery(name=\"query\")\r\n qry = DBSession.query(User).\\\r\n filter(User.activated.is_(False)).\\\r\n filter(User.last_login.is_(None)).\\\r\n filter(User.id.in_(query))\r\n # Delete the non activated accounts only if it is asked to.\r\n if delete:\r\n for user in qry.all():\r\n DBSession.delete(user)\r\n # If the non activated accounts are not asked to be deleted,\r\n # return their details.\r\n else:\r\n return qry.all()", "def prune(self):\n target_user_ids = self.get_queryset().values_list('id', flat=True)\n exclude_user_ids = SentDrip.objects.filter(date__lt=conditional_now(),\n drip=self.drip_model,\n user__id__in=target_user_ids)\\\n .values_list('user_id', flat=True)\n self._queryset = self.get_queryset().exclude(id__in=exclude_user_ids)", "def delete_expired_users(self):\n for profile in self.all():\n if profile.activation_key_expired():\n user = profile.user\n if not user.is_active:\n user.delete() # Removing the ``User`` will remove the ``RegistrationProfile``, too.", "def list_active_customers():\n with cm.DATABASE.transaction():\n # .select() has a .where() method to specify criteria for searching\n active_customers = cm.Customer.select().where(\n cm.Customer.status == \"Active\").count()\n LOGGER.info(\"Active customers: %s\", active_customers)\n return active_customers", "def remove_expired(cls):\n max_trailers = 10\n current_trailers = cls.get_all(collection='approved_trailers')\n current_trailers.reverse()\n queued_trailers = cls.get_all(collection='queued_trailers')\n\n if len(current_trailers) >= max_trailers and len(queued_trailers) > 0:\n for trailer in current_trailers:\n time_active = trailer.date.timetuple().tm_yday - datetime.now().timetuple().tm_yday\n if time_active >= 14 and len(queued_trailers) > 0:\n cls.move(trailer, 'approved_trailers', 'archived_trailers')\n cls.move(queued_trailers[0], 'queued_trailers', 'approved_trailers')", "def _remove_unconfirmed_transactions(frame):\n\n frame.drop(frame.loc[frame['posted'] == False].index, inplace=True)\n return frame", "def test_list_active_customers(self):\n set_up_db()\n add_customer(*self.test_customer)\n add_customer(customer_id=2, name=\"Clark\", last_name=\"Kent\", home_address=None,\n phone_number=\"228-626-7899\", email=\"[email protected]\",\n status=True, credit_limit=200.00)\n add_customer(customer_id=3, name=\"Diana\", last_name=\"Prince\", home_address=None,\n phone_number=\"587-8423\", email=\"[email protected]\",\n status=False, credit_limit=100.00)\n self.assertEqual(2, list_active_customers())", "def admin_delete_non_activated(request):\r\n UserMgr.non_activated_account(delete=True)\r\n return _api_response(request, {\r\n 'status': True,\r\n 'message': 'Removed non activated accounts'\r\n })", "def action_unselect_all(self):\n for statement in self:\n statement_lines = statement.credit_move_line_ids + statement.debit_move_line_ids\n statement_lines.write({'cleared_bank_account': False})\n return True", "def trim_items(self, items):\r\n\t\tlogger.debug(\"Enter\")\r\n\t\t\r\n\t\tif self.transactions:\r\n\t\t\tall_items = set.union(*[self.transactions[u][-1] for u in self.transactions.keys()])\r\n\t\telse:\r\n\t\t\treturn items\r\n\t\t\t\r\n\t\ttmp = items.copy()\r\n\t\t\r\n\t\tfor i in items:\r\n\t\t\tif i in all_items:\r\n\t\t\t\tlogger.debug(\"Removing %r\" % 
i)\r\n\t\t\t\ttmp.remove(i)\r\n\t\t\t\t\r\n\t\tlogger.debug(\"Exit\")\r\n\t\treturn tmp", "def remove_obsolete_users(self, date_limit):\n for user in User.objects.filter(last_login__lt=date_limit):\n if not ServiceProvider.objects.filter(admins=user):\n self.output(\"Removing user: \" + user.username)\n if not self.list_only:\n user.delete()", "def list_active_customers():\n active_customers = 0\n for customer in cm.Customers:\n if customer.status == \"Active \":\n active_customers += 1\n return active_customers", "def test_list_active_customers(self):\r\n create_empty_db()\r\n add_customer(**user_1)\r\n add_customer(**user_2)\r\n add_customer(**user_3)\r\n self.assertEqual(2, list_active_customers())\r\n drop_db()", "def remove_accounts(self):\n current_creds = self._accounts.copy()\n for creds in current_creds:\n self.remove_account(current_creds[creds].credentials.token,\n current_creds[creds].credentials.url)", "def test_delete_non_activated_accounts(self):\r\n res = self.testapp.delete(\r\n '/api/v1/a/nonactivated?api_key={0}'.format(\r\n self.api_key),\r\n status=200)\r\n data = json.loads(res.body)\r\n self.assertEqual(True, data['status'], \"Status should be True\")\r\n self.assertEqual(u'Removed non activated accounts', data['message'])", "def delete_non_activated_account():\r\n trans = transaction.begin()\r\n UserMgr.delete_non_activated_account()\r\n trans.commit()", "def delete(self, **params):\n return self._api.delete_customer(self.id, **params)", "def filter_by_customer(table, customer):\n transactions = []\n for record in table:\n if record[CUSTOMER_ID] == customer:\n transactions.append(record)\n return transactions", "def remove_all_recs(self):\n return self.storage.clear()", "def remove_users(users_to_remove: list, users_dict: dict,\n end_of_service: str) -> None:\n for reciever in users_to_remove:\n if reciever in users_dict:\n send_message(reciever,\n 'Subscription expired\\n',\n end_of_service,\n users_dict[reciever]['carrier'])\n del users_dict[reciever]" ]
[ "0.82650816", "0.63835925", "0.6293173", "0.6194644", "0.5925333", "0.58057535", "0.57551056", "0.57542336", "0.57448226", "0.5709532", "0.566901", "0.5649718", "0.5645026", "0.5611124", "0.55752677", "0.55348897", "0.5534544", "0.55345047", "0.5498778", "0.54886633", "0.5482617", "0.5480388", "0.54631686", "0.5460978", "0.5440678", "0.5420416", "0.54146725", "0.5390688", "0.5385904", "0.5381045" ]
0.8203715
1
Helper function for creating new `Item` Elements. This is used until we get to InstanceElement, where we then use that class for all of the elements instead.
def _new_item(class_name=None): class_name = class_name or "Folder" return ElementTree.Element("Item", attrib={ "class": class_name })
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def NewItems(self) -> _n_1_t_7:", "def _createItem(self, rpcObject):\n item = ShowWidgetItem(rpcObject, self)\n return item", "def clone_item(item):\n i = h5Item(item.text(0))\n i.path = item.path\n i.listIndex = item.dataIndex\n i.originalIndex = item.originalIndex\n i.data = item.data\n return i", "def create_item(self, parent, block):\r\n li = util.etree.SubElement(parent, 'li')\r\n self.parser.parseBlocks(li, [block])", "def _item_to_elements_parser(self, item):\n elements = {}\n\n ####### Sad solution - look for better one. #######\n items = [\"data\", \"img\", \"title\", \"link\", \"price\"]\n values = (\"item.p.string.strip()\", 'item.img[\"src\"]', 'item.img[\"alt\"]',\n '''item.find(\"a\", {\"class\":\"detailsLink\"})['href']''',\n '''item.find('strong').string.strip()''')\n for key, value in zip(items, values):\n\n # CONVERT TIME\n # if key == \"data\":\n # try:\n # print (time.strptime(eval(value), \"%d %b\"))\n # except Exception as error:\n # print (error) # time data '5 paz' does not match format '%d %b'\n\n try:\n elements.update({key:eval(value)})\n except (TypeError, AttributeError):\n elements.update({key:None})\n\n\n # print()\n # for key, val in elements.items():\n # print (key, val)\n # print()\n ###################################################\n return elements", "def new_varItem(self):\n newInd = (len(pQt.getTopItems(self)) + 1)\n newItem = QtGui.QTreeWidgetItem()\n newItem.setText(0, str(newInd))\n newItem._treeParent = self\n newItem._wdgtParent = self.treeParent\n newItem.wdgEnabled = self.new_varEnabledWidget()\n newItem.wdgLabel = self.new_varTextWidget()\n newItem.wdgType = self.new_varTypeWidget()\n newItem.wdgValue = self.new_varTextWidget()\n newItem.wdgComment = self.new_varTextWidget()\n return newItem", "def test_create_item(self):\n item = self.item\n\n self.assertTrue(isinstance(item, Item))\n self.assertEqual(item.name, \"Test Item\")", "def _create_node(self, item: Item) -> Dict[str, Any]:\n node = {'text': item.title,\n 'item-id': item.id,\n 'nodes': []}\n icon = self.icon_name(item)\n if icon:\n node['icon'] = 'glyphicon glyphicon-{}'.format(icon)\n node['item_title'] = item.title\n node['item_type'] = item.type\n node['item_note'] = item.note\n node['node_type'] = item.__class__.__name__.lower()\n if isinstance(item, Item):\n meta = self._node_metadata(item)\n creators = item.creators\n if meta is not None:\n node['metadata'] = meta\n res = self._item_mapper.get_resource_name(item)\n if res is None:\n res = self._find_child_resource(item, self.PDF_EXT_REGEXP)\n if res is None:\n res = self._find_child_name(item, self.PDF_FULL_REGEXP)\n if res is not None:\n node['resource'] = res\n if creators is not None:\n if meta is None:\n meta = []\n node['metadata'] = meta\n meta.append(('Creators', ', '.join(map(str, creators))))\n if meta is not None:\n meta.sort()\n return node", "def __init__(self, items, tag):\n self.items = items\n self.tag = tag", "def __init__(self, items):\n self.items = items", "def newElement(self,cls,attrib={}):\n elem = cls(**attrib)\n self.setFreeId(elem)\n if cls==Subtoken:\n self.subtokens[elem.id] = elem\n elif cls==DepToken:\n self.deptokens[elem.id] = elem\n elif cls==RelToken:\n self.reltokens[elem.id] = elem\n elif cls==DepEntity:\n self.depentities[elem.id] = elem\n elif cls==RelEntity:\n self.relentities[elem.id] = elem\n else:\n # It is caller responsibility to add elements to the graph\n pass\n \n return(elem)", "def __init__(self):\n self.elems = {}\n self.items = {}", "def init_items(self):\r\n raise 
NotImplementedError()", "def _create_node_list(self) -> NodeList:\r\n return NodeList(self)", "def make_item_body(self, item):\n raise NotImplementedError", "def create_items(sender, instance, **kwargs):\n if instance.item_id is None and instance.item is None:\n item = Item()\n if hasattr(instance, 'active'):\n item.active = getattr(instance, 'active')\n item.save()\n instance.item = item", "def make(self, item):\n self.name = item.get(\"name\", \"\")\n self.description = item.get(\"description\", \"\")\n self.type = item.get(\"type\", \"filler\")\n if not isinstance(self.type, str) or self.type is None:\n self.usable = NotUsable\n elif len(self.type) > 1:\n self.set_usable(self.type)\n else:\n self.usable = NotUsable", "def add_item(self, item):\n if item.media_type == '':\n (has_guessed, media_type) = guess_type(item.get_name().lower())\n\n if has_guessed:\n if media_type is not None:\n item.media_type = media_type\n else:\n item.media_type = has_guessed\n else:\n item.media_type = 'application/octet-stream'\n\n if not item.get_id():\n # make chapter_, image_ and static_ configurable\n if isinstance(item, EpubHtml):\n item.id = 'chapter_%d' % self._id_html\n self._id_html += 1\n elif isinstance(item, EpubImage):\n item.id = 'image_%d' % self._id_image\n self._id_image += 1\n else:\n item.id = 'static_%d' % self._id_image\n self._id_image += 1\n\n item.book = self\n self.items.append(item)\n\n return item", "def __init__(self, item_type=None):\n super(List, self).__init__()\n self.item_type = item_type or Type()", "def __init__(self, *args):\n this = _ida_hexrays.new_citem_t(*args)\n try: self.this.append(this)\n except: self.this = this", "def create_widget(self):\n item = QNodeItem(self)\n self.widget = item", "def __init__(self, item=None):\n self.item = item\n self.children = [] #List to hold all child nodes of this Node", "def xml_item(cls, item):\n xml = cls.xml_root_open(item)\n xml += cls.xml_add_links(item)\n xml += cls.xml_dict(item)\n xml += cls.xml_root_close()\n return xml", "def create(cls):\n return BasketItem(code=str(uuid.uuid4()))", "def __init__(self, item):\n self._element = item\n self._leftchild = None\n self._rightchild = None\n self._parent = None", "def Item(self) -> object:", "def Item(self) -> object:", "def __init__(self):\n self._data = PositionalList() # list of Item instances", "def __init__(self, item):\n self._element = item\n self._leftchild = None\n self._rightchild = None\n self._parent = None\n self._height = 0", "def build_chest_item(self):\n\t\tif not self.contents_data: return None\n\t\tconstructor = self.contents_data[ ITEM_CLASS ]\n\t\tkey = self.contents_data[ ITEM_KEY ]\n\t\treturn build_item( constructor, key, 0, 0 )" ]
[ "0.6591493", "0.61576575", "0.58468294", "0.5836948", "0.5780297", "0.5729952", "0.5715711", "0.5708954", "0.5705501", "0.569519", "0.5685359", "0.5655327", "0.5629177", "0.5620245", "0.55971825", "0.559323", "0.5592667", "0.55574316", "0.55425286", "0.55268466", "0.5522246", "0.55165786", "0.55153036", "0.5508994", "0.5496437", "0.54845715", "0.54845715", "0.54572326", "0.5437077", "0.54330385" ]
0.73472065
0
Right now, comment matching is only done to inline comments for simplicity. If a more sophisticated pattern is implemented to pick up block comments, this test can be removed.
def test_does_not_match_block_comments(self): comment = dedent("""\ --[[ Hello, World! --]]""") script = rbxmx.ScriptElement(source=comment) first_comment = script.get_first_comment() assert first_comment is None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_like_a_comment(self):\n self.base_test()", "def test_comments(self):\n\n comment_str = \"# This is a comment\\n# This is another comment\"\n doc = parser.parse(comment_str)\n\n self.assertEqual(len(doc.children()), 2)", "def test_parse_multiline_comment(self):\n source_code = dedent(\"\"\"\\\n /**\n * this is a doc comment that stretches over\n * more than one line\n */\n int main()\n {\n return 0;\n }\n \"\"\")\n result = self.parser.parse(source_code.splitlines())\n assert_equal(result, {\n \"int main()\": (\"this is a doc comment that stretches over \"\n \"more than one line\")})", "def block_comment(self):\n while (\n not (self.peek() == \"*\" and self.peek_next() == \"/\")\n and not self.is_at_end()\n ):\n if self.peek() == \"\\n\":\n self.line += 1\n self.advance()\n\n if self.peek() == \"*\" and self.peek_next() == \"/\":\n self.advance(spaces=2)\n\n return None", "def test_dislike_a_comment(self):\n self.base_test()", "def test_double_comment(self):\n self.compare_tokens(\n \"## papān libbi[belly] (already in gloss, same spelling)\\n\",\n ['COMMENT', 'ID', 'NEWLINE']\n )", "def test_comment(parallel, read_basic):\n table = read_basic(\n \"# comment\\nA B C\\n # another comment\\n1 2 3\\n4 5 6\", parallel=parallel\n )\n expected = Table([[1, 4], [2, 5], [3, 6]], names=(\"A\", \"B\", \"C\"))\n assert_table_equal(table, expected)", "def _is_comment_line(self):\n pattern = re.compile(r\"^(\\s)*(//)+\")\n return pattern.search(self._line)", "def __ingest_c_block_comments(self, line, position):\n\n pos = position\n while self._in_block_comment and pos < len(line):\n if pos + 1 < len(line) and line[pos] == '*' and line[pos + 1] == '/':\n self._in_block_comment = False\n pos += 2\n pos += 1\n return pos - position", "def supports_comment_search(self):\n return False", "def test_display_html_comment(self):\r\n self.context['comment'] = \"<p>Unescaped <b>comment HTML</b></p>\"\r\n self.context['comment_prompt'] = \"<p>Prompt <b>prompt HTML</b></p>\"\r\n self.context['text'] = \"<p>Unescaped <b>text</b></p>\"\r\n xml = self.render_to_xml(self.context)\r\n\r\n # Because the HTML is unescaped, we should be able to\r\n # descend to the <b> tag\r\n xpath = \"//div[@class='block']/p/b\"\r\n self.assert_has_text(xml, xpath, 'prompt HTML')\r\n\r\n xpath = \"//div[@class='block block-comment']/p/b\"\r\n self.assert_has_text(xml, xpath, 'comment HTML')\r\n\r\n xpath = \"//div[@class='block block-highlight']/p/b\"\r\n self.assert_has_text(xml, xpath, 'text')", "def isBlockComment(self, lineData, column):\n return self._getTextType(lineData, column) == 'b'", "def block_comments(code):\n block = list()\n for line in code:\n if bool(line.strip()): # If line is not empty\n if line.strip()[0] == '!': # If the first character of the string is the start of a comment it adds it\n block.append(identify_comment(line))\n elif bool(line.strip()): # If the first character of the string is not the start of a comment or its not empty it exits\n break\n return block", "def test_comments(self):\n fp = FilePath(self.mktemp())\n fp.setContent('something\\n#commented\\ncool')\n self.assertEqual(list(inventoryReader(fp.path)), ['something', 'cool'])", "def testComment(self):\n dis_meta = DiseaseMeta()\n\n self.util.stringTypeTest(self, dis_meta, \"comment\")\n\n self.util.stringPropertyTest(self, dis_meta, \"comment\")", "def test_commentline_same_comment_glue(create):\n\n comment = create(CommentItem, Comment)\n line = create(CommentLineItem)\n\n connect(line, line.head, comment)\n glued = allow(line, 
line.tail, comment)\n assert not glued", "def is_comment(self) -> bool: # pragma: no cover TODO?\n return all(seg.is_comment for seg in self.segments)", "def fParseHTMLComments(self, match):\n before, commenttext, after = match.groups()\n commenttext = self.shelve(commenttext)\n return '<!--%s-->' % commenttext", "def test_unicode_comments(self):\n self._do_test(\n ['Hi there!', 'This is an element in a list of strings.'],\n ensure_binary(dedent(u\"\"\"\n [\n 'Hi there!',\n # This is a comment with ‘sneaky‘ unicode characters.\n 'This is an element in a list of strings.',\n # This is a comment with an obvious unicode character ☺.\n ]\n \"\"\").strip()),\n )", "def test_remove_single_line_comments_annotation():\n\n\tinput_ = \"\"\"line1\n\t\t\t\tline2 \n\t\t\t\t//comment\n\t\t\t\t//@Test //comment\n\t\t\t\t//comment\n\t\t\t\tline3 \"\"\"\n\n\texpect = \"\"\"line1\n\t\t\t\tline2 \n\t\t\t\t\n\t\t\t\t//@Test //comment\n\t\t\t\t\n\t\t\t\tline3 \"\"\"\n\n\tassert aunit.remove_single_line_comments(input_) == expect", "def parse_comment(comment: Union[Token, PsuedoToken]) -> str:\n # Happens when there is no documentation comment in the source file for the\n # item.\n spelling = comment.spelling\n if spelling is None:\n return \"\"\n\n # Comments from clang start at the '/*' portion, but if the comment itself\n # is indented subsequent lines will have too much indent.\n # Transform::\n #\n # \"/**\\n * hello some comment\\n * on multiple lines\\n */\"\n #\n # into::\n #\n # \"/**\\n * hello some comment\\n * on multiple lines\\n */\"\n indent = \" \" * (comment.extent.start.column - 1)\n indented_comment = indent + spelling\n dedented_comment = textwrap.dedent(indented_comment)\n\n # Notes on the regex here.\n # Option 1 '\\s?\\*/?'\n # This piece will match comment lines that start with '*' or ' *'.\n # This will also match a trailing '*/' for the end of a comment\n #\n # Option 2 '^/\\*+<?'\n # This will match the start of a comment '/*' and consume any\n # subsequent '*'. 
This is also meant to catch '/**<' for trailing comments.\n #\n # Option 3 '\\*+/'\n # Matches any and all '*' up to the end of the comment string.\n contents = re.sub(\n r\"^\\s?\\*/?|^/\\*+<?|\\*+/\",\n lambda x: len(x.group(0)) * \" \",\n dedented_comment,\n flags=re.MULTILINE,\n )\n\n contents = textwrap.dedent(contents)\n\n # there may still be left over newlines so only strip those, but leave any\n # whitespaces.\n contents = contents.strip(\"\\n\")\n\n return contents", "def test_code_comment_success(self):\n found = False\n pyint = Interpreter()\n try:\n pyint.run(code=BF_CODE_COMMENT)\n except SystemExit: \n found = True\n self.assertFalse(found)", "def __ingest_c_comment_start(self, line, pos):\n\n if line[pos] == '/' and len(line) > pos + 1:\n if line[pos + 1] == '/':\n return -1\n elif line[pos + 1] == '*':\n self._in_block_comment = True\n return 2\n return 0", "def _parse_comments(reader):\n regex = r'\\s*(#|\\/{2}).*$'\n regex_inline = r'(:?(?:\\s)*([A-Za-z\\d\\.{}]*)|((?<=\\\").*\\\"),?)(?:\\s)*(((#|(\\/{2})).*)|)$'\n\n pipe = []\n for line in reader:\n if re.search(regex, line):\n if re.search(r'^' + regex, line, re.IGNORECASE): continue\n elif re.search(regex_inline, line):\n pipe.append(re.sub(regex_inline, r'\\1', line))\n else:\n pipe.append(line)\n return \"\\n\".join(pipe)", "def test(self, parent, block):\n\n self.match = self.pattern.match(block) if self.pattern is not None else None\n return self.match is not None", "def supports_comment_lookup(self):\n return False", "def parse_space_in_comment(comment):\n max_spaces_dict = {}\n for line in comment:\n if (not line.strip()) or line.find(\" \") == -1:\n # empty line or line do not have spaces in it.\n continue\n max_spaces_dict[line] = max(len(list(v)) for k, v in groupby(line) if k == \" \")\n\n sep = [(line.index(\" \" * count) + count) for line, count in max_spaces_dict.items()]\n sep.sort()\n count_dict = {len(list(v)):k for k, v in groupby(sep)}\n\n if max(count_dict.keys()) < 3:\n return {}, comment\n\n comment_dict = {}\n # more than 3 lines following the same pattern, extract from it.\n sep_position = count_dict[max(count_dict.keys())] - 1\n debug(\"found boundary: %s\" % sep_position)\n\n def line_match_pattern(line, position, prev_line=None, next_line=None, recursive=True):\n \"\"\"\n for a line to match a pattern, its next line or its prev line must\n also match the pattern. Notice that the function would call itself\n to see if its next/prev line matches the pattern. So we used a flag\n to stop it from going deeper into the loop.\n \"\"\"\n if line.strip() and len(line) <= position + 1:\n return False\n if not (line[position] == \" \" and line[position+1] != \" \"):\n # The line itself must match the pattern.\n return False\n if (prev_line is None) and (next_line is None) and recursive:\n print(\"##### Bad way to call this function. 
####\")\n return False\n\n if not recursive:\n # If we do not go deeper, then the current line just match the pattern.\n return True\n\n if prev_line and prev_line.strip() and not (line_match_pattern(prev_line, position, recursive=False)):\n return False\n\n if next_line and next_line.strip() and not (line_match_pattern(next_line, position, recursive=False)):\n return False\n\n return True\n\n comment_copy = copy(comment)\n for index, line in enumerate(comment_copy):\n if (not line.strip()) or line.find(\" \") == -1 or len(line) < sep_position:\n # empty line, or line has no space, or line to short.\n continue\n if index == 0:\n if line_match_pattern(line, sep_position, next_line=comment_copy[1]):\n key = line[:sep_position].strip(STRIPS)\n value = line[sep_position:].strip(STRIPS)\n debug(\"space || found %s: %s\" % (key, value))\n comment_dict[key] = value\n comment.remove(line)\n else:\n debug(\"First line, but it does not match\")\n continue\n elif index == len(comment_copy)-1:\n if line_match_pattern(line, sep_position, prev_line=comment_copy[-1]):\n key = line[:sep_position].strip(STRIPS)\n value = line[sep_position:].strip(STRIPS)\n debug(\"space || found %s: %s\" % (key, value))\n comment_dict[key] = value\n comment.remove(line)\n else:\n debug(\"last line, but it does not match\")\n continue\n elif line_match_pattern(line, sep_position, prev_line=comment_copy[index-1], next_line=comment_copy[index+1]):\n key = line[:sep_position].strip(STRIPS)\n value = line[sep_position:].strip(STRIPS)\n debug(\"space || found %s: %s\" % (key, value))\n comment_dict[key] = value\n comment.remove(line)\n return comment_dict, comment", "def assertEqualComment(first: ParserNode, second: ParserNode) -> None: # pragma: no cover\n\n assert isinstance(first, interfaces.CommentNode)\n assert isinstance(second, interfaces.CommentNode)\n\n if not isPass(first.comment) and not isPass(second.comment):\n assert first.comment == second.comment", "def visit_BlockComment(self, node):\n\n self.statement(node, '# ', node.text)", "def run(self):\n # If the comment doesn't match, do nothing.\n if not self.regex.search(self.comment): return 0\n\n # Perform the child actions.\n return super(FilterComment, self).run()" ]
[ "0.69436723", "0.6914159", "0.66925186", "0.6657935", "0.6617428", "0.6434374", "0.64170176", "0.6403656", "0.6363083", "0.62539417", "0.6222357", "0.61914796", "0.61640805", "0.6143505", "0.6138388", "0.61269903", "0.61109936", "0.6103159", "0.6084948", "0.60746026", "0.60703975", "0.60367364", "0.6025472", "0.6012954", "0.59839165", "0.59792507", "0.5964886", "0.5925821", "0.59191823", "0.59135854" ]
0.7008808
0
Return a dict with validation rules for a field. Used directly in widget templates
def get_validators_for_field(field): # TODO: Add more validation methods validators = {} if v.validation_includes(field.attr.validator, v.Email): validators['email'] = True if v.validation_includes(field.attr.validator, v.Number): validators['number'] = True if v.validation_includes(field.attr.validator, v.Required): validators['required'] = True if v.validation_includes(field.attr.validator, v.URL): validators['url'] = True if v.validation_includes(field.attr.validator, v.DomainName): validators['hostname'] = True if v.validation_includes(field.attr.validator, v.IPAddress): validators['ip_address'] = True if v.validation_includes(field.attr.validator, v.Min): for validator in field.attr.validator.validators: if isinstance(validator, v.Min): validators['min'] = validator.min_val if v.validation_includes(field.attr.validator, v.Max): for validator in field.attr.validator.validators: if isinstance(validator, v.Max): validators['max'] = validator.max_val if v.validation_includes(field.attr.validator, v.Remote): for validator in field.attr.validator.validators: if isinstance(validator, v.Remote): validators['remote'] = validator.validator_name return validators
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fields_validator():\n\n return validator.BrewerySchema()", "def validate(self):\n for rule in self.get_rules():\n rule.validate(self.get_val())", "def validation_runner(val_funk, field, value, requires):\n if hasattr(requires, '__iter__') and not isinstance(requires, unicode):\n return {field: (val_funk.__name__, requires)} if not val_funk(value, *requires) else {}\n else:\n return {field: (val_funk.__name__, requires)} if not val_funk(value, requires) else {}", "def validations(self):\n return self.container['validations']", "def validate(self):\n for name, field in self._get_fields().items():\n field.validate(getattr(self, name))", "def test_field_rules():", "def get_validator_kwargs(self):\n return {\n 'schema': self.get_validation_schema(),\n }", "def get_field_errors(self, bound_field):\r\n errors = super(NgFormValidationMixin, self).get_field_errors(bound_field)\r\n identifier = format_html('{0}.{1}', self.form_name, self.add_prefix(bound_field.name))\r\n errors_function = '{0}_angular_errors'.format(bound_field.field.__class__.__name__)\r\n try:\r\n errors_function = getattr(VALIDATION_MAPPING_MODULE, errors_function)\r\n potential_errors = types.MethodType(errors_function, bound_field.field)()\r\n except (TypeError, AttributeError):\r\n errors_function = getattr(VALIDATION_MAPPING_MODULE, 'Default_angular_errors')\r\n potential_errors = types.MethodType(errors_function, bound_field.field)()\r\n errors.append(SafeTuple((identifier, '$dirty', '$valid', 'valid', ''))) # for valid fields\r\n errors.extend([SafeTuple((identifier, '$dirty', pe[0], 'invalid', force_text(pe[1])))\r\n for pe in potential_errors])\r\n return errors", "def validate(self):\n return self.validator.validate(self.fields)", "def definition_validator(request):\n return validator(request, DefinitionValidator())", "def additional_validation(self,**kwargs):\n return []", "def field_errors(bound_field):\n seen = []\n errors = {}\n if hasattr(bound_field.field, \"fields\"):\n for idx, subfield in enumerate(bound_field.field.fields):\n key = \"%s_%d\" % (bound_field.auto_id, idx)\n subfield_errors = getattr(subfield.widget, \"errors\", [])\n errors[key] = subfield_errors\n seen.extend(subfield_errors)\n for error in bound_field.errors:\n if error not in seen:\n errors.setdefault(bound_field.auto_id, [])\n errors[bound_field.auto_id].append(error)\n return errors.items()", "def _validate_fields(self, change_fields):\n pass", "def validate(self, value):\n errors = {}\n if self.field:\n if hasattr(value, \"items\"):\n sequence = value.items()\n else:\n sequence = enumerate(value)\n for k, v in sequence:\n try:\n self.field._validate(v)\n except ValidationError as error:\n errors[k] = error.errors or error\n except (ValueError, AssertionError) as error:\n errors[k] = error\n\n if errors:\n field_class = self.field.__class__.__name__\n self.error(f\"Invalid {field_class} item ({value})\", errors=errors)\n # Don't allow empty values if required\n if self.required and not value:\n self.error(\"Field is required and cannot be empty\")", "def get_allowed_width_fields(self, fields=None):\n fields = fields or self.form.fields\n allowed_fields = {}\n for name, field in fields.items():\n if isinstance(field.widget, self.form.label_width_widgets):\n if not isinstance(field.widget, self.form.label_exclude_widgets):\n allowed_fields[name] = field\n return allowed_fields", "def validate(self, value):\n def wrap_keys(key):\n return '{{{0}}}'.format(key)\n\n # Use the parent's handling of required fields, etc.\n 
super(InterpreterField, self).validate(value)\n f = Formatter()\n keys_found = set(filter(None, [it[1] for it in f.parse(value)]))\n missing_keys = self.required_keys.difference(keys_found)\n if missing_keys:\n prep_keys = map(wrap_keys, missing_keys)\n raise ValidationError(_('Value is missing keys: {0}.'.format(', '.join(prep_keys))))\n\n too_many_keys = keys_found.difference(self.required_keys)\n if too_many_keys:\n prep_keys = map(wrap_keys, too_many_keys)\n raise ValidationError(_('Value has unused keys: {0}.'.format(', '.join(prep_keys))))", "def getValidations(self):\n return self.objectValues('InstrumentValidation')", "def _validate(this, validators):\n for val_key, validator in validators.items():\n if isinstance(val_key, tuple):\n field_value, field_name = (getitem(this, val_key[0], None), val_key[0])\n if field_value != val_key[1]:\n continue\n else:\n _validate(this, validator)\n continue\n else:\n field_value, field_name = (getitem(this, val_key, None), val_key)\n if hasattr(field_value, 'validate') and isinstance(validator, dict):\n field_value.validate(validator)\n setitem(this, 'validation_errors', dict(this.validation_errors, **field_value.validation_errors))\n elif isinstance(validator, dict):\n _validate(getitem(this, val_key, None), validator)\n else:\n val_errors = [validation_runner(funkidator[0], field_name, field_value, funkidator[1:]) for funkidator in validator]\n setitem(this, 'validation_errors', dict(getitem(this, 'validation_errors', {}), **dict((key, val) for item in val_errors for key, val in item.items())))", "def get_conditional_rules(self):\n conditional_rules = []\n\n for field in self.form.get_prep_value():\n\n rules = field['value'].get('rules', None)\n if rules:\n field_id = field['value'].get('field_id', None)\n if field_id:\n rules['field_name'] = field_id\n else:\n rules['field_name'] = clean_form_field_name(field['value']['label'])\n rules['required'] = field['value'].get('required', False)\n rules['field_type'] = field.get('type', None)\n conditions = rules.get('conditions', None)\n if len(conditions):\n for condition in conditions:\n del(condition['id'])\n del(condition['type'])\n condition['field_name'] = clean_form_field_name(condition['value']['field_name'])\n condition['rule'] = condition['value']['rule']\n condition['value'] = condition['value'].get('value', None)\n\n conditional_rules.append(rules)\n\n return conditional_rules", "def fieldValidator(field):\n\n # Catch special fields before trying the standard validators.\n special_names = {\n \"cofog\": cofogValidator,\n \"gfsmExpense\": gfsmExpenseValidator,\n \"gfsmRevenue\": gfsmRevenueValidator,\n \"type\": typeValidator\n }\n if field[\"name\"] in special_names.keys():\n return special_names[field[\"name\"]]\n\n # Dates and datetimes require special treatment; catch those too.\n if field[\"type\"] in [\"date\", \"datetime\"]:\n return dateFieldValidator(field)\n\n # For the rest: default field parsers.\n field_parsers = {\n 'number': float,\n 'integer': int,\n 'int': int,\n 'string': str,\n 'time': lambda x: time.strptime(x, '%H:%M'),\n 'boolean': bool,\n 'binary': base64.b64decode,\n 'object': json.loads,\n 'json': json.loads,\n 'geojson': json.loads,\n 'array': list,\n }\n\n if field[\"type\"] in field_parsers.keys():\n return field_parsers[field[\"type\"]]\n raise ValueError(\"No field validator for field type \" + field[\"type\"])", "def generate_validator(self, t, **kwargs):\n def validator(val, field_name=''):\n if val is None and 'required' in kwargs and not 
kwargs['required']:\n return True\n elif val is None:\n raise ValidationError('%s: None is not allowed (field required)' % field_name)\n if not isinstance(val, t):\n raise ValidationError('%s: \"%s\" not an instance of %s but an instance of %s' %\n (field_name, val, t, type(val)))\n if isinstance(val, dict):\n check_keys(val) # check against . & $ in keys\n return True\n return validator", "def validate_field(self, field_name, val):\r\n return self._columns[field_name].validate(val)", "def validate(self, request):\n values = {\n 'robot_match_comments':request.POST['robot_match_comments'],\n 'did_foul':'did_foul' in request.POST,\n 'did_technical_foul':'did_technical_foul' in request.POST,\n 'foul_description':request.POST['foul_description'],\n 'did_shoot':'did_shoot' in request.POST,\n 'auto_1':request.POST['auto_1'],\n 'auto_2':request.POST['auto_2'],\n 'auto_3':request.POST['auto_3'],\n 'auto_miss':request.POST['auto_miss'],\n 'teleop_1':request.POST['teleop_1'],\n 'teleop_2':request.POST['teleop_2'],\n 'teleop_3':request.POST['teleop_3'],\n 'teleop_5':request.POST['teleop_5'],\n 'teleop_miss':request.POST['teleop_miss'],\n 'shooting_description':request.POST['shooting_description'],\n 'did_climb':'did_climb' in request.POST,\n 'climb_start':request.POST['climb_start'],\n 'climb_finish':request.POST['climb_finish'],\n 'level_reached':request.POST.get('level_reached'),\n 'frisbees_dumped':request.POST['frisbees_dumped'],\n 'climbing_description':request.POST['climbing_description'],\n 'did_human_load':'did_human_load' in request.POST,\n 'did_ground_load':'did_ground_load' in request.POST,\n 'auto_frisbees_ground_loaded':\\\n request.POST['auto_frisbees_ground_loaded'],\n 'loading_description':request.POST['loading_description'],\n }\n if ((values['did_foul'] or values['did_technical_foul']) and\n not values['foul_description']):\n new_values = self.__dict__.copy()\n new_values.update(values)\n raise ValidationError(\n 'Please enter a description of the foul(s) the robot committed',\n new_values\n )\n if values['did_shoot']:\n try:\n values['auto_1'] = int(values['auto_1'])\n values['auto_2'] = int(values['auto_2'])\n values['auto_3'] = int(values['auto_3'])\n values['auto_miss'] = int(values['auto_miss'])\n values['teleop_1'] = int(values['teleop_1'])\n values['teleop_2'] = int(values['teleop_2'])\n values['teleop_3'] = int(values['teleop_3'])\n values['teleop_5'] = int(values['teleop_5'])\n values['teleop_miss'] = int(values['teleop_miss'])\n except ValueError:\n raise ValidationError(\n 'You must enter a number for all of the shooting numbers',\n self.__dict__.copy().update(values)\n )\n if values['did_climb']:\n try:\n values['climb_start'] = int(values['climb_start'])\n values['climb_finish'] = int(values['climb_finish'])\n try:\n values['level_reached'] = int(values['level_reached'])\n except TypeError:\n new_values = self.__dict__.copy()\n new_values.update(values)\n raise ValidationError(\n 'You must select a level the robot climbed too',\n new_values\n )\n values['frisbees_dumped'] = int(values['frisbees_dumped'])\n except ValueError:\n new_values = self.__dict__.copy()\n new_values.update(values)\n raise ValidationError(\n 'All climbing related numbers must be numbers',\n new_values\n )\n if values['did_ground_load']:\n try:\n values['auto_frisbees_ground_loaded'] = int(\n values['auto_frisbees_ground_loaded'])\n except ValueError:\n new_values = self.__dict__.copy()\n new_values.update(values)\n raise ValidationError(\n 'All numbers of frisbees ground loaded must be numbers',\n 
new_values\n )\n return values", "def validation(self):\n validation_info = {}\n for _doc in self.schema_extension_only['@graph']:\n if \"$validation\" in _doc:\n data = _doc[\"$validation\"]\n if \"definitions\" in _doc[\"$validation\"]:\n data = expand_ref(data, _doc[\"$validation\"][\"definitions\"])\n validation_info[_doc[\"@id\"]] = data\n return validation_info", "def validate(self, source_value):\n errors = defaultdict(list)\n\n for field in self.get_mapping().fields:\n value = get_attribute(source_value, field.name)\n try:\n field.is_valid(value)\n except ValidationError as e:\n errors[field.name].append(e.message)\n\n if errors:\n raise ValidationError(errors)\n else:\n return super(Nested, self).validate(source_value)", "def validate(self, instance, value):", "def validate(self, instance, value):", "def validate_validation_field(self, schema):\n if \"$validation\" in schema:\n if 'properties' not in schema[\"$validation\"]:\n raise KeyError('properties not in $validation field')\n else:\n # validate the json schema\n self.validate_json_schema(schema[\"$validation\"])\n properties = schema[\"$validation\"][\"properties\"].keys()\n # find all parents of the class\n paths = nx.all_simple_paths(self.schema_nx,\n source='http://schema.org/Thing',\n target=schema[\"@id\"])\n parent_classes = set()\n for _path in paths:\n for _item in _path:\n parent_classes.add(_item)\n # loop through all properties and check if the value of\n # domainIncludes belong to one of the parent_classes\n for _property in properties:\n matched = False\n for _record in self.all_schemas:\n if _record[\"rdfs:label\"] == _property:\n domainincludes_value = dict2list(_record[\"http://schema.org/domainIncludes\"])\n for record in domainincludes_value:\n if record[\"@id\"] in parent_classes:\n matched = True\n if not matched:\n raise ValueError('field {} in $validation is not correctly documented'.format(_property))\n else:\n pass", "def get_validate(self):\n return self.validate", "def _custom_validate_fields(self, issues):\n\n common_section = 'basicinfo'\n\n if self.address is None or self.address == '':\n issues.create(section=common_section,\n field='address',\n code='required')\n\n if self.phone is None or self.phone == '':\n issues.create(section=common_section,\n field='phone',\n code='required')\n elif not phonenumber.to_python(self.phone).is_valid():\n issues.create(section=common_section,\n field='phone',\n code='invalid')\n\n if self.psu_email is None or self.psu_email == '':\n issues.create(section=common_section,\n field='psu_email',\n code='required')\n else:\n try:\n EmailValidator()(self.psu_email)\n if not self.psu_email.endswith('@psu.edu'):\n issues.create(section=common_section,\n field='psu_email',\n code='prohibited')\n except ValidationError:\n issues.create(section=common_section,\n field='psu_email',\n code='invalid')\n\n if self.preferred_email == '':\n # preferred_email is assumed to be psu_email if blank\n pass\n else:\n try:\n EmailValidator()(self.preferred_email)\n except ValidationError:\n issues.create(section=common_section,\n field='preferred_email',\n code='invalid')\n\n if self.psu_id is None or self.psu_id == '':\n issues.create(section=common_section,\n field='psu_id',\n code='required')\n elif not re.match(r'^9\\d{8}$', self.psu_id):\n issues.create(section=common_section,\n field='psu_id',\n code='invalid')\n\n if self.major is None or self.major == '':\n issues.create(section=common_section,\n field='major',\n code='required')\n\n if self.semester_initiated is None:\n 
issues.create(section=common_section,\n field='semester_initiated',\n code='required')\n elif self.semester_initiated > Semester(self.due_at.date()):\n issues.create(section=common_section,\n field='semester_initiated',\n code='invalid')\n elif self.semester_initiated < Semester(('Spring', 1928)):\n issues.create(section=common_section,\n field='semester_initiated',\n code='invalid')\n\n if self.semester_graduating is None:\n issues.create(section=common_section,\n field='semester_graduating',\n code='required')\n elif self.semester_graduating < Semester(self.due_at.date()):\n issues.create(section=common_section,\n field='semester_graduating',\n code='invalid')\n elif self.semester_graduating > Semester(('Fall', 2099)):\n issues.create(section=common_section,\n field='semester_graduating',\n code='invalid')\n\n if self.cumulative_gpa == None:\n issues.create(section=common_section,\n field='cumulative_gpa',\n code='required')\n elif (self.cumulative_gpa < 0.0 or self.cumulative_gpa > 4.0):\n issues.create(section=common_section,\n field='cumulative_gpa',\n code='invalid')\n\n if self.semester_gpa == None:\n issues.create(section=common_section,\n field='semester_gpa',\n code='required')\n elif (self.semester_gpa < 0.0 or self.semester_gpa > 4.0):\n issues.create(section=common_section,\n field='semester_gpa',\n code='invalid')" ]
[ "0.6197543", "0.6169014", "0.61174333", "0.6108308", "0.599177", "0.5980837", "0.5857387", "0.57794267", "0.57696426", "0.5747897", "0.57409066", "0.57206804", "0.56706357", "0.56206083", "0.5611332", "0.5595536", "0.55856764", "0.55637115", "0.5559004", "0.55411786", "0.55006164", "0.5496813", "0.54923064", "0.5438802", "0.5420076", "0.54021186", "0.54021186", "0.5379715", "0.5377903", "0.5370544" ]
0.6690709
0
Returns selected="selected" if the option value matches the field's default. Used directly in widget templates
def is_option_selected(option, field): if field.attr.default and option[0] == field.attr.default: # and option[0] != self.empty: return ' selected="selected"' else: return ''
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def form_SelectChoiceDefault(request):\n schema = schemaish.Structure()\n schema.add('mySelect', schemaish.Integer())\n options = [(1,'a'),(2,'b'),(3,'c')]\n\n form = formish.Form(schema, 'form')\n form['mySelect'].widget = formish.SelectChoice(options)\n form['mySelect'].default = 2\n return form", "def form_SelectWithOtherChoiceDefault(request):\n schema = schemaish.Structure()\n schema.add('mySelect', schemaish.Integer())\n options = [(1,'a'),(2,'b'),(3,'c')]\n\n form = formish.Form(schema, 'form')\n form['mySelect'].widget = formish.SelectWithOtherChoice(options)\n form['mySelect'].default = 2\n return form", "def selected_value(self):\n option = self.selected_option\n return option.value if option else None", "def select(self, label, component, config, name, options, default=0):\n\n index = self.setting(config, name)\n index = [x for x, option in enumerate(options) if option == default]\n\n # Derive default index\n default = index[0] if index else default\n\n return st.selectbox(label, options, index=default, key=component + name)", "def dropdown_choice(value):\r\n return 'You have selected \"{}\"'.format(value)", "def value(self):\n return self.element.is_selected()", "def prepare_value(self, value):\n if value is None and self.required:\n choices =list(self.choices)\n if len(choices) == 1:\n value = choices[0][0]\n return super(TemplateChoiceField, self).prepare_value(value)", "def form_SelectChoiceWithEmptyString(request):\n schema = schemaish.Structure()\n schema.add('mySelect', schemaish.String())\n options = [('','empty string'),('b','b'),('c','c')]\n\n form = formish.Form(schema, 'form')\n form['mySelect'].widget = formish.SelectChoice(options, none_value='BANG')\n return form", "def _coalesceOption(self, name, default = ''):\n return self.view.settings().get(name, self.options.get(name, default))", "def option_is_default(self, opt):\n return opt in self.results and self.results[opt][1] is self._is_default", "def is_select(field):\n return isinstance(field.field.widget, forms.Select)", "def get_selected(self):\n return self.selected", "def form_SelectChoiceNoneOption(request):\n schema = schemaish.Structure()\n schema.add('mySelect', schemaish.Integer())\n options = [(1,'a'),(2,'b'),(3,'c')]\n\n form = formish.Form(schema, 'form')\n form['mySelect'].widget = formish.SelectChoice(options,none_option=(None, '--select--'))\n return form", "def selected(self):\n return self._choices[self._selected][0]", "def as_choice(self):\n choices = self.get_setting_choices(self.key, **self.get_kwargs())\n\n if not choices:\n return self.value\n\n for value, display in choices:\n if value == self.value:\n return display\n\n return self.value", "def get_default(field):\n return field.scheme.default is None and SKIP_VALUE or field.scheme.default # noqa", "def is_initially_selected(self, value):\n return value in self._get_selected_values_set()", "def opt(self, key, default=False):\n if key not in self.options:\n return default\n return self.options.get(key)", "def is_default(self):\n\n # Make sure matching default and value cases are found to be\n # equivalent.\n if self.default is None: # empty string should equal None\n current_val = (None if self.value == \"\" else self.value)\n elif isinstance(self.default, str): # avoid str v float comparisons\n current_val = str(self.value)\n else:\n current_val = self.value\n\n # self.template does not contain any information about self.value, so\n # we need to check this separately.\n if current_val != self.default:\n return False\n\n # At this point, 
self.value is equivalent to self.default, so we should\n # check the remaining attribute defaults defined in self.template.\n default = True\n for attr, val in self.template.items():\n current = getattr(self, attr)\n if current != val:\n default = False\n break\n\n return default", "def is_selected(self):\n return self.container['is_selected']", "def getSelectedOption(self, element_tuple):\n select = Select(self.CORE.find_element(*self.format_element(element_tuple)))\n result = select.first_selected_option\n self.log_info(f\"Browser.getSelectedOption: {element_tuple} is currently set to {result}\")\n return result", "def get_default_value(self):\n pass", "def test_default(self):\r\n self.assertEqual(self.option.default, False)", "def form_RadioChoiceDefault(request):\n schema = schemaish.Structure()\n schema.add('myRadio', schemaish.Integer())\n options = [(1,'a'),(2,'b'),(3,'c')]\n\n form = formish.Form(schema, 'form')\n form['myRadio'].widget = formish.RadioChoice(options)\n form['myRadio'].default = 2\n return form", "def test_default(self):\r\n self.assertEqual(self.option.default, 'testing')", "def _combobox_choice(self, _=None):\n combobox_string = self.value_combobox.var.get()\n if combobox_string.startswith(\"Unknown: \"):\n value = int(combobox_string[len(\"Unknown: \"):])\n else:\n value = int(self.value_combobox.var.get().split(\" \")[0])\n self.master.change_field_value(self.field_name, value)", "def form_RadioChoiceNoneOptionWithDefault(request):\n schema = schemaish.Structure()\n schema.add('myRadio', schemaish.Integer())\n options = [(1,'a'),(2,'b'),(3,'c')]\n\n form = formish.Form(schema, 'form')\n form.defaults = {'myRadio':1}\n form['myRadio'].widget = formish.RadioChoice(options,none_option=(None, '--select--'))\n return form", "def get_selected_option_text(self, element):\n if element:\n select = Select(element)\n return select.first_selected_option.text\n else:\n return None", "def has_default_value(self):\n return self.default is not None", "def default(self):\n\n return self._get_field(\"value\")" ]
[ "0.65975666", "0.6416188", "0.6191291", "0.6049021", "0.604218", "0.5977653", "0.597259", "0.5937122", "0.5825413", "0.5796056", "0.5770259", "0.5767324", "0.5749384", "0.5735948", "0.5734999", "0.57267016", "0.56422603", "0.56007797", "0.5600718", "0.55645937", "0.55640537", "0.5559449", "0.55465686", "0.55423236", "0.55393296", "0.5514642", "0.55146396", "0.5509537", "0.5509047", "0.5507111" ]
0.84386253
0
Helper function to plot results from sampling (neighborhood radii or iterations)
def plot_sampling(fname, df, of="r_neighbor", show=True):
    xlabel = r"Neighborhood $r_{c}$"
    logx = False

    if of == "n_iter":
        xlabel = "#Cycles"
        logx = True

    fig, ax = plt.subplots(figsize=(15, 5))

    gb = df.groupby([of])
    aggregation = {"stress": [np.mean, np.std], "correlation": [np.mean, np.std]}
    gb = gb.agg(aggregation)

    gb.stress["mean"].plot(yerr=gb.stress["std"], color="crimson", logx=logx)
    ax2 = ax.twinx()
    gb.correlation["mean"].plot(yerr=gb.correlation["std"], color="dodgerblue", logx=logx)

    ax.set_xlabel(xlabel, fontsize=20)
    ax.set_ylabel("Stress", fontsize=20)
    ax.set_ylim(0, 0.2)
    ax2.set_ylabel(r"Correlation $\gamma$", fontsize=20)
    ax2.set_ylim(0, 1)

    plt.savefig(fname, dpi=300, format="png", bbox_inches="tight")

    if show:
        plt.show()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plotResults(self):\n\n clusters = self.data[[i for i in range(len(self.data)) if self.vorLabels[i] != 0], :]\n vorLabels = [self.vorLabels[i] for i in range(len(self.data)) if self.vorLabels[i] != 0]\n\n self.plot = voronoiPlot(clusters, self.skel, self.skelLabels, self.isCorrect, vorLabels)\n self.plot.snapPlot()", "def problem2():\n k = 4\n total_draws = 20\n total_balls = 50\n\n plt.figure()\n for _ in range(50):\n for num_samples in [10000]:\n experiment_results = []\n for samples in range(num_samples):\n N = np.random.randint(1, k, total_balls - 1)\n N = np.append(N, k)\n N = np.array(N).flatten()\n random.shuffle(N)\n draw = N[:total_draws]\n experiment_result = np.any(draw == 4)\n experiment_results.append(experiment_result)\n plt.plot(np.cumsum(experiment_results) / np.arange(1, num_samples + 1))\n old_result = experiment_results[:]\n\n plt.xlabel('Total Draws')\n plt.ylabel('Probability')\n plt.show()", "def problem1():\n n_i = 10\n k = 5\n num_samples = 1000\n total_draws = 50\n\n plt.figure()\n for num_samples in [100, 1000, 10000]:\n experiment_results = []\n for samples in range(num_samples):\n # N = np.random.randint(1, k + 1, n_i * k)\n N = np.array([[i] * n_i for i in range(1, k+1)]).flatten()\n random.shuffle(N)\n experiment_results_for_sample = []\n for n_draws in range(1, total_draws + 1):\n draw = N[:n_draws]\n experiment_result = check_if_all_nums_in_draw(draw, k)\n experiment_results_for_sample.append(experiment_result)\n experiment_results.append(experiment_results_for_sample)\n experiment_results = np.array(experiment_results)\n\n plt.plot(range(1, total_draws + 1), np.sum(experiment_results, axis=0)/num_samples, label=num_samples)\n\n plt.plot([1, total_draws+1], [0.9, 0.9])\n plt.xlabel('Total Draws')\n plt.ylabel('Probability')\n plt.xlim(1, total_draws)\n plt.legend()\n plt.show()", "def plot_iter(V, Pi, params):\n n_rows = params['n_rows']\n n_cols = params['n_cols'] \n occ_grid = params['occ_grid']\n R = params['R']\n\n goal = params['goal']\n sink = params['sink']\n\n actions = ['left','right','up','down']\n\n fig1 = plt.figure(1, clear=True)\n for row in range(n_rows):\n for col in range(n_cols):\n if occ_grid[row, col] == 1:\n plt.text(col, n_rows - 1 - row, '0.0', color='k', ha='center', va='center')\n elif np.any(np.logical_and(row==sink[:, 0], col==sink[:, 1])):\n plt.text(col, n_rows - 1 - row, \"{:.3f}\".format(R[row, col]), \n color='r', ha='center', va='center')\n elif np.all([row, col]==goal):\n plt.text(col, n_rows - 1 - row, \"{:.3f}\".format(R[row, col]), \n color='g', ha='center', va='center')\n else:\n plt.text(col, n_rows - 1 - row, \"{:.3f}\".format(V[row, col]), \n color='b', ha='center', va='center')\n plt.axis([-1, n_cols, -1, n_rows])\n plt.axis('off')\n\n\n fig2 = plt.figure(2, clear=True)\n for row in range(n_rows):\n for col in range(n_cols):\n if not Pi[row, col] == -1:\n plt.text(col, n_rows - 1 - row, actions[Pi[row, col]], \n color='k', ha='center', va='center')\n elif np.all([row, col]==goal):\n plt.text(col, n_rows - 1 - row, \"{:.3f}\".format(R[row, col]), \n color='g', ha='center', va='center')\n elif np.any(np.logical_and(row==sink[:, 0], col==sink[:, 1])):\n plt.text(col, n_rows - 1 - row, \"{:.3f}\".format(R[row, col]), \n color='r', ha='center', va='center')\n plt.axis([-1, n_cols, -1, n_rows])\n plt.axis('off')\n\n fig1.canvas.draw()\n fig1.canvas.flush_events()\n fig2.canvas.draw()\n fig2.canvas.flush_events()", "def _nd_plot_samples(self, **kwargs):\n\n from pesummary.core.plots.plot import 
_make_comparison_corner_plot as plotfunc\n\n plotkwargs = kwargs.copy()\n\n args = [self._samples]\n plotkwargs[\"corner_parameters\"] = self.parameters\n if \"latex_labels\" not in kwargs:\n plotkwargs[\"latex_labels\"] = self.latex_labels\n\n if \"plot_percentile\" not in kwargs:\n plotkwargs[\"plot_percentile\"] = False\n\n # get ranges for each parameter to set figure axes extents\n if \"range\" not in kwargs:\n range = []\n for param in self.parameters:\n range.append(\n [\n np.min(\n [samps[param].min() for samps in self._samples.values()]\n ),\n np.max(\n [samps[param].max() for samps in self._samples.values()]\n ),\n ]\n )\n plotkwargs[\"range\"] = range\n\n # default to not show quantile lines\n plotkwargs.setdefault(\"quantiles\", None)\n\n # set default injection line color\n plotkwargs.setdefault(\"truth_color\", \"k\")\n\n # set injection parameter values\n if self.injection_parameters is not None:\n injpars = [\n self.injection_parameters[p] - self.parameter_offsets[p]\n for p in self.parameters\n if self.injection_parameters[p] is not None\n ]\n if len(injpars) == self._num_parameters:\n plotkwargs[\"truths\"] = injpars\n\n # create plot\n with DisableLogger():\n fig = plotfunc(*args, **plotkwargs)\n\n # turn frame off on legend\n fig.legends[0].set_frame_on(False)\n\n return fig", "def sample_and_plot(self):\n fig = plt.figure()\n ax = plt.axes(projection = '3d')\n ax.plot_surface(self.X, self.Y, self.sample(), cmap = plt.cm.jet, rstride = 2, cstride = 2, linewidth = 1)\n plt.show()", "def plot(self):\n\t\tself.plotOfSpect()", "def plot_sample(self):\n print(u'plot_sample()')\n data_set = self.data_sets[1]\n scenario = u'Greedy Search'\n titles = [u'Collaborative Filtering', u'Content-based']\n fig, axes = plt.subplots(1, 2, figsize=(10, 5))\n for i, rec_type in enumerate(data_set.missions):\n graph = data_set.folder_graphs + rec_type + '_' + str(15) + u'.txt'\n for strategy in Strategy.strategies:\n m = data_set.missions[rec_type][graph][strategy][scenario]\n m.compute_stats()\n ppl.plot(axes[i], np.arange(STEPS_MAX + 1),\n m.stats, label=strategy, linewidth=2)\n axes[i].set_xlabel(u'#Hops')\n axes[i].set_ylabel(u'Success Ratio')\n axes[i].set_ylim(0, 85)\n axes[i].set_xlim(0, STEPS_MAX * 1.01)\n axes[i].set_title(titles[i])\n ppl.legend(axes[i], loc=0)\n\n\n # plt.suptitle(u'Greedy Search on the BookCrossing for N=15',\n # size='xx-large', x=0.5)\n fig.subplots_adjust(left=0.08, right=0.97, top=0.9)\n\n plt.savefig('plots/sample.png')\n plt.savefig('plots/sample.pdf')", "def plot_visualization(path_results, x_data, y_data, variant_mode, nb_classes, signal_test, args):\n\n\t#path_tsne = path_results + \"/Visualization/train/\" + str(args.step) + \"_2d.csv\"\n\t#data_frame = pd.read_csv(path_tsne)\n\t\n\tpath_maping = path_results + \"/Maping/\" + str(args.subject).split(\".txt\")[0] + \"/\"\n\tfilename = path_maping + \"maping_\" + str(args.step) + \"_\" + str(args.subject).split(\".txt\")[0] + \"_stick\" + str(args.stick) + \".png\"\n\n\tprint(\"path_save maping\", path_maping)\n\n\tif not os.path.exists(path_maping):\n\t\tos.makedirs(path_maping)\n\n\t#print(\"path_tsne\", path_tsne)\n\n\tlabel_maping = np.array([10])\n\n\tx_data = np.concatenate((x_data,signal_test),axis=0)\n\ty_data = np.concatenate((y_data,label_maping),axis=0)\n\n\tprint(\"x_data concatenate\",x_data.shape)\n\tprint(\"y_data concatenate\",y_data.shape)\n\n\tdata_frame = tsne_2d(x_data, y_data)\n\n\t\n\t\n\tgroups = data_frame.groupby('label')\n\n\tcluster_names, cluster_colors = 
get_target_names_dr(nb_classes, args.mode, args, variant_mode)\n\n\tfig = plt.figure(figsize=(20, 10))\n\tax = fig.add_subplot(111)\n\tax.margins(0.05) # Optional, just adds 5% padding to the autoscaling\n\tfor name, group in groups:\n\t\t\n\t\tif cluster_names[name] == str(args.subject):\n\t\t\tax.scatter(group.x, group.y, marker='D', s=150, edgecolors = 'face',label=cluster_names[name], color=cluster_colors[name])\n\t\telse:\n\t\t\tax.scatter(group.x, group.y, marker='o', label=cluster_names[name], color=cluster_colors[name])\n\n\tax.legend(numpoints=1) #show legend with only 1 point\n\tplt.savefig(filename) #save the plot", "def show_rand_conn(random_conn_parameters):\n\n fig = plt.figure(figsize=(8.5, 8.5))\n ax = fig.add_subplot(111)\n colors = cm.rainbow(np.linspace(\n 0, 1, random_conn_parameters['nb_random_conn']))\n\n for x_pix, z_pix, x_neigh, z_neigh, c in zip(\n random_conn_parameters['x_pixel'], random_conn_parameters['z_pixel'],\n random_conn_parameters['x_neigh'], random_conn_parameters['z_neigh'], colors):\n ax.scatter(x_pix, z_pix, color=c)\n ax.scatter(x_neigh, z_neigh, color=c)\n ax.plot([x_pix, x_neigh], [z_pix, z_neigh], color=c, linewidth=1)\n\n plt.title('Random connectivity')\n ax.set_xlabel('X (pixel)')\n ax.set_ylabel('Z (pixel)')\n plt.show()\n\n return", "def simplexPlot(results):\n _, hole1Values, logProbs = getStatistics(results.items())\n import matplotlib.pyplot as plt\n plt.plot(hole1Values, logProbs)\n plt.show()", "def generation(self,rounds):\n a = []\n b = []\n for i in range(rounds):\n self.fight()\n c = self.avgFitness()\n a.append(c[0])\n b.append(c[1])\n self.sort()\n self.cull()\n self.rePop()\n self.refresh()\n self.fight()\n self.sort()\n print self\n plt.scatter([x for x in range(len(a))],a,color = \"red\")\n plt.scatter([x for x in range(len(b))],b,color = \"green\")\n plt.show()", "def generation(self,rounds):\n a = []\n b = []\n for i in range(rounds):\n self.fight()\n c = self.avgFitness()\n a.append(c[0])\n b.append(c[1])\n self.sort()\n self.cull()\n self.rePop()\n self.refresh()\n self.fight()\n self.sort()\n print self\n plt.scatter([x for x in range(len(a))],a,color = \"red\")\n plt.scatter([x for x in range(len(b))],b,color = \"green\")\n plt.show()", "def make_plot(x,y):", "def plot_r(self):\n for k, v, o in self.data:\n self.plot_r1(k, v, o)", "def plot(self):\n\t\tself.plotOfSpect().plot()", "def plot_distribution(folder: str,\n neat: bool = False,\n neat_gru: bool = False,\n neat_lstm: bool = False,\n neat_sru: bool = False,\n neat_sru_s: bool = False,\n gen: int = 500,\n ):\n # Collect all the populations\n populations = []\n if neat: populations.append(D_NEAT)\n if neat_gru: populations.append(D_NEAT_GRU)\n if neat_lstm: populations.append(D_NEAT_LSTM)\n if neat_sru: populations.append(D_NEAT_SRU)\n if neat_sru_s: populations.append(D_NEAT_SRU_S)\n if len(populations) == 0: return\n \n # Collect all the measure options\n OPTIONS = ['distance', 'finished', 'fitness', 'score', 'time', 'training']\n \n # Go over all possibilities\n print(f\"\\n===> CREATING POPULATION DISTRIBUTIONS <===\")\n path = f\"population_backup/storage/{folder}/\"\n path_images = get_subfolder(path, 'images')\n for option in OPTIONS:\n plt.figure(figsize=(10, 2.5))\n min_val = float(\"inf\")\n max_val = -float(\"inf\")\n for pop in populations:\n d = load_dict(f\"{path}{pop}/evaluation/{option}\")\n dist = d[str(gen)]\n if min(dist) < min_val: min_val = min(dist)\n if max(dist) > max_val: max_val = max(dist)\n \n # Remove outliers first\n dist = 
sorted(dist)\n q1 = min(dist[int(round(1 / 4 * len(dist)))], dist[int(round(3 / 4 * len(dist)))])\n q3 = max(dist[int(round(1 / 4 * len(dist)))], dist[int(round(3 / 4 * len(dist)))])\n iqr = q3 - q1\n \n for i in range(len(dist) - 1, -1, -1):\n if (dist[i] < (q1 - 1.5 * iqr)) or (dist[i] > (q3 + 1.5 * iqr)): del dist[i]\n sns.distplot(dist,\n hist=False,\n kde=True,\n norm_hist=True,\n bins=100,\n color=COLORS[pop],\n kde_kws={'linewidth': 2},\n label=pop,\n )\n plt.xlim(min_val, max_val)\n # plt.title(f\"Probability density across populations for '{option}' at generation {gen}\")\n plt.xlabel(option)\n # plt.yticks([])\n plt.ylabel('probability density')\n leg = plt.legend(loc='upper center',\n bbox_to_anchor=(0.5, 1.2),\n fancybox=True,\n fontsize=8,\n ncol=len(populations))\n for line in leg.get_lines():\n line.set_linewidth(4.0)\n plt.tight_layout()\n plt.savefig(f\"{path_images}dist_{option}.png\", bbox_inches='tight', pad_inches=0.02)\n plt.savefig(f\"{path_images}dist_{option}.eps\", format='eps', bbox_inches='tight', pad_inches=0.02)\n # plt.show()\n plt.close()", "def plot(self, **kwargs):\n\n # get colors\n colors = kwargs.get(\"colors\", GW_OBSERVATORY_COLORS)\n\n # get Result samples\n self._samples = {\n label: value.posterior\n for label, value in self.results.items()\n if isinstance(value, Result)\n }\n\n # get Grid posteriors\n self._grids = {\n label: [value, value.ln_evidence] # store grid and log evidence\n for label, value in self.results.items()\n if isinstance(value, Grid)\n }\n\n # apply offsets for slightly nicer plots axes\n self.parameter_offsets = {parameter: 0.0 for parameter in self.parameters}\n if len(self._grids) == 0 and len(self._samples) == 1:\n for label in self._samples:\n for parameter in self.parameters:\n srange = [\n np.min(self._samples[label][parameter]),\n np.max(self._samples[label][parameter]),\n ]\n label_suffix = \"\"\n\n # offset values\n median = np.median(self._samples[label][parameter])\n relwidth = np.abs((srange[1] - srange[0]) / median)\n\n if relwidth < 1e-4:\n offsetstr = f\"{median:.4e}\"\n a, b = offsetstr.split(\"e\")\n\n if np.abs(int(b)) < 3:\n offsetstr = f\"{median:.4f}\"\n offset = float(offsetstr)\n else:\n offset = float(offsetstr)\n offsetstr = a + rf\"\\!\\times\\!10^{{{int(b)}}}\"\n\n self.parameter_offsets[parameter] = offset\n\n self._samples[label][parameter] -= offset\n label_suffix = rf\" [${{\\scriptstyle {offsetstr}}}$]\"\n\n self.latex_labels[parameter] += label_suffix\n\n colordicts = []\n for j, res in enumerate([self._samples, self._grids]):\n colordicts.append({})\n for i, key in enumerate(res):\n if key in colors:\n colordicts[-1][key] = colors[key]\n elif key.lower() == \"joint\":\n # if using \"Joint\" as the multi-detector analysis key, set the color to black\n colordicts[-1][key] = \"k\"\n else:\n # use PESummary color cycle\n colordicts[-1][key] = list(colorcycle)[\n (j * 2 + i) % len(colorcycle)\n ]\n\n # store original keywords arguments\n origkwargs = kwargs.copy()\n\n # plot samples\n fig = None\n if len(self._samples) > 0:\n kwargs[\"colors\"] = list(colordicts[0].values())\n if self._num_parameters == 1:\n fig = self._1d_plot_samples(**kwargs)\n elif self._num_parameters == 2 and self.plottype != \"corner\":\n fig = self._2d_plot_samples(**kwargs)\n else:\n fig = self._nd_plot_samples(**kwargs)\n\n # restore keywords\n kwargs = origkwargs\n\n if len(self._grids) > 0:\n kwargs[\"colors\"] = list(colordicts[1].values())\n if fig is not None and \"fig\" not in kwargs:\n kwargs[\"fig\"] = fig\n if 
self._num_parameters == 1:\n fig = self._1d_plot_grid(**kwargs)\n elif self._num_parameters == 2 and self.plottype != \"corner\":\n fig = self._2d_plot_grid(**kwargs)\n else:\n fig = self._nd_plot_grid(**kwargs)\n\n # add further figure information\n if self._num_parameters == 1:\n ax = fig.gca()\n\n # set figure bounds if outside defaults\n if self.parameters[0] in DEFAULT_BOUNDS:\n _set_axes_limits(ax, self.parameters[0], axis=\"x\")\n\n # add injection values\n if self.injection_parameters is not None:\n if self.injection_parameters[self.parameters[0]] is not None:\n ax.axvline(\n (\n self.injection_parameters[self.parameters[0]]\n - self.parameter_offsets[self.parameters[0]]\n ),\n color=kwargs.get(\"injection_color\", \"k\"),\n linewidth=1,\n )\n elif self._num_parameters == 2:\n if \"triangle\" in self.plottype:\n a1, a2, a3 = fig[1:]\n order = [\"x\", \"y\"] if self.plottype == \"triangle\" else [\"y\", \"x\"]\n params = (\n self.parameters[:2]\n if self.plottype == \"triangle\"\n else self.parameters[1::-1]\n )\n\n # set figure bounds if outside defaults\n for param, axes, axis in zip(params, [[a1, a2], [a2, a3]], order):\n for ax in axes:\n _set_axes_limits(ax, param, axis=axis)\n\n self.fig = fig\n return self.fig", "def plot_onemitexample_R2N_hist_paperfigure(eg_netseed,eg_mitnum,resultsdir='../results/odor_morphs'):\n fig = figure(figsize=(columnwidth,columnwidth/2.0),dpi=300,facecolor='w') # 'none' is transparent\n ax3 = fig.add_subplot(2,3,1)\n ax4 = fig.add_subplot(2,3,2)\n ax5 = fig.add_subplot(2,3,4)\n ax6 = fig.add_subplot(2,3,5)\n ax1 = fig.add_subplot(2,3,3)\n ax2 = fig.add_subplot(2,3,6)\n ## inh = (no_singles,no_joints,no_lat,no_PGs,varyRMP)\n inh_options = [ (0,(False,False,False,False,False),'lat inh') ]\n for ploti,(inhi,inh,inhstr) in enumerate(inh_options):\n R2Ns = []\n lin_R2Ns = []\n chilist = []\n n_accept = 0\n for stimi,stimseed in enumerate(stim_seeds):\n if not salient: net_seeds = [stimseed]\n for neti,netseed in enumerate(net_seeds):\n for ngi,num_gloms in enumerate([3]):\n\n filename, switch_strs \\\n = get_filename(netseed,stimseed,inh,num_gloms,stimi,neti,inhi,resultsdir=resultsdir)\n switches_str = string.join(switch_strs,'')\n ## if the result file for these seeds & tweaks doesn't exist,\n ## then carry on to the next.\n if not os.path.exists(filename): continue\n print filename\n for fitted_mitral in [0,1]:\n ## First the weighted-linear sigmoid:\n ## If the fitted params file does not exist, create it (them).\n if not os.path.exists(filename+'_params'+str(fitted_mitral)):\n print \"fitting file\",filename\n refit = True\n else: refit = False\n ## read in params & responses for this result file\n mit_fit_params = \\\n fit_om.fit_morphs(filename, fitted_mitral, 'arb', refit=refit)\n params,chisq,inputsA,inputsB,fitted_responses,\\\n numavgs,firingbinsmeanList,firingbinserrList = mit_fit_params\n S2N,S2R = forR2N.residual2noise(fitted_responses[-2],firingbinsmeanList[-2],\\\n firingbinserrList[-2]*sqrt(numavgs),starti=0) # odor A\n R2N_A = S2N/S2R\n if isnan(R2N_A): continue\n S2N,S2R = forR2N.residual2noise(fitted_responses[0],firingbinsmeanList[0],\\\n firingbinserrList[0]*sqrt(numavgs),starti=0) # odor B\n R2N_B = S2N/S2R\n if isnan(R2N_B): continue\n R2Ns.append(R2N_A)\n R2Ns.append(R2N_B)\n if netseed == eg_netseed and fitted_mitral == eg_mitnum:\n fit_om.plot_example_onemit(ax3,ax4,eg_mitnum,mit_fit_params)\n \n ## Linear-rectifier or Linear-sigmoid depending on FULLlin variable above.\n ## If the fitted params file does not exist, create it 
(them).\n if not os.path.exists(filename+'_params'+linextn+str(fitted_mitral)):\n print \"fitting FULLlin file\",filename\n refit = True\n else: refit = False\n ## fit/get the params and responses for this result file\n mit_fit_params = \\\n fit_om.fit_morphs(filename, fitted_mitral, 'lin', refit=refit)\n params,chisq,inputsA,inputsB,fitted_responses,\\\n numavgs,firingbinsmeanList,firingbinserrList = mit_fit_params\n S2N,S2R = forR2N.residual2noise(fitted_responses[-2],firingbinsmeanList[-2],\\\n firingbinserrList[-2]*sqrt(numavgs),starti=0) # odor A\n R2N_A = S2N/S2R\n if isnan(R2N_A): continue\n S2N,S2R = forR2N.residual2noise(fitted_responses[0],firingbinsmeanList[0],\\\n firingbinserrList[0]*sqrt(numavgs),starti=0) # odor B\n R2N_B = S2N/S2R\n if isnan(R2N_B): continue\n lin_R2Ns.append(R2N_A)\n lin_R2Ns.append(R2N_B)\n chilist.append(sqrt(chisq))\n if netseed == eg_netseed and fitted_mitral == eg_mitnum:\n fit_om.plot_example_onemit(ax5,ax6,eg_mitnum,mit_fit_params)\n\n n_accept += 1\n\n R2N_max = 1.0\n ax1.hist(clip(R2Ns,0,R2N_max),20,normed=True,edgecolor='b',facecolor='b')\n _,y1 = ax1.get_ylim()\n ax2.hist(clip(lin_R2Ns,0,R2N_max),20,normed=True,edgecolor='b',facecolor='b')\n #ax2.hist(clip(chilist,0,R2N_max),20,normed=True,edgecolor='b',facecolor='b')\n _,y2 = ax2.get_ylim()\n yR2Nmax = max(y1,y2)\n print \"Number of mitral cells accepted =\",n_accept\n \n ## beautify plots\n for axnum,ax in enumerate([ax1,ax2]):\n xmin,xmax,ymin,ymax = \\\n beautify_plot(ax,x0min=True,y0min=True,xticksposn='bottom',yticksposn='left')\n ax.set_xlim([0,R2N_max])\n ax.set_xticks([0,R2N_max])\n ax.set_ylim([0,yR2Nmax])\n ax.set_yticks([0,yR2Nmax])\n for ax in [ax1,ax3,ax4]:\n ax.set_xticklabels(['',''])\n ## axes_labels() sets sizes of tick labels too.\n axes_labels(ax1,'','prob. 
density',adjustpos=False,xpad=0,ypad=0)\n ax1.yaxis.set_label_coords(-0.29,-0.3)\n axes_labels(ax2,'$\\sqrt{residual/noise}$','',adjustpos=False,xpad=1,ypad=0)\n\n axes_labels(ax3,'','firing rate (Hz)',adjustpos=False,xpad=0,ypad=0)\n ax3.yaxis.set_label_coords(-0.29,-0.3)\n axes_labels(ax5,'time (s)','',adjustpos=False,xpad=3,ypad=0)\n\n axes_labels(ax4,'','fitted weight',adjustpos=False,xpad=0,ypad=0)\n ax4.yaxis.set_label_coords(-0.24,-0.3)\n axes_labels(ax6,'conc (% SV)','',adjustpos=False,xpad=3,ypad=0)\n\n fig_clip_off(fig)\n fig.tight_layout()\n fig.subplots_adjust(hspace=0.3,wspace=0.5) # has to be after tight_layout()\n fig.savefig('../figures/morphs_R2Ns.svg',dpi=fig.dpi)\n fig.savefig('../figures/morphs_R2Ns.png',dpi=fig.dpi)", "def inclass1():\n import numpy as np\n import matplotlib.pyplot as plt\n\n\n N = 50\n x = np.random.rand(N)\n y = np.random.rand(N)\n colors = np.random.rand(N)\n area = np.pi * (15 * np.random.rand(N))**2 # 0 to 15 point radiuses\n\n plt.scatter(x, y, s=area, c=colors, alpha=0.5)\n plt.show()", "def quick_plot(solution):\n plt.suptitle('GNLSE solution')\n\n plt.subplot(1, 2, 1)\n plot_wavelength_vs_distance(solution)\n\n plt.subplot(1, 2, 2)\n plot_delay_vs_distance(solution)\n\n plt.show()", "def perform_example_simulation(mode=\"spherical\", Npackets=10000):\n import matplotlib.pyplot as plt\n\n assert(mode in [\"planar\", \"spherical\"])\n\n J_est = []\n H_est = []\n K_est = []\n for i in range(10):\n logging.info(\"Doing Iteration {:d}\".format(i))\n if mode == \"planar\":\n mcrt = mcrt_grid_planar(Npackets=Npackets)\n else:\n mcrt = mcrt_grid_spherical(Npackets=Npackets)\n J_est.append(mcrt.Jestimator)\n H_est.append(mcrt.Hestimator)\n K_est.append(mcrt.Kestimator)\n\n J_est = np.array(J_est) / mcrt.S\n H_est = np.array(H_est) / mcrt.S\n K_est = np.array(K_est) / mcrt.S\n\n colors = plt.rcParams[\"axes.color_cycle\"]\n labels = [r\"$J$\", r\"$H$\", r\"$K$\"]\n\n x = (mcrt.xl + mcrt.xr) * 0.5 * 1e-5\n\n for y in [mcrt.Janalytic, mcrt.Hanalytic, mcrt.Kanalytic]:\n plt.plot(x, y / mcrt.S, ls=\"dashed\", color=\"black\")\n\n for i, y in enumerate([J_est, H_est, K_est]):\n c = colors[i]\n plt.fill_between(x, y.mean(axis=0) - 2. * y.std(axis=0),\n y.mean(axis=0) + 2. 
* y.std(axis=0),\n alpha=0.25, color=c)\n plt.fill_between(x, y.mean(axis=0) - y.std(axis=0),\n y.mean(axis=0) + y.std(axis=0),\n alpha=0.5, color=c)\n plt.plot(x, y.mean(axis=0), color=c, marker=\"o\", ls=\"\",\n label=labels[i], markerfacecolor=(1, 1, 1, 0),\n markeredgecolor=c)\n\n plt.legend(frameon=False)\n plt.xlabel(r\"$r$ [km]\")\n plt.ylabel(r\"$J/S$, $H/S$, $K/S$\")\n plt.autoscale(enable=True, axis='x', tight=True)\n plt.show()", "def for_fun():\n k = 10\n total_draws = 35\n total_balls = 40\n n_experiments = 100\n old_result = None\n\n rand_color = randomcolor.RandomColor()\n fig = plt.figure(constrained_layout=False, frameon=False)\n ax = fig.add_axes([0, 0, 1, 1])\n ax.set_facecolor((0.07, 0.07, 0.05))\n\n # for total_draws, color in zip([20, 25, 30], ['red', 'red', 'red']):\n # for total_draws, color in zip([20, 25, 30], ['purple', 'yellow', 'purple']): # mardi gras argyle\n # for total_draws, color in zip([5, 25, 27, 23, 40], ['purple', 'purple', 'blue', 'blue', 'purple']):\n for total_draws, color in zip([20, 3, 5, 10, 35], ['blue', 'red', 'blue', 'purple', 'blue']): # this one is good\n for _ in range(n_experiments):\n for num_samples in [10000]:\n experiment_results = []\n for samples in range(num_samples):\n N = np.random.randint(1, k, total_balls - 1)\n N = np.append(N, k)\n N = np.array(N).flatten()\n random.shuffle(N)\n draw = N[:np.random.randint(total_draws - 3, total_draws + 3)]\n experiment_result = np.any(draw == k)\n experiment_results.append(experiment_result)\n if old_result:\n if np.random.uniform(0, 1) > 0.8:\n luminosity = None\n if color == 'green':\n luminosity = 'bright'\n if color == 'yellow':\n luminosity = 'dark'\n tmp_rgb_color = np.array(rand_color.generate(\n hue=color, luminosity=luminosity, count=1, format_='Array_rgb')) / 256.\n tmp_rgb_color = tmp_rgb_color[0]\n alpha = np.min([np.random.beta(0.01, 0.2), 0.9])\n ax.fill_between(np.arange(1, num_samples + 1),\n np.cumsum(experiment_results) / np.arange(1, num_samples + 1),\n np.cumsum(old_result) / np.arange(1, num_samples + 1),\n alpha=alpha,\n color=tmp_rgb_color)\n if np.random.uniform(0, 1) > 0.95:\n tmp_rgb_color = np.array(rand_color.generate(\n hue=color, luminosity='dark', count=1, format_='Array_rgb')) / 256.\n tmp_rgb_color = tmp_rgb_color[0]\n alpha = np.min([np.random.beta(0.1, 0.2), 0.9])\n linewidth = np.min([np.random.exponential(5.0), 0.9])\n ax.semilogx(np.arange(1, num_samples + 1),\n np.cumsum(experiment_results) / np.arange(1, num_samples + 1),\n alpha=alpha,\n linewidth=linewidth,\n c=tmp_rgb_color)\n old_result = experiment_results[:]\n\n plt.show()", "def plot_sample(x):\n plt.imshow(x[:,:,0])\n plt.title(\"gasf\")\n plt.colorbar()\n plt.show()\n\n plt.imshow(x[:,:,1])\n plt.title(\"gadf\")\n plt.colorbar()\n plt.show()\n\n plt.imshow(x[:,:,2])\n plt.title(\"mtf\")\n plt.colorbar()\n plt.show()", "def scan2plot(datafolder, start, end, first, last,\n theta_range, theta_bins, chi_range, chi_bins,\n gamma, delta,\n ci, cj, w, h, SDD, pxl_size, ph, d5i=None,\n fraction=1):\n chi_bins = int(chi_bins) # make sure the input is an integer\n theta_bins = int(theta_bins) # make sure the input is an integer\n chi_ax = np.linspace(chi_range[0], \n chi_range[1], chi_bins) # init chi axis\n tth_ax = np.linspace(theta_range[0], \n theta_range[1], theta_bins) # init 2th axis\n int_bin = np.zeros((chi_bins, theta_bins)) # init intensity plot\n tth_weight = np.zeros(theta_bins) # init weight normalization\n # (i.e., the number of times a certain bin has been filled)\n for i in range(first, 
last + 1):\n print(\"delta = \" + str(delta[i - start]) + \", gamma = \" + \n str(gamma[i - start]) + \n \", status: \" + str(i - start) + \"/\" + \n str(last - first)) # print info on current status\n fname = finder(\"*\" + str(i) + \n \".tif\", datafolder).find() # find image with index i\n with Image.open(fname) as img:\n tth_map, chi_map, PL = angle_maps(gamma[i - start], \n delta[i - start], \n ci, cj, w, h, \n SDD, pxl_size, ph) # angle calculations\n det_img = np.array(img) # convert image to numpy array\n if d5i.any() != None:\n det_img = det_img/(d5i[i - start]) # normalize data to monitor\n det_img /= PL # correct by Lorentz-pol.\n # data binning:\n for j in range(int(h/2*(1-fraction)), int(h/2*(1+fraction))):\n for k in range(int(w/2*(1-fraction)), int(w/2*(1+fraction))):\n # find bin on the 2th axis\n idx = closest(tth_ax, np.rad2deg(tth_map[j][k]))\n # find bin on the chi axis\n jdx = closest(chi_ax, np.rad2deg(chi_map[j][k]))\n # fill bin\n int_bin[jdx][idx] += det_img[j][k]\n # every time a bin is filled add 1 to the weight function\n tth_weight[idx] += 1\n print(\"Done!\")\n return tth_ax, chi_ax, int_bin, tth_weight", "def evaluate(self, plot):", "def plot(self):\n \n \n x_ibs=[] \n x_gss=[]\n y_ibs=[] \n y_gss=[]\n x_pso=[]\n x_bgd=[]\n y_bgd=[]\n y_pso=[]\n x_gd=[]\n y_gd=[]\n \n i=0.0000001\n \n # for k in range(1,51):\n # i= random.uniform(0.00000001, 1)\n # t_avg_ibs=[]\n # t_avg_gss=[]\n # for j in range(1,51):\n #L=random.randint(-100, 0)\n #U=random.randint(0, 100)\n max_iter=self.Max_iter \n L=self.Lower_bound\n U=self.Upper_bound\n \n minima=self.gss(L,U,i,1000)\n #print(\"minima at X = \",minima[1])\n x_ibs.append(self.I_bisection(L,U,minima[1],max_iter)[0])\n x_gss.append(self.gss(L,U,i,max_iter)[0])\n x_pso.append(self.particle_Swarm(self.func, L, U, 2, max_iter)[0])\n x_gd.append(self.gradient_descent(X=U ,eta=0.01, tol=minima[1],iter= max_iter)[0])\n x_bgd.append(self.b_gradient_descent(LB=L,UB=U ,eta=0.01, tol=minima[1],iter=max_iter)[0])\n #print(x_pso)\n for i in x_ibs[0]:\n #print(self.Func(i)) \n y_ibs.append(self.Func(i))\n for i in x_gss[0]:\n y_gss.append(self.Func(i)) \n for i in x_pso[0]:\n y_pso.append(self.Func(i)) \n for i in x_gd[0]:\n y_gd.append(self.Func(i)) \n for i in x_bgd[0]:\n y_bgd.append(self.Func(i)) \n #print(y_gss)\n\n plt.plot(x_ibs[0], y_ibs, 'r.')\n plt.plot(x_gss[0], y_gss, '.')\n plt.plot(x_pso[0], y_pso, 'y.')\n #plt.plot(x_gd[0], y_gd, 'y.')\n #plt.plot(x_bgd[0], y_bgd, 'k.')\n plt.xlabel('x')\n plt.ylabel('y')\n \n plt.suptitle('Interval Bisection Search (Red) vs Golden Section Search (Blue) vs Particle swarm optimization (Green)')\n #plt.axis([0, 100, 0.00000001, 1]) \n plt.show()\n plt.plot(x_gd[0], y_gd, 'r.')\n plt.plot(x_bgd[0], y_bgd, 'k.')\n plt.xlabel('x')\n plt.ylabel('y') \n plt.suptitle('Gradient Descent (Red) vs Batch Gradient Descent (Black) ')\n \n plt.show()\n \n start_time = timeit.default_timer()\n ibs=self.I_bisection(L,U,minima[1],max_iter)\n print(\" Execution time for Interval bisection Method is\", timeit.default_timer() - start_time,\"s\")\n start_time = timeit.default_timer()\n gss=self.gss(L,U,i,max_iter)\n print(\" Execution time for Golden Section Search is\", timeit.default_timer() - start_time,\"s\")\n start_time = timeit.default_timer()\n pso=self.particle_Swarm(self.func, L, U, 2, max_iter)\n print(\" Execution time for Particle swarm optimization is\", timeit.default_timer() - start_time,\"s\")\n start_time = timeit.default_timer()\n gd=self.gradient_descent(X=U ,eta=0.01, tol=minima[1],iter= 
max_iter)\n print(\" Execution time for Gradient Descent is\", timeit.default_timer() - start_time,\"s\")\n start_time = timeit.default_timer()\n bgd=self.b_gradient_descent(LB=L,UB=U ,eta=0.01, tol=minima[1],iter=max_iter)\n print(\" Execution time for Batch Gradient Descent is\", timeit.default_timer() - start_time,\"s\")\n plt.plot(ibs[1], ibs[2], 'r.')\n plt.text(ibs[1], ibs[2],\"IB\")\n plt.plot(gss[1], gss[2], '.')\n plt.text(gss[1], gss[2],\" GSS\")\n plt.plot(pso[1], pso[2], 'y.')\n plt.text(pso[1], pso[2],\" PSO\")\n plt.plot(gd[1], gd[2], 'g.')\n plt.text(gd[1], gd[2],\" GD \")\n plt.plot(bgd[1],bgd[2], 'k.')\n plt.text(bgd[1], bgd[2],\" Batch_GD\")\n \n plt.xlabel('Value of X')\n plt.ylabel('NUmber of iteration') \n plt.suptitle('Number of iterations vs minimum value of x')\n \n plt.show()", "def rho_true(subj, T, test_set, bbox_ls):\n res = (20,20)\n x = 0.5*(test_set[subj][0,T] + test_set[subj][2,T])\n y = 0.5*(test_set[subj][1,T] + test_set[subj][3,T])\n print \"Rho TRUE\"\n plt.scatter(x, y, s=60, color=\"grey\")\n x_min = x-test_scene.bbox_width/2\n x_max = x+test_scene.bbox_width/2\n y_min = y-test_scene.bbox_width/2\n y_max = y+test_scene.bbox_width/2\n bbox_npy = np.array(bbox_ls)\n x_width_arr = bbox_npy[:,0,0]\n x_pos_arr = bbox_npy[:,1,0]\n y_width_arr = bbox_npy[:,0,1]\n y_pos_arr = bbox_npy[:,1,1]\n pmax = lambda x,y: x*(x>y)+y*(y>x)\n pmin = lambda x,y: x*(x<y)+y*(y<x)\n x_left = pmax(x_min, x_pos_arr - x_width_arr/2.0)\n x_right = pmin(x_max, x_pos_arr + x_width_arr/2.0)\n y_bottom = pmax(y_min, y_pos_arr - y_width_arr/2.0)\n y_top = pmin(y_max, y_pos_arr + y_width_arr/2.0)\n out = (x_right-x_left)*(x_right>x_left)*(y_top-y_bottom)*(y_top>y_bottom)\n out /= test_scene.bbox_width**2\n return out", "def _nd_plot_grid(self, **kwargs):\n\n from matplotlib.lines import Line2D\n from pesummary.core.plots.publication import pcolormesh\n\n # only add to corner plot if plotting on an existing figure\n if \"fig\" not in kwargs:\n raise TypeError(\n \"Can only add Grid results to an existing corner plot showing samples\"\n )\n\n colors = kwargs.pop(\"colors\")\n\n fig = kwargs.pop(\"fig\")\n ax = fig.axes\n\n quantiles = kwargs.pop(\"quantiles\", None)\n grid2d = kwargs.pop(\"grid2d\", False)\n\n for i, (label, grid) in enumerate(self._grids.items()):\n plotkwargs = {}\n plotkwargs[\"color\"] = colors[i]\n plotkwargs[\"label\"] = label\n\n axidx = 0\n for j, param in enumerate(self.parameters):\n x = grid[0].sample_points[param]\n pdf = np.exp(\n grid[0].marginalize_ln_posterior(not_parameters=param) - grid[1]\n )\n\n ax[axidx].plot(x, pdf, **plotkwargs)\n\n if quantiles is not None:\n low, high = self._credible_interval_grid(\n grid[0], param, interval=quantiles\n )\n ax[axidx].axvline(low, color=colors[i], ls=\"--\")\n ax[axidx].axvline(high, color=colors[i], ls=\"--\")\n\n # plot 2D posteriors\n if grid2d:\n meshkwargs = {}\n meshkwargs[\"zorder\"] = kwargs.get(\"zorder\", -10)\n meshkwargs[\"shading\"] = kwargs.get(\"shading\", \"gouraud\")\n\n if \"cmap\" not in kwargs:\n if colors[i] in COLOR_MAP:\n meshkwargs[\"cmap\"] = COLOR_MAP[colors[i]]\n else:\n meshkwargs[\"cmap\"] = kwargs[\"cmap\"]\n\n for k in range(j + 1, self._num_parameters):\n y = grid[0].sample_points[self.parameters[k]]\n density = np.exp(\n grid[0].marginalize_ln_posterior(\n not_parameters=[param, self.parameters[k]]\n )\n - grid[1]\n )\n\n # set orientation of the 2D grid\n p1idx = grid[0].parameter_names.index(param)\n p2idx = grid[0].parameter_names.index(self.parameters[k])\n if p1idx < p2idx:\n 
# transpose density\n density = density.T\n\n axyidx = axidx + (k - j) * self._num_parameters\n pcolormesh(x, y, density, ax=ax[axyidx], **meshkwargs)\n\n axidx += self._num_parameters + 1\n\n # update the legend\n handles = []\n for legtext, leghandle in zip(\n fig.legends[0].texts, fig.legends[0].legendHandles\n ):\n label = legtext.get_text()\n legcolor = leghandle.get_color()\n\n handles.append(Line2D([], [], color=legcolor, label=label))\n\n for i, label in enumerate(self._grids):\n for line in ax[0].get_lines():\n linecolor = line.get_color()\n # test that colours are the same\n if linecolor == colors[i]:\n handles.append(Line2D([], [], color=linecolor, label=label))\n break\n\n # remove original legend\n fig.legends = []\n\n # re-add legend\n fig.legend(handles=handles, frameon=False, loc=\"upper right\")\n\n return fig", "def plot():\n pass" ]
[ "0.62359834", "0.6225181", "0.6198315", "0.6130711", "0.6028747", "0.6018309", "0.60092574", "0.6005623", "0.6003807", "0.5962413", "0.59510237", "0.5933299", "0.5933299", "0.5864029", "0.5844357", "0.5837657", "0.5837616", "0.58130574", "0.5795679", "0.5788632", "0.57843834", "0.57813805", "0.5778867", "0.5741148", "0.5723994", "0.5723035", "0.5720471", "0.5714361", "0.5689736", "0.5682432" ]
0.63356423
0
Post an answer to the given tweets
def answer_to_tweets(api, tweets):

    try:
        last_tweet_id = 0
        for tweet in tweets:
            print("Sending an answer to tweet {}: '{}'".format(tweet["id"],
                                                               tweet["text"]))
            api.statuses.update(status=TARGET_TWEET_ANSWER,
                                in_reply_to_status_id=tweet["id"])
            last_tweet_id = tweet["id"]
            time.sleep(1) # do not exceed Twitter limits
    finally:
        update_last_tweet_id(last_tweet_id)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def post_to_twitter(tweet):\n auth = tweepy.OAuthHandler(\n os.environ['BLADAMADUR_CONSUMER_KEY'],\n os.environ['BLADAMADUR_CONSUMER_SECRET'])\n auth.set_access_token(\n os.environ['BLADAMADUR_ACCESS_TOKEN'],\n os.environ['BLADAMADUR_ACCESS_TOKEN_SECRET'])\n api = tweepy.API(auth)\n\n api.update_status(tweet)", "def tweet(self, twitter_post, instruction):\n if instruction is None:\n logging.error('Instruction parameter missing')\n return TwitterResponse(description='Instruction parameter missing')\n\n if instruction == Instruction.PROCESS_WEATHER_DATA:\n twit_content = \"{}, {} {} C {}\".format(twitter_post.post_text, twitter_post.condition, twitter_post.temperature,\n twitter_post.youtube_url)\n if instruction == Instruction.PROCESS_ARTIST:\n twit_content = \"Requested: {} {}\".format(twitter_post.post_text, twitter_post.youtube_url)\n\n if instruction == Instruction.PROCESS_INSTAGRAM_POST:\n twit_content = twitter_post.post_text\n\n if twitter_post.post_text is None or twitter_post.youtube_url is None:\n return TwitterResponse(description='Twitter post text or youtube_url not resolved!')\n try:\n status = self.api.PostUpdate(twit_content)\n logging.info('Posted twit with status: %s', status)\n return TwitterResponse(status)\n except TwitterError as e:\n logging.error('Error posting twit: %s', e.message[0]['message'])\n return TwitterResponse(description='Fatal error while posting tweet')", "def send_tweet(tweet_text):\n twitter.update_status(status = tweet_text)", "def send_tweet(tweet_text):\n twitter.update_status(status=tweet_text)", "def send_tweet(tweet_text):\n twitter.update_status(status=tweet_text)", "def send_tweet(tweet_text):\n twitter.update_status(status=tweet_text)", "def post_to_twitter(worker_responses):\n for worker_response in worker_responses:\n print get_tweet_text(worker_response)", "def post_tweet(self, message):\n twitter = TwitterAPI(\n # os.environ[\"consumerKey\"],\n # os.environ[\"consumerSecret\"],\n # os.environ[\"accessToken\"],\n # os.environ[\"accessTokenSecret\"],\n self.twitter_creds[\"consumerKey\"],\n self.twitter_creds[\"consumerSecret\"],\n self.twitter_creds[\"accessToken\"],\n self.twitter_creds[\"accessTokenSecret\"],\n )\n\n request = twitter.request(\"statuses/update\", {\"status\": message})\n\n status_code = request.status_code\n if status_code == 200:\n rootLogger.info(\"Successfully tweeted: {}\".format(message))\n else:\n rootLogger.error(\"HTTP status code: {} -- unsuccessfully tweeted: {}\".format(status_code, message))", "def reply_to_tweets():\n last_seen_id = retrieve_last_seen_id(FILE_NAME)\n mentions = api.mentions_timeline(\n last_seen_id,\n tweet_mode='extended')\n\n for mention in reversed(mentions):\n print(str(mention.id) + ' - ' + mention.full_text, flush=True)\n last_seen_id = mention.id\n store_last_seen_id(last_seen_id, FILE_NAME)\n for i in range(len(keywords)):\n if keywords[i] in mention.full_text.lower():\n print(\"responding back to: \" + '@' +\n mention.user.screen_name, flush=True)\n api.update_status('@' + mention.user.screen_name + ' ' +\n salts[i], mention.id)", "def send_tweet(data):\n # Fill in the values noted in previous step here\n cfg = get_keys() # grab keys\n api = get_api(cfg) # setup API\n in_reply_to = None\n twitter_url = 'https://twitter.com'\n if data['in_reply_to'] is not None: # if post is reply ...\n for reply in data['in_reply_to']:\n if reply[:len(twitter_url)] == twitter_url: # if the URL points to twitter ...\n in_reply_to = reply.split('/')[-1:] # ... 
get the status id\n url = 'https://' + DOMAIN_NAME + data['url']\n tweets = post_to_tweets(data=data['content'], url=url) # process string into tweet thread\n # post the first tweet so that we have a status id to start the thread\n status = api.update_status(status=tweets.pop(0), in_reply_to_status_id=in_reply_to)\n first_id = status.id # the id which points to origin of thread\n try:\n lat,lng = data['geo'][4:].split(\",\")\n except KeyError:\n lat, lng = None, None\n for tweet in tweets:\n status = api.update_status(status=tweet, in_reply_to_status_id=status.id)\n return 'http://twitter.com/{name}/status/{id}'.format(name=status.user.screen_name, id=first_id, lat=lat, lng=lng)", "def post_to_tweets(data, url):\n\n print(\"here,\", url)\n\n albums = find_all_images(data['content'])\n text = strip_text(data['content'])\n\n \"\"\"Where applicable, the images are associated with the text. This means, that to make an appropriate thread the\n conversion from a post to tweets should take into account how words relate to images in a spacial way. For this\n reason, we convert to tweets in batches.\"\"\"\n\n cfg = get_keys() # grab keys\n api = get_api(cfg) # setup API\n in_reply_to = None\n twitter_url = 'https://twitter.com'\n\n # for idx, caption in enumerate(text):\n # if idx > 0:\n # url_img = None\n # caption = re.findall(r\"[\\w']+|[.!?;]\\ \", caption)\n # text[idx] = text_to_tweets(caption, url_img)\n\n try:\n if data['in_reply_to'] is not None: # if post is reply ...\n for reply in data['in_reply_to']:\n if reply[:len(twitter_url)] == twitter_url: # if the URL points to twitter ...\n in_reply_to = reply.split('/')[-1:] # ... get the status id\n except KeyError:\n pass\n\n url = 'https://' + DOMAIN_NAME + url\n\n tweets = text_to_tweets(text, url=url) # process string into tweet thread\n\n # try and parse a lat lng.\n try:\n lat, lng = data['geo'][4:].split(\",\")\n except KeyError:\n lat, lng = None, None\n\n # post the first tweet so that we have a status id to start the thread\n status = api.update_status(status=tweets[0].pop(0), in_reply_to_status_id=in_reply_to)\n first_id = status.id # the id which points to origin of thread\n\n for album_group in text:\n try:\n media = album_group.pop(0) # get the corresponding album\n for tweet in album_group:\n status = api.update_with_media(filename=media, status=tweet, in_reply_to_status_id=status.id, lat=lat, long=lng)\n media = None\n except IndexError: # if we're out of albums...\n pass\n return 'http://twitter.com/{name}/status/{id}'.format(name=status.user.screen_name, id=first_id, lat=lat, lng=lng)", "def tweet(api, message):\n status = api.PostUpdate(message)", "def postTweet(self, userId, tweetId):\r\n self.timestamp += 1\r\n self.tweets_by_user[userId].append((self.timestamp, tweetId))", "def post_to_twitter(sender, instance, **kwargs):\n\n if instance.pk: #only post the tweet if it's a new record. 
\n return False \n \n accounts = TwitterAccount.objects.all()\n \n for account in accounts:\n bittle = Bittle.objects.bitlify(instance.get_absolute_url())\n mesg = \"%s: %s\" % (\"New Blog Post\", bittle.shortUrl)\n username = account.username\n password = account.get_password()\n try:\n twitter_api = twitter.Api(username, password)\n twitter_api.PostUpdate(mesg)\n except urllib2.HttpError, ex:\n print str(ex)\n return False", "def post_tweet():\n if not request.get_json() or 'tweet' not in request.get_json():\n raise exceptions.HttpError(message=\"No tweet info in body\")\n\n post_tweet = Tweet(\n id=None,\n name=None,\n tweet=request.get_json()[\"tweet\"],\n created_at=None,\n type = 'original'\n )\n\n tweet = Storage.post_tweet(tweet=post_tweet)\n return jsonify(tweet.to_dict()), 201", "def retweet(tweet):\n\n twitter.PostRetweet(tweet.id, trim_user=False)\n\n return", "def tweet(text):\n # Twitter authentication\n auth = tweepy.OAuthHandler(C_KEY, C_SECRET)\n auth.set_access_token(A_TOKEN, A_TOKEN_SECRET)\n api = tweepy.API(auth)\n\n # Send the tweet and log success or failure\n try:\n api.update_status(text)\n except tweepy.error.TweepError as e:\n log(e.message)\n print(e.message)\n else:\n log(\"Tweeted: \" + text)\n print(\"Tweeted: \" + text)", "def post_tweet(instance, created, raw, **kwargs):\n if created and not raw:\n _twitter.statuses.update(status='%s %s' % (\n instance.title,\n instance.url,\n ))", "def postTweet(self, userId: int, tweetId: int) -> None:\n self.cnt += 1\n self.posts[userId].append((self.cnt, tweetId))", "def tweet(text):\n # Twitter authentication\n auth = tweepy.OAuthHandler(C_KEY, C_SECRET)\n auth.set_access_token(A_TOKEN, A_TOKEN_SECRET)\n api = tweepy.API(auth)\n\n # Send the tweet and log success or failure\n try:\n api.update_status(text)\n except tweepy.error.TweepError as e:\n log(e.message)\n else:\n log(\"Tweeted: \" + text)", "def send_tweet(auth, tweet, in_reply_to=None):\n\tif isinstance(tweet, list):\n\t\t# It's a thread of tweets\n\t\tprev = ret = None\n\t\tfor part in tweet:\n\t\t\tif not part: continue\n\t\t\tinfo = send_tweet(auth, part, in_reply_to=prev)\n\t\t\tif \"error\" in info: return info\n\t\t\tif not ret: ret = info # Return the info for the *first* tweet sent\n\t\t\tprev = info[\"tweet_id\"]\n\t\treturn ret or {\"error\": \"Can't send a thread of nothing but empty tweets\"}\n\ttwitter = OAuth1Session(config.TWITTER_CLIENT_ID, config.TWITTER_CLIENT_SECRET, auth[0], auth[1])\n\tresp = twitter.post(\"https://api.twitter.com/1.1/statuses/update.json\",\n\t\tdata={\"status\": tweet, \"in_reply_to_status_id\": in_reply_to})\n\tif resp.status_code != 200:\n\t\tprint(\"Unknown response from Twitter\")\n\t\tprint(resp.status_code)\n\t\tprint(\"---\")\n\t\tprint(resp.json())\n\t\tprint(\"---\")\n\t\ttry:\n\t\t\t# TODO: Report these to the front end somehow even if asynchronous\n\t\t\treturn {\"error\": \"Unable to send tweet: \" + resp.json()[\"errors\"][0][\"message\"]}\n\t\texcept LookupError:\n\t\t\treturn {\"error\": \"Unknown error response from Twitter (see server console)\"}\n\tr = resp.json()\n\turl = \"https://twitter.com/%s/status/%s\" % (r[\"user\"][\"screen_name\"], r[\"id_str\"])\n\treturn {\"screen_name\": r[\"user\"][\"screen_name\"], \"tweet_id\": r[\"id\"], \"url\": url}", "async def add_tweet(self, tid=None): \n try:\n data=json.loads(self.request.body.decode('utf-8'))\n except: \n print(\"No data body!\")\n\n #print(\"Coordinates: {}\".format(data[\"coordinates\"]))\n if \"place\" in data:\n print(\"Place: 
{}\".format(data[\"place\"]))\n\n #print(\"User location: {}\".format(data[\"user\"][\"location\"]))\n #print(\"User lang: {}\".format(data[\"user\"][\"lang\"]))\n t=Tweet()\n t.tweet_id = tid\n t = self.fill_tweet(t, data)\n tweet_cache.append(t.to_dict())\n if \"retweeted_status\" in data:\n t.retweeted_status=data[\"retweeted_status\"]\n # \n # save the tweet\n #\n t.upsert()\n #\n # now handle the retweet\n #\n if \"retweeted_status\" in data:\n # this is a retweet so\n # do it once more for the original tweet\n tr=Tweet()\n tr.tweet_id = data[\"retweeted_status\"][\"id_str\"]\n tr = self.fill_tweet(tr, data[\"retweeted_status\"])\n tweet_cache.append(tr.to_dict())\n #tr.upsert()\n #r=requests.get(\"https://publish.twitter.com/oembed?url=https://twitter.com/Interior/status/\"+ t.tweet_id )\n #await self.fire_callbacks(r.json())\n #print(t.to_json(),file=ofile)\n #\n # get the embed html from twitter oembed API\n #\n r=requests.get(\"https://publish.twitter.com/oembed?url=https://twitter.com/Interior/status/\"+ t.tweet_id )\n #print(r.json())\n \n #print(self.__class__.callbacks)\n await self.fire_callbacks(r.json())\n #self.success(message=\"Added tweet id: {} \".format(str(id)), data=t.to_json(), format=\"json\", pure=True)", "def postTweet(self, userId, tweetId):\n self.time += 1\n self.tweets[userId] = self.tweets.get(userId, []) + [(-self.time, tweetId)]", "def postTweet(self, userId, tweetId):\n if userId in self.tweets:\n self.tweets[userId].append([-self.time, tweetId])\n else:\n self.tweets[userId] = [[-self.time, tweetId]]\n self.time += 1", "def add_tweet_reply(tweet_id, user, text):\n reply = {'user': user, 'text': text}\n return db.tweets.update(\n {'id_str': tweet_id}, {'$push': {'replies': reply}}, True)", "def reply_to_tweet():\n\n print('retrieving and replying to tweets...')\n all_mentions = api.mentions_timeline()\n\n # The content of the reply that the bot will send.\n rap_message = ' yo yo yo yo'\n\n for mention in reversed(all_mentions):\n\n # print(str(mention.id) + '-' + mention.text)\n\n if 'rap for me' in mention.text.lower():\n # checks if the bot received a request to deliver a rap\n print('received a request')\n print('dropping a new single...')\n # Checks if the latest mention came from the same person.\n if mention.id == mention.id[0]:\n # Posts a tweet saying the bot is 'too tired' and won't generate a new rap.\n api.update_status('@' + mention.user.screen_name + ' yo sorry I am too tired right now')\n else:\n # Posts a tweet with the rap to the user.\n api.update_status('@' + mention.user.screen_name + rap_message, mention.id)\n print('single dropped.')", "def command_tweet(self, bot, update):\n\n bot.sendChatAction(update.message.chat_id, action='typing')\n\n tweet = ext.get_last_tweet(self.config['twitter'])\n\n for url in tweet.get('images', []):\n self.send_photo_url(bot, update, url)\n\n messages = [\n u'{text}',\n '[@{user[screen_name]}](https://twitter.com/{user[screen_name]}) '\n '- {ago}'\n ]\n\n for msg in messages:\n self.send_message(bot, update, msg.format(**tweet))", "def tweet(self, message: str) -> None:\n\n # YOUR CODE HERE\n tweet = Tweet(self.userid, date.today(), message)\n self.tweets.append(tweet)", "def postTweet(self, userId: int, tweetId: int) -> None:\n ts = time.time()\n self.posts[userId].append((ts, tweetId))", "def postTweet(self, userId: int, tweetId: int) -> None:\n if userId not in self.users:\n self._create_user(userId)\n self.users_tweet[userId].appendleft((self.uid, tweetId))\n self.uid += 1" ]
[ "0.7315169", "0.720348", "0.70277464", "0.7026013", "0.7026013", "0.7026013", "0.70143557", "0.69740754", "0.6944611", "0.69140863", "0.689736", "0.6822415", "0.6770704", "0.6758333", "0.6744515", "0.6729099", "0.66975725", "0.6697186", "0.6669573", "0.6660224", "0.6641313", "0.66315204", "0.66236836", "0.6622017", "0.66075873", "0.6598528", "0.65884805", "0.6577814", "0.6576524", "0.6544066" ]
0.76214606
0
Retrieve the id of the last tweet we answered to
def get_last_tweet_id():

    if not os.path.exists(LAST_TWEET_FILE):
        return 0

    try:
        with open(LAST_TWEET_FILE, 'rb') as last_tweet_file:
            return pickle.load(last_tweet_file)
    except pickle.UnpicklingError:
        return 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def maximum_id(tweets):\n try:\n tree = etree.parse(StringIO(tweets), etree.XMLParser())\n statuses = tree.xpath('//statuses')\n id_str = statuses[0].xpath('./status/id/text()')\n ids = []\n for id in id_str:\n ids.append(int(id))\n return str(max(ids))\n\n except IndexError, e:\n raise e\n except ValueError, e:\n raise e", "def __get_last_id(cls):\n db = database.db_connection()\n cursor = db.cursor()\n sql_query = \"SELECT max(id_user) FROM user\"\n cursor.execute(sql_query)\n row = cursor.fetchone()\n cursor.close()\n return int(row[0])", "def get_tweets_to_answer_to(api, last_tweet_id=0):\n\n parameters = {\"screen_name\": TARGET_SCREEN_NAME,\n \"include_rts\": False,\n \"count\": 200}\n if last_tweet_id > 0:\n parameters[\"since_id\"] = last_tweet_id\n\n last_tweets = api.statuses.user_timeline(**parameters)\n tweets_to_answer_to = sorted([tweet for tweet in last_tweets\n if TARGET_TWEET_REGEX.match(tweet[\"text\"])],\n key=lambda t: t[\"id\"])\n\n if last_tweets and not tweets_to_answer_to:\n # update the last tweet id so that we wont consider the same tweets\n # again next time\n update_last_tweet_id(sorted(last_tweets, key=lambda t: t[\"id\"])[-1][\"id\"])\n\n return tweets_to_answer_to", "def guess_next_id(selfie):\n if len(selfie.tweets) < 2:\n # Nuh uh\n return\n\n diffs = selfie.get_diffs()\n deviation = stdev(diffs)\n average = mean(diffs)\n\n maybe_next_diff = random.randint(\n int(average - (0.5 * deviation)),\n int(average + (0.5 * deviation)),\n )\n\n log.info(\n '%s tweets, diff standard deviation: %s'\n % (len(selfie.tweets), deviation),\n )\n\n return selfie.tweets[-1]['id'] + maybe_next_diff", "def get_model_api_last_response_id(self):\n return self._last_response_id", "def most_recent_id(q):\n since_id = None\n last_archive_file = last_archive(q)\n if last_archive_file:\n line = open(last_archive_file).readline()\n if line:\n since_id = json.loads(line)[\"id_str\"]\n return since_id", "def get_last_update_id(updates):\r\n update_ids = []\r\n for update in updates[\"result\"]:\r\n update_ids.append(int(update[\"update_id\"]))\r\n return max(update_ids)", "def latest_id(self):\n return self.checkpoints[-1]", "def get_last_post(verbose=False):\n\n last_post = graph.get_connections(id=\"me\", connection_name=\"feed\")[\"data\"][0]\n\n if verbose:\n logger.info(LOG_PULL_SUCCESS)\n\n return last_post", "def last_id(self):\n rows = self.db.query(\"\"\"\n SELECT LAST_INSERT_ID() AS id\n \"\"\")\n for row in rows:\n return row['id']", "def get_last_update_id(client):\r\n f = client.get_object(Bucket=,\r\n Key='last_update.json')['Body']\r\n # f is a StreamingBody object in json, load to retrieve id number\r\n return json.load(f)['id']", "def get_tweet(self, id):\r\n return self.tweets[id]", "def get_last_text_post(self):\n with self.__connection.cursor() as cursor:\n sql = \"\"\"SELECT * FROM `ow_newsfeed_action`\n WHERE `id`= (SELECT MAX(`id`) FROM `ow_newsfeed_action` WHERE `entityType`=\"user-status\")\n AND `entityType`=\"user-status\"\n \"\"\"\n cursor.execute(sql)\n response = cursor.fetchone()\n data = json.loads(response[\"data\"])[\"status\"]\n return data", "def get_last_replied_id(file):\n f = open(file, 'r')\n last_replied_id = int(f.read().strip())\n f.close()\n return last_replied_id", "def find_latest_id(d, s):\n\n selected_tweets = [t['id'] for t in d if t['search_id'] == s]\n\n if selected_tweets:\n m = max(selected_tweets)\n else:\n m = None\n return m", "def get_current_id(self) -> int:\n try:\n return self.cursor.execute(f\"SELECT MAX(id) FROM 
{table_locations};\").fetchone()\n except Exception as e:\n msg = f'We faced some problems with the getting last id value. Mistake: {e}'\n self.proceed_error(msg)\n return -1", "def get_last_job_applied_id():\n\n return JobCompletedApplication.query.with_entities(JobCompletedApplication.job_applied_id).order_by(JobCompletedApplication.job_applied_id.desc()).first()[0]", "def update_last_tweet_id(last_tweet_id):\n\n if last_tweet_id:\n with open(LAST_TWEET_FILE, 'wb') as last_tweet_file:\n pickle.dump(last_tweet_id, last_tweet_file)", "def __chat_id_response(self) -> int:\n try:\n fetch_updates = self.__get_updates()\n return fetch_updates[0]['message']['chat']['id']\n except TimeoutError as tm_err:\n print(tm_err)\n sys.exit(1)", "def getLastObjectId(self):\n return self.objId", "def get_latest_dweet():\r\n resource = URL + '/get/latest/dweet/for/' + thing_name # (6)\r\n logger.debug('Getting last dweet from url %s', resource)\r\n\r\n r = requests.get(resource) # (7)\r\n\r\n if r.status_code == 200: # (8)\r\n dweet = r.json() # return a Python dict.\r\n logger.debug('Last dweet for thing was %s', dweet)\r\n\r\n dweet_content = None\r\n\r\n if dweet['this'] == 'succeeded': # (9)\r\n # We're just interested in the dweet content property.\r\n dweet_content = dweet['with'][0]['content'] # (10)\r\n\r\n return dweet_content\r\n\r\n else:\r\n logger.error('Getting last dweet failed with http status %s', r.status_code)\r\n return {}", "def get_largest_id(self):\n try:\n cur = self.conn.execute(\"\"\"SELECT MAX(id) FROM todo;\"\"\")\n row = cur.fetchone()\n if row[0] == None:\n return 0\n else:\n return row[0]\n except Exception as e:\n print(e)", "def get_last_entry_time():\r\n try:\r\n last_entry_time = list(mongo_coll_tweets.find().sort(\r\n [(\"_id\", -1)]).limit(1))[0][\"_id\"].generation_time\r\n except:\r\n last_entry_time = 0\r\n\r\n return last_entry_time", "def get_last_id(index, doctype):\n _query = {\n \"size\": 1,\n \"sort\": {\"created_at\": \"desc\"},\n \"query\": {\n \"match_all\": {}\n }\n }\n _filter_path = 'hits.hits._id'\n try:\n result = es.search(\n index=index,\n doc_type=doctype,\n body=_query,\n filter_path=_filter_path)\n except ConnectionError as error:\n raise error\n except RequestError as error:\n return 0\n\n try:\n return int(result['hits']['hits'][0]['_id'])\n except (TypeError, KeyError):\n return 0", "def get_last_id(obj, session):\n try:\n return session.query(obj).order_by(\"-id\").first().id\n except AttributeError: # This will be thrown for no entries\n return 0", "def GetTwitterHandleFromID(self, idn):\n # Add random.choice conditional to pick between a few different sites, so we don't get banned later.\n self.request = self.GetRequest(\"http://twopcharts.com/idcheck?user={}&type=id\".format(idn)).content\n self.reponse_list = self.request.split()\n\n for item in self.reponse_list:\n if 'href=\"tweettimes/' in item.strip():\n item = item.strip()\n self.next_item = x.replace('\"><button', \"\").replace('href=\"tweettimes/', \"\")\n return self.next_item.split()[0]", "def retrieve_last_seen_id(file_name):\n f_read = open(file_name, 'r')\n if os.stat(file_name).st_size != 0:\n last_seen_id = int(f_read.read().strip())\n else:\n print(\"Empty last_seen_id! 
Using an old mention ID\")\n last_seen_id = 1127325363376394243\n # 1127325363376394243 -- bitterMelon20: @bitterMelon20 42\n f_read.close()\n return last_seen_id", "def last_updated_by_id(self) -> str:\n return self.__last_updated_by_id", "def get_last_text_status(self):\n with self.connection.cursor() as cursor:\n sql = \"\"\"SELECT * FROM `ow_newsfeed_action` \n WHERE `id`= (SELECT MAX(`id`) FROM `ow_newsfeed_action` WHERE `entityType`=\"user-status\")\n AND `entityType`=\"user-status\"\n \"\"\"\n cursor.execute(sql)\n response = cursor.fetchone()\n data = json.loads(response[\"data\"])\n\n self.connection.commit()\n print(data[\"statusId\"])\n return Status(text=data[\"status\"],id=data[\"statusId\"])", "def get_current_id(self):\n\n id = self.ids[-1]\n\n if id is None:\n raise KeyError()\n\n return id" ]
[ "0.6557923", "0.65007925", "0.6485662", "0.6465227", "0.64261043", "0.63394284", "0.6325243", "0.63116455", "0.6201186", "0.61996835", "0.61993325", "0.61968505", "0.61273897", "0.6119263", "0.60918725", "0.608339", "0.60476357", "0.60136485", "0.6002097", "0.5997282", "0.59745145", "0.59587824", "0.5946047", "0.5943557", "0.594299", "0.590561", "0.588825", "0.5887305", "0.58827454", "0.588046" ]
0.7113535
0
Update the id of the last tweet the bot considered
def update_last_tweet_id(last_tweet_id): if last_tweet_id: with open(LAST_TWEET_FILE, 'wb') as last_tweet_file: pickle.dump(last_tweet_id, last_tweet_file)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_tweet(self, twitter) -> None:\n if isinstance(twitter, dict):\n json_data = twitter\n else:\n json_data = json.loads(twitter)\n\n try:\n breakpoint()\n self.db.tweets.find_one_and_update(\n {'id_str': json_data['id_str']},\n {'$inc': {'seq': 1}},\n projection={'seq': True, '_id': False},\n upsert=True,\n )\n except Exception as e:\n log.error(e)", "async def add_tweet(self, tid=None): \n try:\n data=json.loads(self.request.body.decode('utf-8'))\n except: \n print(\"No data body!\")\n\n #print(\"Coordinates: {}\".format(data[\"coordinates\"]))\n if \"place\" in data:\n print(\"Place: {}\".format(data[\"place\"]))\n\n #print(\"User location: {}\".format(data[\"user\"][\"location\"]))\n #print(\"User lang: {}\".format(data[\"user\"][\"lang\"]))\n t=Tweet()\n t.tweet_id = tid\n t = self.fill_tweet(t, data)\n tweet_cache.append(t.to_dict())\n if \"retweeted_status\" in data:\n t.retweeted_status=data[\"retweeted_status\"]\n # \n # save the tweet\n #\n t.upsert()\n #\n # now handle the retweet\n #\n if \"retweeted_status\" in data:\n # this is a retweet so\n # do it once more for the original tweet\n tr=Tweet()\n tr.tweet_id = data[\"retweeted_status\"][\"id_str\"]\n tr = self.fill_tweet(tr, data[\"retweeted_status\"])\n tweet_cache.append(tr.to_dict())\n #tr.upsert()\n #r=requests.get(\"https://publish.twitter.com/oembed?url=https://twitter.com/Interior/status/\"+ t.tweet_id )\n #await self.fire_callbacks(r.json())\n #print(t.to_json(),file=ofile)\n #\n # get the embed html from twitter oembed API\n #\n r=requests.get(\"https://publish.twitter.com/oembed?url=https://twitter.com/Interior/status/\"+ t.tweet_id )\n #print(r.json())\n \n #print(self.__class__.callbacks)\n await self.fire_callbacks(r.json())\n #self.success(message=\"Added tweet id: {} \".format(str(id)), data=t.to_json(), format=\"json\", pure=True)", "def answer_to_tweets(api, tweets):\n\n try:\n last_tweet_id = 0\n for tweet in tweets:\n print(\"Sending an answer to tweet {}: '{}'\".format(tweet[\"id\"],\n tweet[\"text\"]))\n api.statuses.update(status=TARGET_TWEET_ANSWER,\n in_reply_to_status_id=tweet[\"id\"])\n last_tweet_id = tweet[\"id\"]\n time.sleep(1) # do not exceed Twitter limits\n finally:\n update_last_tweet_id(last_tweet_id)", "def postTweet(self, userId, tweetId):\n self.time += 1\n self.tweets[userId] = self.tweets.get(userId, []) + [(-self.time, tweetId)]", "def postTweet(self, userId: int, tweetId: int) -> None:\n self.timeStamp -= 1\n self.tweetTimeLine[userId] = self.tweetTimeLine.get(userId, []) + [[self.timeStamp, tweetId]]", "def postTweet(self, userId, tweetId):\r\n self.timestamp += 1\r\n self.tweets_by_user[userId].append((self.timestamp, tweetId))", "def predict_the_future(selfie, next_id):\n if next_id:\n next_link = 'https://twitter.com/%s/status/%s' % (selfie.username, next_id)\n message = 'BEHOLD! A link to this very tweet! 
%s' % next_link\n elif not selfie.tweets:\n message = 'First?'\n else:\n message = 'Second?'\n\n tweet = selfie.api.statuses.update(status=message)\n\n # Delete the tweet if it was a throwaway or it didn't work.\n if next_id is None or tweet['id'] != str(next_id):\n selfie.garbage.append(tweet)\n\n return tweet", "def postTweet(self, userId: int, tweetId: int) -> None:\n self.tweets[userId].appendleft((next(self.timer), tweetId))", "def postTweet(self, userId: 'int', tweetId: 'int') -> 'None':\n self.tweets[userId].appendleft((next(self.timer), tweetId))", "def postTweet(self, userId, tweetId):\n if userId in self.tweets:\n self.tweets[userId].append([-self.time, tweetId])\n else:\n self.tweets[userId] = [[-self.time, tweetId]]\n self.time += 1", "def holy_crap(selfie, tweet_id):\n tweet_url = 'https://twitter.com/%s/status/%s' % (selfie.username, tweet_id)\n message = 'Holy crap!!! I did it!!! %s' % tweet_url\n selfie.api.statuses.update(status=message)\n\n log.info('I DID IT!')", "def reply_to_tweet():\n\n print('retrieving and replying to tweets...')\n all_mentions = api.mentions_timeline()\n\n # The content of the reply that the bot will send.\n rap_message = ' yo yo yo yo'\n\n for mention in reversed(all_mentions):\n\n # print(str(mention.id) + '-' + mention.text)\n\n if 'rap for me' in mention.text.lower():\n # checks if the bot received a request to deliver a rap\n print('received a request')\n print('dropping a new single...')\n # Checks if the latest mention came from the same person.\n if mention.id == mention.id[0]:\n # Posts a tweet saying the bot is 'too tired' and won't generate a new rap.\n api.update_status('@' + mention.user.screen_name + ' yo sorry I am too tired right now')\n else:\n # Posts a tweet with the rap to the user.\n api.update_status('@' + mention.user.screen_name + rap_message, mention.id)\n print('single dropped.')", "def postTweet(self, userId, tweetId):\n if not self.user_pool[userId].user_id:\n user = UserInfo()\n user.user_id = userId\n self.user_pool[userId] = user\n self.time += 1\n tw_info = (tweetId, self.time) # 保存一个和时间戳\n self.twitter_pool[userId].append(tw_info)", "def postTweet(self, userId: int, tweetId: int) -> None:\n if userId not in self.users.keys():\n self.users[userId] = user()\n self.users[userId].tweets.append(tweetId)\n self.tweetTime[tweetId] = self.time\n self.time += 1", "def postTweet(self, userId: int, tweetId: int) -> None:\n # Time Complexity: O(1)\n if userId not in self.tweets:\n self.tweets[userId] = []\n\n self.tweets[userId].append((-self.timestamp, tweetId))\n self.timestamp += 1", "def guess_next_id(selfie):\n if len(selfie.tweets) < 2:\n # Nuh uh\n return\n\n diffs = selfie.get_diffs()\n deviation = stdev(diffs)\n average = mean(diffs)\n\n maybe_next_diff = random.randint(\n int(average - (0.5 * deviation)),\n int(average + (0.5 * deviation)),\n )\n\n log.info(\n '%s tweets, diff standard deviation: %s'\n % (len(selfie.tweets), deviation),\n )\n\n return selfie.tweets[-1]['id'] + maybe_next_diff", "def get_tweets_to_answer_to(api, last_tweet_id=0):\n\n parameters = {\"screen_name\": TARGET_SCREEN_NAME,\n \"include_rts\": False,\n \"count\": 200}\n if last_tweet_id > 0:\n parameters[\"since_id\"] = last_tweet_id\n\n last_tweets = api.statuses.user_timeline(**parameters)\n tweets_to_answer_to = sorted([tweet for tweet in last_tweets\n if TARGET_TWEET_REGEX.match(tweet[\"text\"])],\n key=lambda t: t[\"id\"])\n\n if last_tweets and not tweets_to_answer_to:\n # update the last tweet id so that we wont consider the same 
tweets\n # again next time\n update_last_tweet_id(sorted(last_tweets, key=lambda t: t[\"id\"])[-1][\"id\"])\n\n return tweets_to_answer_to", "def get_tweet(self, id):\r\n return self.tweets[id]", "def RT(ID, name):\r\n \"\"\"Takes a ID and username parameter\"\"\"\r\n \"\"\"Once tweeted log is updated in overall and to date tweetlog\"\"\"\r\n \r\n config = config_create()\r\n print(\"RT\")\r\n #Tid = int(float(ID))\r\n Tweetusername = config.get('Auth', 'botname')\r\n #TweetText = 'https://twitter.com/'+Tweetusername+'/status/'+ID\r\n #ReTweet = 'Hi I am ComicTweetBot!('+tim+') I Retweet Comics! Use #comicretweetbot '+TweetText\r\n x2 = config.get('Latest_Log', 'currenttweetlog')\r\n x3 = config.get('Latest_Log', 'overalllog')\r\n CONSUMER_KEY = config.get('Auth', 'CONSUMER_KEY') \r\n CONSUMER_SECRET = config.get('Auth', 'CONSUMER_SECRET')\r\n ACCESS_KEY = config.get('Auth', 'ACCESS_KEY')\r\n ACCESS_SECRET = config.get('Auth', 'ACCESS_SECRET')\r\n api = Twython(CONSUMER_KEY, CONSUMER_SECRET, ACCESS_KEY, ACCESS_SECRET)\r\n tMax = int(float(config.get('Tweet_Delay', 'Max')))\r\n tMin = int(float(config.get('Tweet_Delay', 'Min')))\r\n tStep = int(float(config.get('Tweet_Delay', 'Step')))\r\n Log = open(x2, 'w')\r\n enterlog = ID+' '+name+ '\\n'\r\n Log.write(enterlog)\r\n Log2 = open(x3, 'w')\r\n Log2.write(ID+'\\n')\r\n #api.update_status(status= ReTweet)\r\n api.retweet(id = ID)\r\n api.create_favorite(id=ID, include_entities = True)\r\n #randomize the time for sleep 1.5mins to 5 mins\r\n rant = random.randrange(tMin, tMax, tStep)\r\n time.sleep(rant)", "def set_last_update_id(client, id):\r\n # json format is used in case more key-value pairs need to be stored\r\n body = {'id': id}\r\n json_body = json.dumps(body)\r\n client.put_object(ACL='private',\r\n Bucket=untappd_bucket,\r\n Key='last_update.json',\r\n Body=json_body)", "def add_retweet(id):\r\n # if original tweet does not exist -> 404\r\n models.Tweet.query.get_or_404(id)\r\n\r\n retweet = models.Retweet(post_id=id, username=request.json['username'],\r\n timestamp=datetime.datetime.now())\r\n\r\n db.session.add(retweet)\r\n db.session.commit()\r\n\r\n return {'retweet_id': retweet.retweet_id}", "async def tweepy_on_status(self, tweet):\n self.processed_tweets += 1\n if self.skip_tweet(tweet):\n return\n\n chan_conf = dutils.get(self.conf.follows, id=tweet.author.id_str)\n try:\n embed = await self.prepare_embed(tweet)\n content = None\n except:\n embed = None\n content = 'Failed to prepare embed for ' + tweet.tweet_web_url # If the preparation failed before setting tweet.tweet_web_url imma kms\n log.error('Failed to prepare embed for ' + str(tweet._json))\n\n # Make sure we're ready to send messages\n await self.bot.wait_until_ready()\n\n for channel in chan_conf.discord_channels:\n discord_channel = self.bot.get_channel(channel.id)\n\n # Check if the channel still exists\n if discord_channel is None:\n log.error('Channel {} unavailable to display tweet {}.'.format(discord_channel.id, tweet.id_str))\n continue\n\n # Check for required permissions\n perms = discord_channel.permissions_for(discord_channel.server.me)\n if not perms.embed_links:\n log.warning('Improper permissions in channel {} to display tweet {}.'.format(discord_channel.id, tweet.id_str))\n try:\n warning = '\\N{WARNING SIGN} Missed tweet from {} : `Embed links` permission missing. 
\\N{WARNING SIGN}'.format(tweet.author.screen_name)\n await self.bot.send_message(discord_channel, warning)\n except discord.DiscordException as e:\n log.error('Could not send warning to channel {}.\\n{}'.format(discord_channel.id, e))\n continue\n\n # Send the embed to the appropriate channel\n log.debug('Scheduling Discord message on channel ({}) : {}'.format(channel.id, tweet.text))\n await self.bot.send_message(discord_channel, content=content, embed=embed)\n\n # Update stats and latest id when processing newer tweets\n if tweet.id > chan_conf.latest_received:\n channel.received_count += 1\n chan_conf.latest_received = tweet.id\n self.conf.save()", "async def tweet_feeder(self): \n try:\n data=json.loads(self.request.body.decode('utf-8'))\n except: \n print(\"No data body!\")\n\n t=Tweet()\n t.tweet_id = data[\"tweet_id\"]\n t.text=data[\"text\"]\n #\n # update the hashtags cache\n #\n try:\n t.hashtags=data[\"hashtags\"] \n for htag in t.hashtags:\n #print(\"adding to hashtags: {} to cache:\".format(htag[\"text\"], ))\n if htag[\"text\"] in hash_cache:\n hash_cache[htag[\"text\"]] += 1\n else:\n hash_cache[htag[\"text\"]] = 1\n except:\n t.hashtags=[]\n \n #\n # update the user cache\n #\n try:\n user_id = \"@\" + data[\"user_screenname\"]\n if user_id in user_cache:\n user_cache[user_id] += 1\n else:\n user_cache[user_id] = 1\n except:\n print(\" ERR No User: should never happen\")\n\n try:\n t.user_screenname=data[\"user_screenname\"]\n except:\n t.user_screenname=\"\"\n try:\n t.profile_image_url_https = data[\"profile_image_url_https\"]\n except:\n t.profile_image_url_https = \"\"\n #\n # update the tweets cache\n #\n try:\n t.timestamp = data[\"timestamp\"]\n except:\n t.timestamp = datetime.datetime.utcnow()\n tweet_cache.append(t.to_dict())\n \n #\n # get the embed html from twitter oembed API\n #\n r=requests.get(\"https://publish.twitter.com/oembed?url=https://twitter.com/Interior/status/\"+ t.tweet_id )\n #print(r.json())\n \n #print(self.__class__.callbacks)\n await self.fire_callbacks(r.json())\n #self.success(message=\"Added tweet id: {} \".format(str(id)), data=t.to_json(), format=\"json\", pure=True)", "def send_tweet(tweet_text):\n twitter.update_status(status = tweet_text)", "def tweet(api, message):\n status = api.PostUpdate(message)", "def send_tweet(tweet_text):\n twitter.update_status(status=tweet_text)", "def send_tweet(tweet_text):\n twitter.update_status(status=tweet_text)", "def send_tweet(tweet_text):\n twitter.update_status(status=tweet_text)", "def get_last_tweet_id():\n\n if not os.path.exists(LAST_TWEET_FILE):\n return 0\n\n try:\n with open(LAST_TWEET_FILE, 'rb') as last_tweet_file:\n return pickle.load(last_tweet_file)\n except pickle.UnpicklingError:\n return 0", "def idme(bot, update):\n update.message.reply_text(\"Your ID is: \" + str(update.message.from_user.id))" ]
[ "0.6783095", "0.6732734", "0.671317", "0.6659954", "0.65241224", "0.6519562", "0.6500197", "0.6424114", "0.6419715", "0.6397454", "0.6346836", "0.63047737", "0.6231078", "0.62044", "0.61940396", "0.61729753", "0.61687416", "0.6161052", "0.6138779", "0.61345094", "0.61204773", "0.61147165", "0.6061263", "0.60580266", "0.60313046", "0.60308", "0.60308", "0.60308", "0.6023379", "0.601701" ]
0.75976545
0
Calculate the global position of every local obstacle; returns a list
def calcGlobalObstaclePosition(self, obstacles): global_obstacle_list = [] for obstacle in obstacles: #Wandeln Winkeldaten für Globalberechnung: -90zu+90 und +90zu-90 0=0 #ScanList[i][0]=degrees(asin(sin(radians(ScanList[i][0])+radians(180)))) Dx = obstacle[0] Dy = obstacle[1] #Drehmatrix für X, Returns Global Hindernis Position X=(Dx*cos(radians(self.global_kurs))+Dy*(-sin(radians(self.global_kurs))))+self.RoboPosX #Drehmatrix für Y, Returns Global Hindernis Position Y=(Dx*sin(radians(self.global_kurs))+Dy*(cos(radians(self.global_kurs))))+self.RoboPosY global_obstacle_list.append([int(X),int(Y)]) return(global_obstacle_list)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_obstacle_loc(self, obstacle_list):\n\n x_obst = []\n y_obst = []\n #x_obst_append = x_obst.append\n #y_obst_append = y_obst.append\n locs = []\n\n for x in obstacle_list:\n if x < self.width:\n x_obst.append(x*self.resolution + self.resolution/2)\n else:\n x_obst.append((x % self.width)*self.resolution + self.resolution/2)\n\n for y in obstacle_list:\n y_obst.append((y/self.width)*self.resolution + self.resolution/2)\n\n locs = map(lambda x: x, zip(x_obst, y_obst))\n\n return(locs)", "def getObstacles(self):\r\n ausgabeObstacle = self.globalObstaclesList + self.globalHardObstaclesList\r\n self.globalObstaclesList = []\r\n return(ausgabeObstacle)", "def updateHardObstacles(self):\r\n global_obs = self.calcGlobalObstaclePosition([[10, 20],[10, 0],[10, -20]])\r\n self.globalHardObstaclesList.extend(global_obs)", "def updateObstacles(self, obstacles):\r\n global_obs = self.calcGlobalObstaclePosition(obstacles)\r\n self.globalObstaclesList.extend(global_obs)", "def global_coords(self) -> GlobalCoordsABC:", "def calc_positions(self) :\n\t\tx, y = self.x0, self.y0\n\n\t\twhile self.is_visible(x, y) :\n\t\t\tx = 0.5 * self.gx * self.t**2 + self.vx0 * self.t + self.x0\n\t\t\ty = 0.5 * self.gy * self.t**2 + self.vy0 * self.t + self.y0\n\t\t\t\n\t\t\tself.t += self.dt\n\t\t\tself.pos_x.append(x)\n\t\t\tself.pos_y.append(y)", "def get_obstacles(self):\n return self.obstacles", "def getMovableRange(self, unit):\n CostArr_mod = modifyMovCost(CostArr, ability)\n Obstacles = self.getUnpassable(player) # units that are not passable....\n pos_list, path_list = UCS_solve(unit.pos, CostArr_mod, unit.MovPnt)\n return pos_list, path_list", "def get_obstacles(self, map_server):\n\n self.obstacle_list = []\n for index, element in enumerate(map_server):\n if element > 0:\n self.obstacle_list.append(index)\n return(self.obstacle_list)", "def l2g(local_x, local_y, hero):\n l_hero_x = (visible_squares[0]-1)/2 #8\n l_hero_y = (visible_squares[1]-1)/2 #8\n \n global_x = local_x + hero.x - l_hero_x\n global_y = local_y + hero.y - l_hero_y \n\n #global_x = hero.x + x\n #global_y = hero.y + y\n \n return global_x, global_y", "def g2l(global_x, global_y, hero):\n \n l_hero_x = (visible_squares[0]-1)/2 #8\n l_hero_y = (visible_squares[1]-1)/2 #8\n \n \n local_x = global_x - hero.x + l_hero_x\n local_y = global_y - hero.y + l_hero_y\n \n return local_x, local_y", "def GetObstaclePos(self):\n # read all points from laser scan\n ranges = self.Lidar.ranges\n # transfer to np.array\n ranges = np.array(ranges)\n # get all the outlier points to a very large value\n ranges = np.where(ranges < self.Lidar.range_min, self.Lidar.range_max + 10, ranges)\n ranges = np.where(ranges > self.Lidar.range_max, self.Lidar.range_max + 10, ranges)\n if len(ranges) != 0:\n # get all default angles\n angles = self.Lidar.angle_max - self.Lidar.angle_increment * np.arange(len(ranges))\n else:\n # if nothing detected, set the default value to zero\n ranges = None\n angles = None\n return ranges, angles", "def getPosicion(self):\r\n\t\treturn [self._x, self._y]", "def player_location(self):\n x = 0\n y = 0\n for line in self.grid:\n for i in line:\n if i == \"P\":\n return x, y\n \n y+=1\n x += 1\n y = 0", "def get_all_goat_positions(self) -> List[\"Position\"]:\n return [pos for pos in self.get_all_positions() if pos.is_goat()]", "async def find_nearby_independent_worlds(context: Anacreon) -> List[World]:\n jump_beacon_trait_ids = {\n e.id\n for e in context.game_info.scenario_info\n if e.is_jump_beacon and e.id is not None\n }\n\n 
jump_beacon_location = [\n world.pos\n for world in context.space_objects.values()\n if isinstance(world, OwnedWorld)\n and any(\n anacreonlib.utils.world_has_trait(\n context.game_info.scenario_info, world, trait_id\n )\n for trait_id in jump_beacon_trait_ids\n )\n ]\n\n return [\n world\n for world in context.space_objects.values()\n if isinstance(world, World)\n and world.sovereign_id == 1 # Is a sovereign world\n and any(\n utils.dist(world.pos, jump_beacon_pos) <= 250\n for jump_beacon_pos in jump_beacon_location\n ) # Is in distance\n ]", "def filled_positions(self):\n return [x for x in assignable_positions if self.grid[x][0]]", "def positions_global(anim):\r\n \r\n positions = transforms_global(anim)[:,:,:,3]\r\n return positions[:,:,:3] / positions[:,:,3,np.newaxis]", "def generate_obstacles(self):\r\n obstacles = self.get_obstable_metrics\r\n obstacle_arrays = []\r\n\r\n for nb_obstacle in obstacles:\r\n empty_array = np.zeros(shape=(self.WINDOW_HEIGHT,\r\n self.WINDOW_WIDTH))\r\n start_location = 0 if nb_obstacle[2] == 1 else self.WINDOW_HEIGHT\r\n y, x = start_location - 1, nb_obstacle[3]\r\n empty_array[y, x] = -1\r\n\r\n for w_value in range(nb_obstacle[0]):\r\n x_updated = x + w_value\r\n\r\n for h_value in range(nb_obstacle[1]):\r\n if nb_obstacle[2] == 1:\r\n y_updated = y + h_value\r\n else:\r\n y_updated = y - h_value\r\n # Replace Value\r\n empty_array[y_updated, x_updated] = -1\r\n\r\n new_array = self.trim_whitespace(empty_array,\r\n nb_obstacle[2],\r\n self.MIN_GAP)\r\n obstacle_arrays.append(new_array)\r\n\r\n return obstacle_arrays", "def get_map(self) -> list:\n return self.map_obstacle", "def localize(image):\n\n # Call the vision function in order to have the grid with the obstacle and the goal coordinate\n object_grid, occupancy_grid, world = vision(image)\n\n # Correction of the goal coordinate in order to fit the A* coordinate\n goal_x = object_grid[0][1]\n goal_y = WIDTH_G - object_grid[0][0]\n goal_coor = (goal_x, goal_y)\n\n return occupancy_grid, goal_coor", "def solution(self):\n return [node.move for node in self.path()[1:]]", "def get_objpositions(scope):\n positions = []\n print('Press enter after each position has been found; press control-c to end')\n while True:\n try:\n input()\n except KeyboardInterrupt:\n break\n positions.append(scope.stage.position)\n positions[-1].insert(0,scope.nosepiece.position)\n print('Position {}: {}'.format(len(positions), tuple(positions[-1])), end='')\n return positions", "def opponentBoarderPosition(self, gameState):\n if self.red:\n i = self.midWidth\n else:\n i = self.midWidth - 1\n boudaries = [(i,j) for j in range(self.height)]\n validPositions = []\n for i in boudaries:\n if not gameState.hasWall(i[0],i[1]):\n validPositions.append(i)\n return validPositions", "def uniquePathsWithObstacles(self, obstacleGrid: List[List[int]]) -> int:\n if obstacleGrid[0][0] == 1:\n return 0\n\n m, n = len(obstacleGrid), len(obstacleGrid[0])\n dp = [[0 for _ in range(n)] for _ in range(m)]\n dp[0][0] = 1\n\n for i in range(1, m):\n if obstacleGrid[i][0] == 1: break\n else: dp[i][0] = dp[i-1][0]\n\n for j in range(1, n):\n if obstacleGrid[0][j] == 1: break\n else: dp[0][j] = dp[0][j-1]\n\n for i in range(1, m):\n for j in range(1, n):\n if obstacleGrid[i][j] == 0:\n dp[i][j] = dp[i-1][j] + dp[i][j-1]\n\n return dp[-1][-1]", "def obstacles(self):\r\n\r\n #Radious arround the head\r\n limit_sight = self.snake_sight\r\n head = self.body[0].position\r\n binary_map_complete = self.complete_mapping()\r\n map_matrix = 
np.matrix(binary_map_complete)\r\n obstacles = []\r\n\r\n #limits in all directions\r\n left_x = head[0] - limit_sight\r\n right_x = head[0] + limit_sight\r\n up_y = head[1] - limit_sight\r\n down_y = head[1] + limit_sight\r\n\r\n #submatrix with limits size\r\n snake_sight = map_matrix[up_y:down_y+1, left_x:right_x+1]\r\n\r\n #Special cases where the snake approximates to the borders\r\n ##Corners\r\n if left_x < 0 and up_y < 0:\r\n snake_sight = map_matrix[0:down_y+1, 0:right_x+1]\r\n interval_x = [self.limits[0] + left_x, self.limits[0]]\r\n interval_y = [self.limits[1] + up_y, self.limits[1]]\r\n interval_x_matrix = map_matrix[0:down_y+1, interval_x[0]:interval_x[1]]\r\n interval_y_matrix = map_matrix[interval_y[0]:interval_y[1], 0:right_x+1]\r\n interval_x_y_matrix = map_matrix[interval_y[0]:interval_y[1], interval_x[0]:interval_x[1]]\r\n temporal = np.c_[interval_x_y_matrix, interval_y_matrix]\r\n snake_sight = np.c_[interval_x_matrix, snake_sight]\r\n snake_sight = np.r_[temporal, snake_sight] \r\n return snake_sight\r\n \r\n if left_x < 0 and down_y > self.limits[1] - 1:\r\n snake_sight = map_matrix[up_y:self.limits[1], 0:right_x+1]\r\n interval_x = [self.limits[0] + left_x, self.limits[0]]\r\n interval_y = [0, down_y - self.limits[1] + 1]\r\n interval_x_matrix = map_matrix[up_y:self.limits[1], interval_x[0]:interval_x[1]]\r\n interval_y_matrix = map_matrix[interval_y[0]:interval_y[1], 0:right_x+1]\r\n interval_x_y_matrix = map_matrix[interval_y[0]:interval_y[1], interval_x[0]:interval_x[1]]\r\n temporal = np.c_[interval_x_y_matrix, interval_y_matrix]\r\n snake_sight = np.c_[interval_x_matrix, snake_sight]\r\n snake_sight = np.r_[snake_sight, temporal]\r\n return snake_sight\r\n \r\n if right_x > self.limits[0]-1 and up_y < 0:\r\n snake_sight = map_matrix[0:down_y+1, left_x:self.limits[0]]\r\n interval_x = [0, right_x - self.limits[0] + 1]\r\n interval_y = [self.limits[1] + up_y, self.limits[1]]\r\n interval_x_matrix = map_matrix[0:down_y+1, interval_x[0]:interval_x[1]]\r\n interval_y_matrix = map_matrix[interval_y[0]:interval_y[1], left_x:self.limits[0]]\r\n interval_x_y_matrix = map_matrix[interval_y[0]:interval_y[1], interval_x[0]:interval_x[1]]\r\n temporal = np.c_[interval_y_matrix, interval_x_y_matrix]\r\n snake_sight = np.c_[snake_sight, interval_x_matrix]\r\n snake_sight = np.r_[temporal, snake_sight]\r\n return snake_sight\r\n \r\n if right_x > self.limits[0]-1 and down_y > self.limits[1]-1:\r\n snake_sight = map_matrix[up_y:self.limits[1], left_x:self.limits[0]]\r\n interval_x = [0, right_x - self.limits[0] + 1]\r\n interval_y = [0, down_y - self.limits[1] + 1]\r\n interval_x_matrix = map_matrix[up_y:self.limits[1], interval_x[0]:interval_x[1]]\r\n interval_y_matrix = map_matrix[interval_y[0]:interval_y[1], left_x:self.limits[0]]\r\n interval_x_y_matrix = map_matrix[interval_y[0]:interval_y[1], interval_x[0]:interval_x[1]]\r\n temporal = np.c_[interval_y_matrix, interval_x_y_matrix]\r\n snake_sight = np.c_[snake_sight, interval_x_matrix]\r\n snake_sight = np.r_[snake_sight, temporal]\r\n return snake_sight\r\n\r\n ##Middle\r\n if left_x < 0:\r\n snake_sight = map_matrix[up_y:down_y+1, 0:right_x+1]\r\n interval_x = [self.limits[0] + left_x, self.limits[0]]\r\n interval_x_matrix = map_matrix[up_y:down_y+1, interval_x[0]:interval_x[1]]\r\n snake_sight = np.c_[interval_x_matrix, snake_sight]\r\n return snake_sight\r\n\r\n if right_x > self.limits[0]-1:\r\n snake_sight = map_matrix[up_y:down_y+1, left_x:self.limits[0]]\r\n interval_x = [0, right_x - self.limits[0] + 1]\r\n 
interval_x_matrix = map_matrix[up_y:down_y+1, interval_x[0]:interval_x[1]]\r\n snake_sight = np.c_[snake_sight, interval_x_matrix]\r\n return snake_sight\r\n\r\n if up_y < 0:\r\n snake_sight = map_matrix[0:down_y+1, left_x:right_x+1]\r\n interval_y = [self.limits[1] + up_y, self.limits[1]]\r\n interval_y_matrix = map_matrix[interval_y[0]:interval_y[1], left_x:right_x+1]\r\n snake_sight = np.r_[interval_y_matrix, snake_sight]\r\n return snake_sight\r\n \r\n if down_y > self.limits[1]-1:\r\n snake_sight = map_matrix[up_y:self.limits[1], left_x:right_x+1]\r\n interval_y = [0, down_y - self.limits[1] + 1]\r\n interval_y_matrix = map_matrix[interval_y[0]:interval_y[1], left_x:right_x+1]\r\n snake_sight = np.r_[snake_sight, interval_y_matrix]\r\n return snake_sight\r\n\r\n return snake_sight", "def obstacles(p):\n c1 = np.array([-0.5,-1.])\n r1 = 1.\n c2 = np.array([0.75,0.5])\n r2 = 0.5\n return [\n (p[0] + 2, np.array([1.,0.])), # left\n (2 - p[0], np.array([-1.,0.])), # right\n (p[1] + 1, np.array([0.,1.])), # bottom\n (1 - p[1], np.array([0.,-1.])), # top\n (norm(p - c1) - r1, (p - c1)/norm(p - c1)), # circle 1\n (norm(p - c2) - r2, (p - c2)/norm(p - c2)) # circle 2\n ]", "def _find_obstacle(self, obstacle_type='*traffic_light*'): \r\n obst = list()\r\n \r\n _actors = self._world.get_actors()\r\n _obstacles = _actors.filter(obstacle_type)\r\n\r\n\r\n for _obstacle in _obstacles:\r\n trigger = _obstacle.trigger_volume\r\n\r\n _obstacle.get_transform().transform(trigger.location)\r\n \r\n distance_to_car = trigger.location.distance(self._vehicle.get_location())\r\n\r\n a = np.sqrt(\r\n trigger.extent.x ** 2 +\r\n trigger.extent.y ** 2 +\r\n trigger.extent.z ** 2)\r\n b = np.sqrt(\r\n self._vehicle.bounding_box.extent.x ** 2 +\r\n self._vehicle.bounding_box.extent.y ** 2 +\r\n self._vehicle.bounding_box.extent.z ** 2)\r\n\r\n s = a + b + 10\r\n \r\n if distance_to_car <= s:\r\n # the actor is affected by this obstacle.\r\n obst.append(_obstacle)\r\n\r\n \"\"\"self._debug.draw_box(carla.BoundingBox(_obstacle.get_transform().location, carla.Vector3D(0.5,0.5,2)),\r\n _obstacle.get_transform().rotation, \r\n 0.05, \r\n carla.Color(255,255,0,0),\r\n 0\r\n )\"\"\"\r\n \"\"\"self._debug.draw_box(carla.BoundingBox(trigger.location, carla.Vector3D(0.1,0.1,10)),\r\n _obstacle.get_transform().rotation, \r\n 0.05, \r\n carla.Color(255,0,0,0),\r\n 0\r\n )\"\"\"\r\n \r\n \"\"\"self._debug.draw_box(carla.BoundingBox(trigger.location, carla.Vector3D(0.1,0.1,2)),\r\n _obstacle.get_transform().rotation, \r\n 0.05, \r\n carla.Color(255,0,0,0),\r\n 0\r\n )\"\"\"\r\n \"\"\"self._debug.draw_box(trigger,\r\n _obstacle.get_transform().rotation, \r\n 0.05, \r\n carla.Color(255,0,0,0),\r\n 0\r\n )\"\"\"\r\n\r\n return obst", "def getPosition(self):\n\t\txxx1 = self.stokes()\n\t\txxx2 = self.thp()\n\t\txxx3 = self.tthp()\n\t\treturn [xxx1, xxx2, xxx3]", "def boarderPosition(self, gameState):\n if gameState.isOnRedTeam(self.index):\n i = self.midWidth - 1\n else:\n i = self.midWidth + 1\n boudaries = [(i,j) for j in range(self.height)]\n validPositions = []\n for i in boudaries:\n if not gameState.hasWall(i[0],i[1]):\n validPositions.append(i)\n return validPositions" ]
[ "0.74064344", "0.707515", "0.6628567", "0.6443536", "0.6411638", "0.62849295", "0.62457395", "0.62181544", "0.60758865", "0.6068896", "0.6041863", "0.6032454", "0.6019976", "0.5921112", "0.5918362", "0.5913744", "0.58780336", "0.5858981", "0.5829376", "0.58060443", "0.5805806", "0.5801988", "0.57962775", "0.5794979", "0.57531446", "0.5749243", "0.57434297", "0.5734762", "0.57258224", "0.57128745" ]
0.8241253
0
Pickle the Robo's path every X seconds.
def saveRoboPath(self): if time.time()-self.timeold > 2: self.RoboPath.append([round(self.RoboPosX,1),round(self.RoboPosY,1)]) self.timeold = time.time()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def send_path(self, path):\n self.clear_path()\n for coordinate in path:\n self.send_coordinate(coordinate)\n time.sleep(0.05)", "def change_to_with_delay(_path: str):\n time.sleep(1)", "def run(self):\n r = rospy.Rate(100)\n while not rospy.is_shutdown():\n r.sleep()", "def path_length(self,path,num_repeats=10):\n begin_time=datetime.datetime.now()\n #num_repeats=100\n for i in range(num_repeats):\n self.virtual_move_to(path)\n end_time=datetime.datetime.now()\n delta_t=end_time-begin_time\n path_length=delta_t.total_seconds()/float(num_repeats)\n if path_length ==0.0:\n print(\"Warning the path length is less than 1 microsecond, make sure num_repeats is high enough to measure it.\")\n return path_length", "def run(every=45):\n print(f\"Scheduling refuel time for every {every} minutes.\")\n seconds = every * 60\n pic = Path.joinpath(Path(__file__).parent, \"pic.png\")\n try:\n img = Image.open(pic)\n while(True):\n for i in tqdm.trange(seconds):\n time.sleep(1)\n print(f\"Taking rest at {time.ctime()}\")\n img.show()\n except:\n print(\"Have a good day!\")\n img.close()", "def single_timed_path(spot,generator,times,r_param,vol_param):\n future_spots = np.zeros_like(times)\n future_spots[0] = spot\n if isinstance(generator, Antithetic):\n print(\"Warning ( optionpricer.path.single_timed_path() ): generating a \\\n timed sequence with antithetic generator\")\n for i in range(1,len(times)):\n r,var,mu,discount = get_path_constants(times[i-1], times[i],r_param, vol_param)\n rand_vals = generator.get_samples(1)\n future_spots[i] = future_spots[i-1]*np.exp(mu)\n future_spots[i] *= np.exp(np.sqrt(var)*rand_vals)\n #future_spots *= discount\n return future_spots", "def spin(self, rate=50):\n spin_rate = rospy.Rate(rate)\n while not rospy.is_shutdown():\n if self.__current_pose and self.__waypoints_tree:\n idx = self.get_nearest_waypoint_id(self.__current_pose)\n self.update_waypoints(idx)\n\n spin_rate.sleep()", "def load(self, filepath=''):\n sleep(20)\n pass", "def control_loop():\n global rate, regions, speed\n rospy.init_node('ebot_controller')\n rate = rospy.Rate(10)\n rospy.Subscriber('/ebot/laser/scan', LaserScan, laser_callback)\n rospy.Subscriber('/odom', Odometry, odom_callback)\n speed = Twist()\n speed.linear.x = 0\n speed.angular.z = 0\n pub.publish(speed)\n while not rospy.is_shutdown():\n if x <= 6.28:\n user_x, user_y = waypoints(15)\n go_to_x_y(user_x, user_y, 1)\n go_to_x_y(user_x, user_y, 0.25)\n else:\n go_to_x_y(12.5, 0, 3)\n go_to_x_y(12.5, 0, 1)\n go_to_x_y(12.5, 0, 0.2)\n exit()\n speed.linear.x = 0\n speed.angular.z = 0\n pub.publish(speed)\n rate.sleep()", "def wait_for_time():\n while rospy.Time().now().to_sec() == 0:\n pass", "def wait_for_time():\n while rospy.Time().now().to_sec() == 0:\n pass", "def wait_for_time():\n while rospy.Time().now().to_sec() == 0:\n pass", "def target_position(self, time):\n \"\"\"\n start_pos = self.points[self.cur_start]\n seg_time = time - self.last_checkpoint_time\n\n #The arguement of target-velocity dosent matter\n cur_pos = self.target_velocity(time)*seg_time + start_pos\n\n \n # or time > (self.total_time / 4)*(self.cur_start + 1)\n cur_pos_norm = length(cur_pos - start_pos)\n\n next_corner = self.points[(self.cur_start + 1)%4]\n \n seg_norm = length(next_corner - start_pos)\n print(\"cur_pos : \", cur_pos, \"segment: \", self.cur_start, seg_norm - cur_pos_norm)\n\n if cur_pos_norm >= seg_norm:\n self.cur_start = (self.cur_start + 1) % 4\n self.last_checkpoint_time = time\n return cur_pos\n \"\"\"\n\n #Possibly use 
rospy.sleep()\n total_time = self.total_time\n\n\n if time < total_time/4:\n return self.path1.target_position(time)\n\n elif time - total_time/4 == 0:\n rospy.sleep(0.5)\n\n elif time < total_time/2:\n return self.path2.target_position(time - (total_time/4 + 0.5))\n # return self.path2.target_position(time - (total_time/4 ))\n\n\n elif time - total_time/2 == 0:\n rospy.sleep(0.5)\n\n elif time <= total_time/4*3:\n return self.path3.target_position(time - (total_time/2 + 1))\n # return self.path3.target_position(time - (total_time/2))\n\n\n elif time - total_time/4*3 == 0:\n rospy.sleep(0.5)\n\n else:\n return self.path4.target_position(time - (total_time/4*3 + 1.5))\n # return self.path4.target_position(time - (total_time/4*3))", "def wait_for_time():\n while rospy.Time().now().to_sec() == 0:\n pass", "def nod():\n while True:\n MOVEMENTS.set_raw_angle(7, 52)\n sleep(2)\n MOVEMENTS.set_raw_angle(7, 0)\n sleep(2)", "def calculateFirstPath(self):\n rd.shuffle(self.goals)\n self.path = self.goals", "def teleopPeriodic(self):\n self.drive.arcadeDrive(1, 0)\n self.brushless.set(1)\n self.spark.set(self.joystick.getY())", "def simulate(seconds):\n\n #Grab the start time\n start_time = dt.datetime.now()\n\n # fill list with the start\n times_on_the_second = [start_time + dt.timedelta(seconds=x) for x in range(seconds + 1)]\n\n #end_time = start_time + dt.timedelta(seconds=seconds)\n\n end_time = times_on_the_second[-1]\n epochs = 0\n\n\n\n print(f\"Simulation started at {start_time}\")\n\n while dt.datetime.now() < end_time:\n\n while dt.datetime.now() < times_on_the_second[epochs]:\n pass\n\n for asteroid in Controller.currentAsteroids:\n asteroid.move()\n print(asteroid, F\"time: {dt.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]}\")\n epochs += 1\n\n\n\n # time.sleep(1)", "def cherry_pick(self, alarm):\n raise NotImplementedError()", "def run( self, cycles=-1 ):", "def wait(self, cycles):\n\t\tself.planet.tiles[self.y][self.x].set_occupant() # set occupant to the initial tile\n\t\tif self.planet.tiles[self.y][self.x].is_shaded: # in 'plain' the rover will recharge itself\n\t\t\ti = 0\n\t\t\twhile i < int(cycles):\n\t\t\t\tif self.battery < 100:\n\t\t\t\t\tself.battery += 1\n\t\t\t\ti += 1", "def run(self):\n \n rospy.spin()", "def spin(self):\n rate = rospy.Rate(5) # hz\n while not rospy.is_shutdown():\n if self.parameters.charging:\n self.dynamic_reconfigure_server.update_configuration({\"charging_percentage\": min(100, self.battery.percentage + self.charging_increment)})\n else:\n self.dynamic_reconfigure_server.update_configuration({\"charging_percentage\": max(0, self.battery.percentage - self.charging_increment)})\n self.battery.header.stamp = rospy.get_rostime() # get_rostime() returns the time in rospy.Time structure\n self.battery_publisher.publish(self.battery)\n rate.sleep()", "def _control(self):\n j = 0\n while not rospy.is_shutdown():\n if self.running:\n # Track the path for each vehicle.\n for i, vehicle_id in enumerate(self.vehicle_ids):\n omega = self._get_omega(vehicle_id)\n self.pub_omega.publish(vehicle_id, omega)\n\n # At the start of operation: start each vehicle with a constant\n # speed one after another, delayed with a set amount of samples.\n if j < self.headstart_samples * len(self.vehicle_ids):\n index = j / self.headstart_samples\n vehicle_id = self.vehicle_ids[index]\n\n self.pub_speed.publish(vehicle_id, self.v)\n\n # Normal operation: only control the speeds of the follower\n # vehicles. 
The velocities are obtained from the PIDs.\n else:\n for i, vehicle_id in enumerate(self.vehicle_ids[1:]):\n vel = self._get_vel(vehicle_id)\n self.pub_speed.publish(vehicle_id, vel)\n\n time.sleep(1. / self.rate)\n\n j += 1", "def wait_spinner(self, wait_time):\n#------affichage d'un element du cycle dans la boucle\n#----- pour montré qu'on attend \n spinner = itertools.cycle(['-', '/', '|', '\\\\'])\n \n try:\n self.logger.debug(u\"wait for max '%s' minute(s)\"%wait_time) \n time.sleep(1.0)\n#-------------------on attend que le device ne soit plus en MOVING ou on attend un period de temps \n end_time = datetime.datetime.now()+datetime.timedelta(minutes=wait_time)\n while end_time >= datetime.datetime.now() and self._ismoving():\n time.sleep(0.1)\n #affichage l'element du cycle\n sys.stdout.write(spinner.next())\n sys.stdout.flush()\n #-----effacer l'element affiché\n sys.stdout.write('\\b')\n except Exception, details :\n self.error_message.set_error_message(\"Wait error : \",str(details))\n raise", "def work(self):\n time.sleep(random.randint(0, 200) / 100)\n pass", "def sleeper(self):\n for waittime in (.01, .02, .05, .1, .2, .5):\n yield waittime\n while True:\n waittime = min(waittime + .2, 5)\n yield waittime", "def wait_forever(self):\r\n while True:\r\n time.sleep(0.5)", "def loop(self):\n pass", "def play(self, pathFindingStategy: PathFindingStrategy) -> None:" ]
[ "0.5697258", "0.54999626", "0.5414542", "0.5356699", "0.53323853", "0.5246065", "0.5175965", "0.5170865", "0.51637995", "0.51113075", "0.51113075", "0.51113075", "0.5107844", "0.51071584", "0.5102906", "0.50996655", "0.50768256", "0.5040481", "0.50338733", "0.5033516", "0.50134355", "0.4993419", "0.49599454", "0.49569395", "0.49199238", "0.49087545", "0.4900616", "0.4900193", "0.48933676", "0.48927155" ]
0.55648315
1
Set the Robo position, e.g. at the start
def setRoboPos(self,x,y): self.RoboPosX=x self.RoboPosY=y
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_start_position(self) -> None:\n self.cozmo.set_head_angle(degrees(0)).wait_for_completed()\n self.cozmo.set_lift_height(0.0).wait_for_completed()", "def setPosition(position):", "def setPosition(self):\n # determine posX, posY for battle\n (x1,y1) = globals.battlemapQuadrants[self.systemGrid]\n self.posX = x1+self.setX\n self.posY = y1+self.setY", "def SetCurrentPosition(self,pos):\n\n if self.Reverse: pos*=-1\n self.Bus.Transaction(chr(self.Address)+chr(0x40)+struct.pack('@l',pos))", "def move_to_start(self):\n self.pos = (SCREEN_WIDTH / 2, SCREEN_HEIGHT - 64)", "def set_starting_pos(self):\n if self.start and self.is_unoccupied(*self.start):\n self.current_pos = self.start[:]\n else:\n self.set_random_pos('starting')", "def set_to_start(self) -> None:\n start_config = self._path_points[0]\n self._mobile.set_2d_pose(start_config[:3])\n self._path_done = False", "def start(self) -> global___Pos:", "def setPos(self, pos):\n self.cameraNode.setPos(pos)", "def reset_position(self):\n import interface\n\n print(\"Start restet position...\")\n\n sign = lambda x: int(x > 0) - int(x < 0) # Renvoi le signe de x (-1, 0, 1).\n fact_speed = 0.7 # On divise les vitesses.\n\n eps_angle = np.pi*20/180 # Tolerance angulaire. (en radian)\n eps_pos = 50 # Tolerance sur le carre centre autour du point d'arrive (en pxl).\n x0, y0 = 320, 230 # Point a atteindre.(en pxl)\n\n self.position, self.orientation = interface.get_position()\n\n # Calcul de l'angle entre barycentre de la voiture et point de depart.\n def get_alpha():\n \"\"\"\n Recupere l'angle entre l'axe horizontal et le vecteur position de la voiture.\n \"\"\"\n norm = np.sqrt((self.position[1] - y0)**2 + (self.position[0] - x0)**2)\n if norm:\n return np.arccos((self.position[0] - x0)/norm) * (1 - 2*(self.position[1] > y0))\n return 0\n\n control_angle = lambda a: (a+np.pi)%(2*np.pi) - np.pi\n\n # alpha : orientation souhaitee de la voiture pour retourner au point de depart (comprise entre -pi et +pi)\n\n # As long as we are not in the direction of the center, the car rotates on itself\n print(\"angle de la voiture en degre:\", self.orientation*180/np.pi)\n print(\"angle qui reste a faire:\", control_angle(np.pi - get_alpha() + self.orientation))\n print(\"\\tOrientation vers la cible....\")\n fact_bis = fact_speed\n while abs(control_angle(np.pi - get_alpha() + self.orientation)) > eps_angle:\n # while True:\n fact_bis *= 1.01\n # interface.move_wheel(\"l\", -0.4)\n # interface.move_wheel(\"r\", 0.4)\n interface.move_wheel(\"l\", -fact_bis*control_angle(np.pi + get_alpha() - self.orientation)/np.pi)\n interface.move_wheel(\"r\", fact_bis*control_angle(np.pi + get_alpha() - self.orientation)/np.pi)\n self.position, self.orientation = interface.get_position()\n print(\"Orientation: \", control_angle(np.pi - get_alpha() + self.orientation),\n \"position actuelle: \", self.position, self.orientation)\n # print(\"fact speed : \", fact_bis)\n # As long as we are not at the center, the car goes straight\n interface.move_wheel(\"\", 0)\n\n input(\"suite\")\n\n print(\"\\tavancer vers la cible\")\n while abs(x0 - self.position[0]) > eps_pos or abs(y0 - self.position[1]) > eps_pos:\n # print(abs(x0 - self.position[0]), abs(y0 - self.position[1]))\n print(\"Avancer vers la cible - distance\", 0.5*(np.sqrt((self.position[1] - y0)**2 + (self.position[0] - x0)**2) / norm))\n interface.move_wheel(\"\", (0.5*(np.sqrt((self.position[1] - y0)**2 + (self.position[0] - x0)**2) / norm)))\n self.position, self.orientation = interface.get_position()\n 
print(\"Avancer vers la cible - position : \", self.position, self.orientation)\n\n # As long as the the car is not facing the chosen direction, it rotates on itself\n interface.move_wheel(\"\", 0)\n print(\"\\torientation finale\")\n while abs(np.pi/2 - self.orientation) > eps_angle:\n print(\"Orientation finale - Angle : \", abs(np.pi/2 - self.orientation))\n interface.move_wheel(\"l\", -fact_speed*(0.5+0.5*(abs(abs(self.orientation)-np.pi/2))/np.pi))\n interface.move_wheel(\"r\", fact_speed*(0.5+0.5*(abs(abs(self.orientation)-np.pi/2))/np.pi))\n self.position, self.orientation = interface.get_position()\n\n interface.move_wheel(\"\", 0)\n print(\"\\tterminated\")", "def _move(self, pos):\n self.put_par(\"drive\", pos)", "def set_rival_move(self, pos):\n self.board[pos[0]][pos[1]] = 2", "def set_pos(self, x, y, orien):\n self.pos_x = x\n self.pos_y = y\n self.orientation = orien", "def move_to_position1(self):", "def move_to_position2(self):", "def update(self):\n self.bpos_x += 3", "def reset_position(self):\n self.goto(STARTING_POSITION)", "def __init__(self):\n self.position = 0", "def move(self, p):\r\n self.position.setvalue(p)", "def set_position(self, pos, debug=False):\n pos = max(pos, 0)\n pos = min(pos, 1)\n posrange = pos * self.range\n pos = posrange + self.min\n if debug:\n print('Setting Dynamixel {} with posrange {} to position {}'.format(self.id, posrange, pos))\n self.motor.set_position(int(pos))", "def reset_position(self): \n self.rect.x = 400\n self.rect.y = 400\n \n # Specifies the Player's spawnpoint as maze_arrangement[8][8], representing\n # the tile in the center of the maze \n self.__minotaur_x = 8\n self.__minotaur_y = 8", "def set_start_pos(self, start_pos):\n if self.is_pos_valid(start_pos):\n self.start_pos = start_pos\n self.set_curr_pos(start_pos)\n self.curr_map = self.get_curr_visible_map(self.start_pos)\n if self.dstar:\n self.dstar_curr_map = self.get_curr_dstar_visible_map(\n self.start_pos)\n self.pos_history = [start_pos]\n # update state map\n self.state_map[0, 0] = self.get_state_map()\n # self.state_map[0, 1] = self.curr_map\n else:\n print (\"[MESSAGE] WARNING: The position is not valid, nothing\"\n \" changes. (by set_start_pos)\")", "def AeroMove(self, pos):\r\n\r\n pass", "def _move(self, pos):\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n self._set_block(pos, self._BOT_BLOCK)\n self._set_block(pos + _Vec3(0, 1, 0), self._BOT_BLOCK)\n self._pos = pos", "def set_position(self, x, y):\n self.tx = -x\n self.ty = -y", "def setzePosition(self, x, y):\n self.zielX = x\n self.zielY = y", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):" ]
[ "0.6879765", "0.63435316", "0.63344735", "0.6325999", "0.6276773", "0.61788565", "0.6124393", "0.60797316", "0.5994932", "0.5965214", "0.59490186", "0.5944862", "0.5942923", "0.59371793", "0.5925291", "0.59103125", "0.58716786", "0.5862039", "0.5848119", "0.584693", "0.5845706", "0.5840269", "0.5831416", "0.5825999", "0.58225", "0.5798172", "0.57939607", "0.57939607", "0.57939607", "0.57939607" ]
0.7795507
0
Return the latest obstacles and clear the obstacle list
def getObstacles(self): ausgabeObstacle = self.globalObstaclesList + self.globalHardObstaclesList self.globalObstaclesList = [] return(ausgabeObstacle)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reset_obstacles(self):\n self.obstacles = np.array([])", "def get_obstacles(self):\n return self.obstacles", "def updateHardObstacles(self):\r\n global_obs = self.calcGlobalObstaclePosition([[10, 20],[10, 0],[10, -20]])\r\n self.globalHardObstaclesList.extend(global_obs)", "def update_obstacles(self, new_obs):\n self.obstacles = new_obs", "def reset(self):\n self.obstacles = []\n self._tick = 0", "def updateObstacles(self, obstacles):\r\n global_obs = self.calcGlobalObstaclePosition(obstacles)\r\n self.globalObstaclesList.extend(global_obs)", "def generate_obstacles(self):\r\n obstacles = self.get_obstable_metrics\r\n obstacle_arrays = []\r\n\r\n for nb_obstacle in obstacles:\r\n empty_array = np.zeros(shape=(self.WINDOW_HEIGHT,\r\n self.WINDOW_WIDTH))\r\n start_location = 0 if nb_obstacle[2] == 1 else self.WINDOW_HEIGHT\r\n y, x = start_location - 1, nb_obstacle[3]\r\n empty_array[y, x] = -1\r\n\r\n for w_value in range(nb_obstacle[0]):\r\n x_updated = x + w_value\r\n\r\n for h_value in range(nb_obstacle[1]):\r\n if nb_obstacle[2] == 1:\r\n y_updated = y + h_value\r\n else:\r\n y_updated = y - h_value\r\n # Replace Value\r\n empty_array[y_updated, x_updated] = -1\r\n\r\n new_array = self.trim_whitespace(empty_array,\r\n nb_obstacle[2],\r\n self.MIN_GAP)\r\n obstacle_arrays.append(new_array)\r\n\r\n return obstacle_arrays", "def recreate_obstacles(self):\n self.board_matrix = np.full(Dimension.board_size(), 1)\n self.obstacles = self.create_obstacles()", "def draw_obstacles(self):\n for obstacle in self.obstacles:\n obstacle.draw(self.window, Colors.BLACK.value)", "def updateObstacleMap(self):\n\n all_sensor_readings = self.laser_readings + self.sonar_readings\n\n #we remove all the sensor readings that occur inside the robot frame\n restricted_sensor_readings = []\n for pt in all_sensor_readings:\n if not self.obstacle_map.inRobot(pt):\n restricted_sensor_readings.append(pt)\n\n #add the obstacles to the obstacle map\n self.obstacle_map_lock.acquire()\n self.obstacle_map.addObstacles(restricted_sensor_readings)\n self.obstacle_map_lock.release()\n\n return", "def obstacles(self):\r\n\r\n #Radious arround the head\r\n limit_sight = self.snake_sight\r\n head = self.body[0].position\r\n binary_map_complete = self.complete_mapping()\r\n map_matrix = np.matrix(binary_map_complete)\r\n obstacles = []\r\n\r\n #limits in all directions\r\n left_x = head[0] - limit_sight\r\n right_x = head[0] + limit_sight\r\n up_y = head[1] - limit_sight\r\n down_y = head[1] + limit_sight\r\n\r\n #submatrix with limits size\r\n snake_sight = map_matrix[up_y:down_y+1, left_x:right_x+1]\r\n\r\n #Special cases where the snake approximates to the borders\r\n ##Corners\r\n if left_x < 0 and up_y < 0:\r\n snake_sight = map_matrix[0:down_y+1, 0:right_x+1]\r\n interval_x = [self.limits[0] + left_x, self.limits[0]]\r\n interval_y = [self.limits[1] + up_y, self.limits[1]]\r\n interval_x_matrix = map_matrix[0:down_y+1, interval_x[0]:interval_x[1]]\r\n interval_y_matrix = map_matrix[interval_y[0]:interval_y[1], 0:right_x+1]\r\n interval_x_y_matrix = map_matrix[interval_y[0]:interval_y[1], interval_x[0]:interval_x[1]]\r\n temporal = np.c_[interval_x_y_matrix, interval_y_matrix]\r\n snake_sight = np.c_[interval_x_matrix, snake_sight]\r\n snake_sight = np.r_[temporal, snake_sight] \r\n return snake_sight\r\n \r\n if left_x < 0 and down_y > self.limits[1] - 1:\r\n snake_sight = map_matrix[up_y:self.limits[1], 0:right_x+1]\r\n interval_x = [self.limits[0] + left_x, self.limits[0]]\r\n interval_y = [0, down_y - 
self.limits[1] + 1]\r\n interval_x_matrix = map_matrix[up_y:self.limits[1], interval_x[0]:interval_x[1]]\r\n interval_y_matrix = map_matrix[interval_y[0]:interval_y[1], 0:right_x+1]\r\n interval_x_y_matrix = map_matrix[interval_y[0]:interval_y[1], interval_x[0]:interval_x[1]]\r\n temporal = np.c_[interval_x_y_matrix, interval_y_matrix]\r\n snake_sight = np.c_[interval_x_matrix, snake_sight]\r\n snake_sight = np.r_[snake_sight, temporal]\r\n return snake_sight\r\n \r\n if right_x > self.limits[0]-1 and up_y < 0:\r\n snake_sight = map_matrix[0:down_y+1, left_x:self.limits[0]]\r\n interval_x = [0, right_x - self.limits[0] + 1]\r\n interval_y = [self.limits[1] + up_y, self.limits[1]]\r\n interval_x_matrix = map_matrix[0:down_y+1, interval_x[0]:interval_x[1]]\r\n interval_y_matrix = map_matrix[interval_y[0]:interval_y[1], left_x:self.limits[0]]\r\n interval_x_y_matrix = map_matrix[interval_y[0]:interval_y[1], interval_x[0]:interval_x[1]]\r\n temporal = np.c_[interval_y_matrix, interval_x_y_matrix]\r\n snake_sight = np.c_[snake_sight, interval_x_matrix]\r\n snake_sight = np.r_[temporal, snake_sight]\r\n return snake_sight\r\n \r\n if right_x > self.limits[0]-1 and down_y > self.limits[1]-1:\r\n snake_sight = map_matrix[up_y:self.limits[1], left_x:self.limits[0]]\r\n interval_x = [0, right_x - self.limits[0] + 1]\r\n interval_y = [0, down_y - self.limits[1] + 1]\r\n interval_x_matrix = map_matrix[up_y:self.limits[1], interval_x[0]:interval_x[1]]\r\n interval_y_matrix = map_matrix[interval_y[0]:interval_y[1], left_x:self.limits[0]]\r\n interval_x_y_matrix = map_matrix[interval_y[0]:interval_y[1], interval_x[0]:interval_x[1]]\r\n temporal = np.c_[interval_y_matrix, interval_x_y_matrix]\r\n snake_sight = np.c_[snake_sight, interval_x_matrix]\r\n snake_sight = np.r_[snake_sight, temporal]\r\n return snake_sight\r\n\r\n ##Middle\r\n if left_x < 0:\r\n snake_sight = map_matrix[up_y:down_y+1, 0:right_x+1]\r\n interval_x = [self.limits[0] + left_x, self.limits[0]]\r\n interval_x_matrix = map_matrix[up_y:down_y+1, interval_x[0]:interval_x[1]]\r\n snake_sight = np.c_[interval_x_matrix, snake_sight]\r\n return snake_sight\r\n\r\n if right_x > self.limits[0]-1:\r\n snake_sight = map_matrix[up_y:down_y+1, left_x:self.limits[0]]\r\n interval_x = [0, right_x - self.limits[0] + 1]\r\n interval_x_matrix = map_matrix[up_y:down_y+1, interval_x[0]:interval_x[1]]\r\n snake_sight = np.c_[snake_sight, interval_x_matrix]\r\n return snake_sight\r\n\r\n if up_y < 0:\r\n snake_sight = map_matrix[0:down_y+1, left_x:right_x+1]\r\n interval_y = [self.limits[1] + up_y, self.limits[1]]\r\n interval_y_matrix = map_matrix[interval_y[0]:interval_y[1], left_x:right_x+1]\r\n snake_sight = np.r_[interval_y_matrix, snake_sight]\r\n return snake_sight\r\n \r\n if down_y > self.limits[1]-1:\r\n snake_sight = map_matrix[up_y:self.limits[1], left_x:right_x+1]\r\n interval_y = [0, down_y - self.limits[1] + 1]\r\n interval_y_matrix = map_matrix[interval_y[0]:interval_y[1], left_x:right_x+1]\r\n snake_sight = np.r_[snake_sight, interval_y_matrix]\r\n return snake_sight\r\n\r\n return snake_sight", "def create_obstacles(self) -> List[Square]:\n obstacles_number = random.randint(1, self.maximum_obstacles_on_board)\n obstacles = list()\n\n while len(obstacles) < obstacles_number:\n\n obstacle_x_pos = random.randint(0, Dimension.board_width() - 1)\n obstacle_y_pos = random.randint(0, Dimension.board_height() - 1)\n obstacle = Square(obstacle_x_pos, obstacle_y_pos)\n if obstacle not in obstacles:\n 
self.board_matrix[obstacle_y_pos][obstacle_x_pos] = 0\n obstacles.append(obstacle)\n\n return obstacles", "def remove_dead_obstacles(obstacle_list):\n\tfor obstacle in obstacle_list:\n\t\tobstacle.lifetime -= 1\n\t\tif obstacle.lifetime == 0:\n\t\t\tobstacle_list.remove(obstacle)\n\t\tprint(obstacle)", "def clear_for_new_board(self):\r\n self.game_board = []\r\n self.good_contours = []\r\n self.game_board_contours = []", "def get_obstacles_map(obstacles, placed_pecies):\n \n #create a mask image to draw the obstacles on\n blocks = np.zeros(ARENA_SIZE[::-1], np.uint8)\n\n #get the grid points where the robot needs to placed\n grid = get_grid(ARENA_SIZE)\n\n #draw the obstacles and their safety region on the map\n for i in obstacles.keys():\n cv2.circle(blocks, i, int(CIRCULAR_SAFETY_FACTOR*BLOCK_SIZE[0]), 129, -1)\n cv2.rectangle(blocks, (i[0]-int(obstacles[i][0]/4), i[1]-int(obstacles[i][1]/4)), (i[0]+int(obstacles[i][0]/4), i[1]+int(obstacles[i][1]/4)), 255, -1)\n\n #draw the obstacles and their safety region on the map\n for i in placed_pecies.keys():\n try:\n if not i == grid[5]:\n cv2.circle(blocks, i, int(CIRCULAR_SAFETY_FACTOR*BLOCK_SIZE[0]), 129, -1)\n else:\n cv2.rectangle(blocks, (int(i[0]-7.4*placed_pecies[i][0]/4), int(i[1]-7.4*placed_pecies[i][1]/4)),\n (int(i[0]+7.4*placed_pecies[i][0]/4), int(i[1]+7.4*placed_pecies[i][1]/4)), 129, -1)\n cv2.rectangle(blocks, (i[0]-int(placed_pecies[i][0]/4), i[1]-int(placed_pecies[i][1]/4)), (i[0]+int(placed_pecies[i][0]/4), i[1]+int(placed_pecies[i][1]/4)), 255, -1)\n except Exception as e:\n print(e)\n\n return cv2.bitwise_not(blocks)", "def _find_obstacle(self, obstacle_type='*traffic_light*'): \r\n obst = list()\r\n \r\n _actors = self._world.get_actors()\r\n _obstacles = _actors.filter(obstacle_type)\r\n\r\n\r\n for _obstacle in _obstacles:\r\n trigger = _obstacle.trigger_volume\r\n\r\n _obstacle.get_transform().transform(trigger.location)\r\n \r\n distance_to_car = trigger.location.distance(self._vehicle.get_location())\r\n\r\n a = np.sqrt(\r\n trigger.extent.x ** 2 +\r\n trigger.extent.y ** 2 +\r\n trigger.extent.z ** 2)\r\n b = np.sqrt(\r\n self._vehicle.bounding_box.extent.x ** 2 +\r\n self._vehicle.bounding_box.extent.y ** 2 +\r\n self._vehicle.bounding_box.extent.z ** 2)\r\n\r\n s = a + b + 10\r\n \r\n if distance_to_car <= s:\r\n # the actor is affected by this obstacle.\r\n obst.append(_obstacle)\r\n\r\n \"\"\"self._debug.draw_box(carla.BoundingBox(_obstacle.get_transform().location, carla.Vector3D(0.5,0.5,2)),\r\n _obstacle.get_transform().rotation, \r\n 0.05, \r\n carla.Color(255,255,0,0),\r\n 0\r\n )\"\"\"\r\n \"\"\"self._debug.draw_box(carla.BoundingBox(trigger.location, carla.Vector3D(0.1,0.1,10)),\r\n _obstacle.get_transform().rotation, \r\n 0.05, \r\n carla.Color(255,0,0,0),\r\n 0\r\n )\"\"\"\r\n \r\n \"\"\"self._debug.draw_box(carla.BoundingBox(trigger.location, carla.Vector3D(0.1,0.1,2)),\r\n _obstacle.get_transform().rotation, \r\n 0.05, \r\n carla.Color(255,0,0,0),\r\n 0\r\n )\"\"\"\r\n \"\"\"self._debug.draw_box(trigger,\r\n _obstacle.get_transform().rotation, \r\n 0.05, \r\n carla.Color(255,0,0,0),\r\n 0\r\n )\"\"\"\r\n\r\n return obst", "def remove_obstacle(self, x, y):\n self.BOARD[y][x].traversable = True\n self.board_array[y][x] = 0", "def spawn_obstacles(self):\n self.obstacle_sprites.empty()\n number_of_obstacles = random.randint(MIN_OBSTACLES, MAX_OBSTACLES)\n while len(self.obstacle_sprites) < number_of_obstacles:\n obstacle = Obstacle(random.randrange(0, WIDTH), random.randrange(HEIGHT - 500, HEIGHT))\n 
obstacle_collision = pygame.sprite.spritecollide(obstacle, self.obstacle_sprites, False)\n if not obstacle_collision:\n self.obstacle_sprites.add(obstacle)", "def get_obstacles(image):\n\n ih, iw = image.shape[:2]\n image_copy = image.copy()\n\n #resize the image to the size of arena\n image = cv2.resize(image, ARENA_SIZE, interpolation=cv2.INTER_CUBIC)\n gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)\n\n #replace all black pixels to white pixels\n gray[np.where(gray == 0)]= 255\n\n #get the thresholded binary image\n ret,threshold = cv2.threshold(gray,200,255,cv2.THRESH_BINARY_INV)\n\n #find all the countours in the binary image\n _, contours, heiarchy = cv2.findContours(threshold, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\n\n cont = []\n\n #create a mask to draw contours on\n blocks = mask = np.zeros(threshold.shape[:2], np.uint8)\n\n #create a dictionary to hold image roi of all puzzle peices\n blocks_roi = {}\n\n #iterate through all contours\n for i, c in enumerate(contours[1:]):\n\n #find the minimum area fitting rectangle of the contour\n rect = cv2.minAreaRect(c)\n box = cv2.boxPoints(rect)\n box = np.int0(box)\n\n #create the copy of the mask\n mask_copy = mask.copy()\n\n #draw the rectangle on the mask\n cv2.drawContours(mask_copy, [box], -1, (255,255,255), 3)\n\n #floodfill the rectangle\n cv2.floodFill(mask_copy, None, (0,0), 255)\n mask_inv = cv2.bitwise_not(mask_copy)\n blocks = cv2.add(blocks, mask_inv)\n\n _, contours, heiarchy = cv2.findContours(blocks, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\n\n obstacles = {}\n\n for c in contours:\n x,y,w,h = cv2.boundingRect(c)\n obstacles.update({(int(x+w/2), int(y+h/2)): BLOCK_SIZE})\n #obstacles.update({(int(x+w/2), int(y+h/2)): (w, h)}) # for unknown block sizes\n bottom_r = remap((x+w, y+h), ARENA_SIZE, (iw,ih))\n top_l = remap((x, y), ARENA_SIZE, (iw,ih))\n blocks_roi.update({(int(x+w/2), int(y+h/2)): image_copy[top_l[1]:bottom_r[1], top_l[0]:bottom_r[0]]})\n\n return obstacles, blocks_roi", "def reset(self, grid, disallowed, num_of_obstacles):\n # self.array.clear()\n random_array = []\n\n # If I want the obstacles in the same location every episode\n # random.seed(10)\n\n # Make a copy of the grid\n allowed = grid[:]\n\n [allowed.remove(pos) for pos in disallowed]\n\n for i in range(num_of_obstacles):\n new_pos = random.choice((allowed))\n self.array.append(new_pos)\n random_array.append(new_pos)\n allowed.remove(new_pos)\n\n self.array_length = self.array_length + num_of_obstacles\n\n return random_array", "def clear(self):\n self.best_moves = []\n self.best_times = []", "def get_obstacles(self, map_server):\n\n self.obstacle_list = []\n for index, element in enumerate(map_server):\n if element > 0:\n self.obstacle_list.append(index)\n return(self.obstacle_list)", "def check_for_obstacles(self):\n obs = False\n obs_p = []\n for point in self.obstacles:\n if -0.15 <= point[1] <= 0.15: # robot is 178mm wide\n # Obstacles should be less than or equal to 0.2 m away before being detected\n if 0 <= point[0] <= .2:\n obs_p.append(point)\n obs = True\n if obs:\n pos = self.determine_pos_of_obstacle(obs_p)\n data = Obstacle()\n data.x = pos[0]\n data.y = pos[1]\n data.obstacle = True\n self.obs_pub.publish(data)", "def reset_objects(new_game_map):\r\n new_coins = []\r\n new_enemies = []\r\n y_val = 0\r\n for row in new_game_map:\r\n x_val = 0\r\n for tile in row:\r\n if tile == '3':\r\n new_coins.append(pygame.Rect((x_val * TILE_SIZE), (y_val * TILE_SIZE), TILE_SIZE, TILE_SIZE))\r\n if tile == '4':\r\n new_enemies.append([[0, 0], 
pygame.Rect((x_val * TILE_SIZE), (y_val * TILE_SIZE), TILE_SIZE, TILE_SIZE), 1, True, ['enemy_move', 0]])\r\n x_val += 1\r\n y_val += 1\r\n return new_coins, new_enemies, []", "async def show_obstacles(canvas):\n\n while True:\n boxes = []\n\n for obstacle in obstacle_manager:\n boxes.append(obstacle.get_bounding_box())\n\n for x, y, frame in boxes:\n draw_frame(canvas, x, y, frame)\n\n await Sleep(1)\n\n for x, y, frame in boxes:\n draw_frame(canvas, x, y, frame, negative=True)", "async def show_obstacles(canvas):\n\n while True:\n boxes = []\n\n for obstacle in OBSTACLES:\n boxes.append(obstacle.dump_bounding_box())\n\n for row, column, frame in boxes:\n draw_frame(canvas, row, column, frame)\n\n await asyncio.sleep(0)\n\n for row, column, frame in boxes:\n draw_frame(canvas, row, column, frame, negative=True)", "def place_obstacles():\n #Randomly generate different sized rectangles\n #Soem may overlap, which gives more variety in shape of obstacles\n xvals = np.random.randint(0,self.map_dimensions[1],size=self.N_obstacles)\n yvals = np.random.randint(0,self.map_dimensions[0],size=self.N_obstacles)\n lower_left = zip(xvals,yvals)\n rects = []\n for LL in lower_left:\n x = LL[0]\n y = LL[1]\n wmax = self.map_dimensions[1] - x\n w = np.random.randint(0,wmax,size=1)[0]\n hmax = self.map_dimensions[0] - y\n h = np.random.randint(0,hmax,size=1)[0]\n rects += [(x,y,w,h)]\n self.coordinates__obstacles = rects", "def getObstacles(request):\n # Validate user made a GET request\n if request.method != 'GET':\n logger.warning('Invalid request method for obstacle info request.')\n logger.debug(request)\n return HttpResponseBadRequest('Request must be GET request.')\n # Validate user is logged in to make request\n if not request.user.is_authenticated():\n logger.warning('User not authenticated for obstacle info request.')\n logger.debug(request)\n return HttpResponseBadRequest('User not logged in. Login required.')\n\n # Log user access to obstacle info\n logger.info('User downloaded obstacle info: %s.' 
% request.user.username)\n access_log = ObstacleAccessLog()\n access_log.user = request.user\n access_log.save()\n\n # Form JSON response portion for stationary obstacles\n stationary_obstacles_cached = True\n stationary_obstacles_key = '/StationaryObstacle/all'\n stationary_obstacles = cache.get(stationary_obstacles_key)\n if stationary_obstacles is None:\n stationary_obstacles = StationaryObstacle.objects.all()\n stationary_obstacles_cached = False\n stationary_obstacles_json = list()\n for cur_obst in stationary_obstacles:\n # Add current obstacle\n cur_obst_json = cur_obst.toJSON()\n stationary_obstacles_json.append(cur_obst_json)\n\n # Form JSON response portion for moving obstacles\n moving_obstacles_cached = True\n moving_obstacles_key = '/MovingObstacle/all'\n moving_obstacles = cache.get(moving_obstacles_key)\n if moving_obstacles is None:\n moving_obstacles = MovingObstacle.objects.all()\n moving_obstacles_cached = False\n moving_obstacles_json = list()\n for cur_obst in moving_obstacles:\n # Add current obstacle\n cur_obst_json = cur_obst.toJSON()\n moving_obstacles_json.append(cur_obst_json)\n\n # Form final JSON response\n data = {\n 'stationary_obstacles': stationary_obstacles_json,\n 'moving_obstacles': moving_obstacles_json\n }\n\n # Cache obstacles for next request\n if not stationary_obstacles_cached:\n cache.set(stationary_obstacles_key, stationary_obstacles)\n if not moving_obstacles_cached:\n cache.set(moving_obstacles_key, moving_obstacles)\n\n # Return JSON data\n return HttpResponse(json.dumps(data),\n content_type=\"application/json\")", "def update(self, players):\n # if self._tick % 75 == 0:\n # pos = Vector2(100 + self._tick % 1240, -200)\n # radius = 50 + self._tick % 200\n # dir = Vector2(-5.5 + self._tick % 9, 2 + self._tick % 5)\n if self._tick % 25 == 0:\n pos = Vector2(((self._tick / 25) * 100) % 505, -100)\n radius = 50\n dir = Vector2(0, 4)\n self.obstacles.append(Obstacle(pos, radius, dir))\n\n self.obstacles = [\n Obstacle(add(obstacle.pos, obstacle.dir), obstacle.radius, obstacle.dir)\n for obstacle in self.obstacles\n if obstacle.pos.y < 550\n ]\n\n self._tick = self._tick + 1", "def recall(self):\n for t in self.placed_tiles:\n row = self.placed_tiles[t][1][0]\n col = self.placed_tiles[t][1][1]\n # remove tiles from board\n self.board.board[row][col].letter = None\n # put tiles back on rack\n self.rack[t] = self.placed_tiles[t][0]" ]
[ "0.7343851", "0.73193496", "0.7129685", "0.6885037", "0.6878335", "0.6839208", "0.6755548", "0.66489303", "0.660735", "0.64975613", "0.64508957", "0.6420063", "0.62252665", "0.6134923", "0.612027", "0.60872567", "0.6025011", "0.5986204", "0.5968379", "0.59590596", "0.59383273", "0.5928829", "0.5924046", "0.5884617", "0.5860035", "0.58543533", "0.5851148", "0.58507204", "0.57925624", "0.5771477" ]
0.7682897
0
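The spawn_obstacles snippet in the row above rejects any randomly placed obstacle that overlaps an existing one by calling pygame.sprite.spritecollide. A minimal, self-contained sketch of that rejection-sampling pattern follows; the obstacle factory, group, and board dimensions are assumptions for illustration, and the factory is assumed to return a pygame Sprite that has a rect.

```python
import random
import pygame

def spawn_non_overlapping(make_obstacle, group, target_count, width, height):
    """Keep sampling random positions, keeping only candidates that do not
    collide with anything already placed (same idea as spawn_obstacles above)."""
    while len(group) < target_count:
        candidate = make_obstacle(random.randrange(width), random.randrange(height))
        # spritecollide lists every sprite in `group` whose rect overlaps `candidate`
        if not pygame.sprite.spritecollide(candidate, group, False):
            group.add(candidate)
    return group
```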
Get an existing SSLCertificate resource's state with the given name, id, and optional extra properties used to qualify the lookup.
def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None, certificate: Optional[pulumi.Input[str]] = None, certificate_id: Optional[pulumi.Input[int]] = None, creation_timestamp: Optional[pulumi.Input[str]] = None, description: Optional[pulumi.Input[str]] = None, expire_time: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, name_prefix: Optional[pulumi.Input[str]] = None, private_key: Optional[pulumi.Input[str]] = None, project: Optional[pulumi.Input[str]] = None, self_link: Optional[pulumi.Input[str]] = None) -> 'SSLCertificate': opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = _SSLCertificateState.__new__(_SSLCertificateState) __props__.__dict__["certificate"] = certificate __props__.__dict__["certificate_id"] = certificate_id __props__.__dict__["creation_timestamp"] = creation_timestamp __props__.__dict__["description"] = description __props__.__dict__["expire_time"] = expire_time __props__.__dict__["name"] = name __props__.__dict__["name_prefix"] = name_prefix __props__.__dict__["private_key"] = private_key __props__.__dict__["project"] = project __props__.__dict__["self_link"] = self_link return SSLCertificate(resource_name, opts=opts, __props__=__props__)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n certificate: Optional[pulumi.Input[str]] = None,\n certificate_id: Optional[pulumi.Input[str]] = None,\n certificate_name: Optional[pulumi.Input[str]] = None,\n domain: Optional[pulumi.Input[str]] = None,\n instance_id: Optional[pulumi.Input[str]] = None,\n private_key: Optional[pulumi.Input[str]] = None) -> 'Certificate':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _CertificateState.__new__(_CertificateState)\n\n __props__.__dict__[\"certificate\"] = certificate\n __props__.__dict__[\"certificate_id\"] = certificate_id\n __props__.__dict__[\"certificate_name\"] = certificate_name\n __props__.__dict__[\"domain\"] = domain\n __props__.__dict__[\"instance_id\"] = instance_id\n __props__.__dict__[\"private_key\"] = private_key\n return Certificate(resource_name, opts=opts, __props__=__props__)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n arn: Optional[pulumi.Input[str]] = None,\n certificate_body: Optional[pulumi.Input[str]] = None,\n certificate_chain: Optional[pulumi.Input[str]] = None,\n expiration: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n name_prefix: Optional[pulumi.Input[str]] = None,\n path: Optional[pulumi.Input[str]] = None,\n private_key: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n upload_date: Optional[pulumi.Input[str]] = None) -> 'ServerCertificate':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _ServerCertificateState.__new__(_ServerCertificateState)\n\n __props__.__dict__[\"arn\"] = arn\n __props__.__dict__[\"certificate_body\"] = certificate_body\n __props__.__dict__[\"certificate_chain\"] = certificate_chain\n __props__.__dict__[\"expiration\"] = expiration\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"name_prefix\"] = name_prefix\n __props__.__dict__[\"path\"] = path\n __props__.__dict__[\"private_key\"] = private_key\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"tags_all\"] = tags_all\n __props__.__dict__[\"upload_date\"] = upload_date\n return ServerCertificate(resource_name, opts=opts, __props__=__props__)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n certificate: Optional[pulumi.Input[str]] = None,\n csr: Optional[pulumi.Input[str]] = None,\n expires_on: Optional[pulumi.Input[str]] = None,\n hostnames: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n min_days_for_renewal: Optional[pulumi.Input[int]] = None,\n request_type: Optional[pulumi.Input[str]] = None,\n requested_validity: Optional[pulumi.Input[int]] = None) -> 'OriginCaCertificate':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _OriginCaCertificateState.__new__(_OriginCaCertificateState)\n\n __props__.__dict__[\"certificate\"] = certificate\n __props__.__dict__[\"csr\"] = csr\n __props__.__dict__[\"expires_on\"] = expires_on\n __props__.__dict__[\"hostnames\"] = hostnames\n __props__.__dict__[\"min_days_for_renewal\"] = min_days_for_renewal\n __props__.__dict__[\"request_type\"] = request_type\n __props__.__dict__[\"requested_validity\"] = requested_validity\n return OriginCaCertificate(resource_name, opts=opts, 
__props__=__props__)", "def get(resource_name, id, opts=None, arn=None, certificate=None, certificate_authority_configuration=None, certificate_chain=None, certificate_signing_request=None, enabled=None, not_after=None, not_before=None, permanent_deletion_time_in_days=None, revocation_configuration=None, serial=None, status=None, tags=None, type=None):\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = dict()\n __props__[\"arn\"] = arn\n __props__[\"certificate\"] = certificate\n __props__[\"certificate_authority_configuration\"] = certificate_authority_configuration\n __props__[\"certificate_chain\"] = certificate_chain\n __props__[\"certificate_signing_request\"] = certificate_signing_request\n __props__[\"enabled\"] = enabled\n __props__[\"not_after\"] = not_after\n __props__[\"not_before\"] = not_before\n __props__[\"permanent_deletion_time_in_days\"] = permanent_deletion_time_in_days\n __props__[\"revocation_configuration\"] = revocation_configuration\n __props__[\"serial\"] = serial\n __props__[\"status\"] = status\n __props__[\"tags\"] = tags\n __props__[\"type\"] = type\n return CertificateAuthority(resource_name, opts=opts, __props__=__props__)", "def get_certificate(self, cert_id):\r\n return self.ssl.getObject(id=cert_id)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n certificate_id: Optional[pulumi.Input[str]] = None) -> 'LocalRulestackOutboundUntrustCertificateAssociation':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _LocalRulestackOutboundUntrustCertificateAssociationState.__new__(_LocalRulestackOutboundUntrustCertificateAssociationState)\n\n __props__.__dict__[\"certificate_id\"] = certificate_id\n return LocalRulestackOutboundUntrustCertificateAssociation(resource_name, opts=opts, __props__=__props__)", "def get_certificates_by_pcc(conn: dict, id: str) -> dict:\n return get(conn, f\"{S3PCCS}/{id}/certificates\")", "def get_ssl_certificates_v1(self, skill_id, **kwargs):\n # type: (str, **Any) -> Union[ApiResponse, object, StandardizedError_f5106a89, SSLCertificatePayload_97891902]\n operation_name = \"get_ssl_certificates_v1\"\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'skill_id' is set\n if ('skill_id' not in params) or (params['skill_id'] is None):\n raise ValueError(\n \"Missing the required parameter `skill_id` when calling `\" + operation_name + \"`\")\n\n resource_path = '/v1/skills/{skillId}/sslCertificateSets/~latest'\n resource_path = resource_path.replace('{format}', 'json')\n\n path_params = {} # type: Dict\n if 'skill_id' in params:\n path_params['skillId'] = params['skill_id']\n\n query_params = [] # type: List\n\n header_params = [] # type: List\n\n body_params = None\n header_params.append(('Content-type', 'application/json'))\n header_params.append(('User-Agent', self.user_agent))\n\n # Response Type\n full_response = False\n if 'full_response' in params:\n full_response = params['full_response']\n\n # Authentication setting\n access_token = self._lwa_service_client.get_access_token_from_refresh_token()\n authorization_value = \"Bearer \" + access_token\n header_params.append(('Authorization', authorization_value))\n\n error_definitions = [] # type: List\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v1.skill.ssl_certificate_payload.SSLCertificatePayload\", status_code=200, 
message=\"Response contains the latest version of the ssl certificates.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v1.skill.standardized_error.StandardizedError\", status_code=401, message=\"The auth token is invalid/expired or doesn&#39;t have access to the resource.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v1.skill.standardized_error.StandardizedError\", status_code=404, message=\"The resource being requested is not found.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v1.skill.standardized_error.StandardizedError\", status_code=429, message=\"Exceeds the permitted request limit. Throttling criteria includes total requests, per API, ClientId, and CustomerId.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v1.skill.standardized_error.StandardizedError\", status_code=500, message=\"Internal Server Error.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v1.skill.standardized_error.StandardizedError\", status_code=503, message=\"Service Unavailable.\"))\n\n api_response = self.invoke(\n method=\"GET\",\n endpoint=self._api_endpoint,\n path=resource_path,\n path_params=path_params,\n query_params=query_params,\n header_params=header_params,\n body=body_params,\n response_definitions=error_definitions,\n response_type=\"ask_smapi_model.v1.skill.ssl_certificate_payload.SSLCertificatePayload\")\n\n if full_response:\n return api_response\n return api_response.body", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n additional_locations: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServiceAdditionalLocationArgs']]]]] = None,\n certificates: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServiceCertificateArgs']]]]] = None,\n client_certificate_enabled: Optional[pulumi.Input[bool]] = None,\n delegation: Optional[pulumi.Input[pulumi.InputType['ServiceDelegationArgs']]] = None,\n developer_portal_url: Optional[pulumi.Input[str]] = None,\n gateway_disabled: Optional[pulumi.Input[bool]] = None,\n gateway_regional_url: Optional[pulumi.Input[str]] = None,\n gateway_url: Optional[pulumi.Input[str]] = None,\n hostname_configuration: Optional[pulumi.Input[pulumi.InputType['ServiceHostnameConfigurationArgs']]] = None,\n identity: Optional[pulumi.Input[pulumi.InputType['ServiceIdentityArgs']]] = None,\n location: Optional[pulumi.Input[str]] = None,\n management_api_url: Optional[pulumi.Input[str]] = None,\n min_api_version: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n notification_sender_email: Optional[pulumi.Input[str]] = None,\n policy: Optional[pulumi.Input[pulumi.InputType['ServicePolicyArgs']]] = None,\n portal_url: Optional[pulumi.Input[str]] = None,\n private_ip_addresses: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n protocols: Optional[pulumi.Input[pulumi.InputType['ServiceProtocolsArgs']]] = None,\n public_ip_address_id: Optional[pulumi.Input[str]] = None,\n public_ip_addresses: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n public_network_access_enabled: Optional[pulumi.Input[bool]] = None,\n publisher_email: Optional[pulumi.Input[str]] = None,\n publisher_name: Optional[pulumi.Input[str]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n scm_url: Optional[pulumi.Input[str]] = None,\n security: 
Optional[pulumi.Input[pulumi.InputType['ServiceSecurityArgs']]] = None,\n sign_in: Optional[pulumi.Input[pulumi.InputType['ServiceSignInArgs']]] = None,\n sign_up: Optional[pulumi.Input[pulumi.InputType['ServiceSignUpArgs']]] = None,\n sku_name: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tenant_access: Optional[pulumi.Input[pulumi.InputType['ServiceTenantAccessArgs']]] = None,\n virtual_network_configuration: Optional[pulumi.Input[pulumi.InputType['ServiceVirtualNetworkConfigurationArgs']]] = None,\n virtual_network_type: Optional[pulumi.Input[str]] = None,\n zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None) -> 'Service':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _ServiceState.__new__(_ServiceState)\n\n __props__.__dict__[\"additional_locations\"] = additional_locations\n __props__.__dict__[\"certificates\"] = certificates\n __props__.__dict__[\"client_certificate_enabled\"] = client_certificate_enabled\n __props__.__dict__[\"delegation\"] = delegation\n __props__.__dict__[\"developer_portal_url\"] = developer_portal_url\n __props__.__dict__[\"gateway_disabled\"] = gateway_disabled\n __props__.__dict__[\"gateway_regional_url\"] = gateway_regional_url\n __props__.__dict__[\"gateway_url\"] = gateway_url\n __props__.__dict__[\"hostname_configuration\"] = hostname_configuration\n __props__.__dict__[\"identity\"] = identity\n __props__.__dict__[\"location\"] = location\n __props__.__dict__[\"management_api_url\"] = management_api_url\n __props__.__dict__[\"min_api_version\"] = min_api_version\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"notification_sender_email\"] = notification_sender_email\n __props__.__dict__[\"policy\"] = policy\n __props__.__dict__[\"portal_url\"] = portal_url\n __props__.__dict__[\"private_ip_addresses\"] = private_ip_addresses\n __props__.__dict__[\"protocols\"] = protocols\n __props__.__dict__[\"public_ip_address_id\"] = public_ip_address_id\n __props__.__dict__[\"public_ip_addresses\"] = public_ip_addresses\n __props__.__dict__[\"public_network_access_enabled\"] = public_network_access_enabled\n __props__.__dict__[\"publisher_email\"] = publisher_email\n __props__.__dict__[\"publisher_name\"] = publisher_name\n __props__.__dict__[\"resource_group_name\"] = resource_group_name\n __props__.__dict__[\"scm_url\"] = scm_url\n __props__.__dict__[\"security\"] = security\n __props__.__dict__[\"sign_in\"] = sign_in\n __props__.__dict__[\"sign_up\"] = sign_up\n __props__.__dict__[\"sku_name\"] = sku_name\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"tenant_access\"] = tenant_access\n __props__.__dict__[\"virtual_network_configuration\"] = virtual_network_configuration\n __props__.__dict__[\"virtual_network_type\"] = virtual_network_type\n __props__.__dict__[\"zones\"] = zones\n return Service(resource_name, opts=opts, __props__=__props__)", "def find_certificate(p): # find_certificate(props, /)\n\n for page in acm.get_paginator('list_certificates').paginate():\n for certificate in page['CertificateSummaryList']:\n log_info(certificate)\n\n if p['DomainName'].lower() == certificate['DomainName']:\n tags = {tag['Key']: tag['Value'] for tag in\n acm.list_tags_for_certificate(**{'CertificateArn': certificate['CertificateArn']})['Tags']}\n\n if (tags.get('cloudformation:' + 'logical-id') == e['LogicalResourceId'] and\n tags.get('cloudformation:' + 'stack-id') == e['StackId'] and\n tags.get('cloudformation:' + 
'properties') == hash_func(p)\n ):\n return certificate['CertificateArn']", "def request_cert():\n\n api_request = shallow_copy(props)\n\n for key in ['ServiceToken', 'Region', 'Tags', 'Route53RoleArn']:\n api_request.pop(key, None)\n\n if 'ValidationMethod' in props:\n if props['ValidationMethod'] == 'DNS':\n\n # Check that we have all the hosted zone information we need to validate\n # before we create the certificate\n for name in set([props['DomainName']] + props.get('SubjectAlternativeNames', [])):\n get_zone_for(name)\n\n del api_request['DomainValidationOptions']\n\n e['PhysicalResourceId'] = acm.request_certificate(\n IdempotencyToken=i_token,\n **api_request\n )['CertificateArn']\n add_tags()", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'CryptoKey':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = CryptoKeyArgs.__new__(CryptoKeyArgs)\n\n __props__.__dict__[\"create_time\"] = None\n __props__.__dict__[\"crypto_key_backend\"] = None\n __props__.__dict__[\"crypto_key_id\"] = None\n __props__.__dict__[\"destroy_scheduled_duration\"] = None\n __props__.__dict__[\"import_only\"] = None\n __props__.__dict__[\"key_ring_id\"] = None\n __props__.__dict__[\"labels\"] = None\n __props__.__dict__[\"location\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"next_rotation_time\"] = None\n __props__.__dict__[\"primary\"] = None\n __props__.__dict__[\"project\"] = None\n __props__.__dict__[\"purpose\"] = None\n __props__.__dict__[\"rotation_period\"] = None\n __props__.__dict__[\"skip_initial_version_creation\"] = None\n __props__.__dict__[\"version_template\"] = None\n return CryptoKey(resource_name, opts=opts, __props__=__props__)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Canary':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = CanaryArgs.__new__(CanaryArgs)\n\n __props__.__dict__[\"artifact_config\"] = None\n __props__.__dict__[\"artifact_s3_location\"] = None\n __props__.__dict__[\"code\"] = None\n __props__.__dict__[\"delete_lambda_resources_on_canary_deletion\"] = None\n __props__.__dict__[\"execution_role_arn\"] = None\n __props__.__dict__[\"failure_retention_period\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"run_config\"] = None\n __props__.__dict__[\"runtime_version\"] = None\n __props__.__dict__[\"schedule\"] = None\n __props__.__dict__[\"start_canary_after_creation\"] = None\n __props__.__dict__[\"state\"] = None\n __props__.__dict__[\"success_retention_period\"] = None\n __props__.__dict__[\"tags\"] = None\n __props__.__dict__[\"visual_reference\"] = None\n __props__.__dict__[\"vpc_config\"] = None\n return Canary(resource_name, opts=opts, __props__=__props__)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n cak: Optional[pulumi.Input[str]] = None,\n ckn: Optional[pulumi.Input[str]] = None,\n connection_id: Optional[pulumi.Input[str]] = None,\n secret_arn: Optional[pulumi.Input[str]] = None,\n start_on: Optional[pulumi.Input[str]] = None,\n state: Optional[pulumi.Input[str]] = None) -> 'MacsecKeyAssociation':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _MacsecKeyAssociationState.__new__(_MacsecKeyAssociationState)\n\n __props__.__dict__[\"cak\"] = cak\n __props__.__dict__[\"ckn\"] = ckn\n 
__props__.__dict__[\"connection_id\"] = connection_id\n __props__.__dict__[\"secret_arn\"] = secret_arn\n __props__.__dict__[\"start_on\"] = start_on\n __props__.__dict__[\"state\"] = state\n return MacsecKeyAssociation(resource_name, opts=opts, __props__=__props__)", "def test_get_certificate_by_id(self):\n self.client.post(\n '/api/v1/certificates', data=json.dumps(new_certificate),\n content_type='application/json',\n headers=self.get_registrar_token())\n response = self.client.get(\n '/api/v1/certificates/1', content_type='application/json',\n headers=self.get_token())\n result = json.loads(response.data.decode())\n self.assertEqual(result['message'],\n 'Certificate retrieved successfully')\n assert response.status_code == 200", "def get_ssl_certificate():", "def test_get_non_existing_certificate_by_id(self):\n self.client.post(\n '/api/v1/certificates', data=json.dumps(new_certificate),\n content_type='application/json',\n headers=self.get_registrar_token())\n response = self.client.get(\n '/api/v1/certificates/10', content_type='application/json',\n headers=self.get_token())\n result = json.loads(response.data.decode())\n self.assertEqual(result['message'],\n 'Certificate not found')\n assert response.status_code == 404", "def get(self, cache_id):\n return self.certificates.get(cache_id)", "def get_x509_certificate_by_name(certs, key_name):\n for cert in certs['certificates']:\n if cert['key_name'] == key_name:\n return cert['x509_certificate_pem']\n raise CertificateError('Certificate \\'%s\\' not found' % key_name)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n config: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n metadata: Optional[pulumi.Input[pulumi.InputType['SyntheticsPrivateLocationMetadataArgs']]] = None,\n name: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None) -> 'SyntheticsPrivateLocation':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _SyntheticsPrivateLocationState.__new__(_SyntheticsPrivateLocationState)\n\n __props__.__dict__[\"config\"] = config\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"metadata\"] = metadata\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"tags\"] = tags\n return SyntheticsPrivateLocation(resource_name, opts=opts, __props__=__props__)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n accessor: Optional[pulumi.Input[str]] = None,\n binddn: Optional[pulumi.Input[str]] = None,\n bindpass: Optional[pulumi.Input[str]] = None,\n case_sensitive_names: Optional[pulumi.Input[bool]] = None,\n certificate: Optional[pulumi.Input[str]] = None,\n client_tls_cert: Optional[pulumi.Input[str]] = None,\n client_tls_key: Optional[pulumi.Input[str]] = None,\n deny_null_bind: Optional[pulumi.Input[bool]] = None,\n description: Optional[pulumi.Input[str]] = None,\n disable_remount: Optional[pulumi.Input[bool]] = None,\n discoverdn: Optional[pulumi.Input[bool]] = None,\n groupattr: Optional[pulumi.Input[str]] = None,\n groupdn: Optional[pulumi.Input[str]] = None,\n groupfilter: Optional[pulumi.Input[str]] = None,\n insecure_tls: Optional[pulumi.Input[bool]] = None,\n local: Optional[pulumi.Input[bool]] = None,\n max_page_size: Optional[pulumi.Input[int]] = None,\n namespace: Optional[pulumi.Input[str]] = None,\n path: Optional[pulumi.Input[str]] = None,\n starttls: 
Optional[pulumi.Input[bool]] = None,\n tls_max_version: Optional[pulumi.Input[str]] = None,\n tls_min_version: Optional[pulumi.Input[str]] = None,\n token_bound_cidrs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n token_explicit_max_ttl: Optional[pulumi.Input[int]] = None,\n token_max_ttl: Optional[pulumi.Input[int]] = None,\n token_no_default_policy: Optional[pulumi.Input[bool]] = None,\n token_num_uses: Optional[pulumi.Input[int]] = None,\n token_period: Optional[pulumi.Input[int]] = None,\n token_policies: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n token_ttl: Optional[pulumi.Input[int]] = None,\n token_type: Optional[pulumi.Input[str]] = None,\n upndomain: Optional[pulumi.Input[str]] = None,\n url: Optional[pulumi.Input[str]] = None,\n use_token_groups: Optional[pulumi.Input[bool]] = None,\n userattr: Optional[pulumi.Input[str]] = None,\n userdn: Optional[pulumi.Input[str]] = None,\n userfilter: Optional[pulumi.Input[str]] = None,\n username_as_alias: Optional[pulumi.Input[bool]] = None) -> 'AuthBackend':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _AuthBackendState.__new__(_AuthBackendState)\n\n __props__.__dict__[\"accessor\"] = accessor\n __props__.__dict__[\"binddn\"] = binddn\n __props__.__dict__[\"bindpass\"] = bindpass\n __props__.__dict__[\"case_sensitive_names\"] = case_sensitive_names\n __props__.__dict__[\"certificate\"] = certificate\n __props__.__dict__[\"client_tls_cert\"] = client_tls_cert\n __props__.__dict__[\"client_tls_key\"] = client_tls_key\n __props__.__dict__[\"deny_null_bind\"] = deny_null_bind\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"disable_remount\"] = disable_remount\n __props__.__dict__[\"discoverdn\"] = discoverdn\n __props__.__dict__[\"groupattr\"] = groupattr\n __props__.__dict__[\"groupdn\"] = groupdn\n __props__.__dict__[\"groupfilter\"] = groupfilter\n __props__.__dict__[\"insecure_tls\"] = insecure_tls\n __props__.__dict__[\"local\"] = local\n __props__.__dict__[\"max_page_size\"] = max_page_size\n __props__.__dict__[\"namespace\"] = namespace\n __props__.__dict__[\"path\"] = path\n __props__.__dict__[\"starttls\"] = starttls\n __props__.__dict__[\"tls_max_version\"] = tls_max_version\n __props__.__dict__[\"tls_min_version\"] = tls_min_version\n __props__.__dict__[\"token_bound_cidrs\"] = token_bound_cidrs\n __props__.__dict__[\"token_explicit_max_ttl\"] = token_explicit_max_ttl\n __props__.__dict__[\"token_max_ttl\"] = token_max_ttl\n __props__.__dict__[\"token_no_default_policy\"] = token_no_default_policy\n __props__.__dict__[\"token_num_uses\"] = token_num_uses\n __props__.__dict__[\"token_period\"] = token_period\n __props__.__dict__[\"token_policies\"] = token_policies\n __props__.__dict__[\"token_ttl\"] = token_ttl\n __props__.__dict__[\"token_type\"] = token_type\n __props__.__dict__[\"upndomain\"] = upndomain\n __props__.__dict__[\"url\"] = url\n __props__.__dict__[\"use_token_groups\"] = use_token_groups\n __props__.__dict__[\"userattr\"] = userattr\n __props__.__dict__[\"userdn\"] = userdn\n __props__.__dict__[\"userfilter\"] = userfilter\n __props__.__dict__[\"username_as_alias\"] = username_as_alias\n return AuthBackend(resource_name, opts=opts, __props__=__props__)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n description: Optional[pulumi.Input[str]] = None,\n direction: Optional[pulumi.Input[str]] = None,\n ethertype: Optional[pulumi.Input[str]] = None,\n 
port_range_max: Optional[pulumi.Input[int]] = None,\n port_range_min: Optional[pulumi.Input[int]] = None,\n protocol: Optional[pulumi.Input[str]] = None,\n region: Optional[pulumi.Input[str]] = None,\n remote_group_id: Optional[pulumi.Input[str]] = None,\n remote_ip_prefix: Optional[pulumi.Input[str]] = None,\n security_group_id: Optional[pulumi.Input[str]] = None,\n tenant_id: Optional[pulumi.Input[str]] = None) -> 'SecGroupRule':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _SecGroupRuleState.__new__(_SecGroupRuleState)\n\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"direction\"] = direction\n __props__.__dict__[\"ethertype\"] = ethertype\n __props__.__dict__[\"port_range_max\"] = port_range_max\n __props__.__dict__[\"port_range_min\"] = port_range_min\n __props__.__dict__[\"protocol\"] = protocol\n __props__.__dict__[\"region\"] = region\n __props__.__dict__[\"remote_group_id\"] = remote_group_id\n __props__.__dict__[\"remote_ip_prefix\"] = remote_ip_prefix\n __props__.__dict__[\"security_group_id\"] = security_group_id\n __props__.__dict__[\"tenant_id\"] = tenant_id\n return SecGroupRule(resource_name, opts=opts, __props__=__props__)", "async def get_certificate(self, certificate_name: str, **kwargs) -> KeyVaultCertificate:\n bundle = await self._client.get_certificate(\n vault_base_url=self.vault_url,\n certificate_name=certificate_name,\n certificate_version=\"\",\n **kwargs\n )\n return KeyVaultCertificate._from_certificate_bundle(certificate_bundle=bundle)", "def _get_cached_certificate_with_key(self, cache_id):\n with stats.timer('get_cached_certificate_with_key'):\n item = self.cache.get(cache_id)\n # We're the first thread attempting to get this certificate\n if not item:\n return {}\n # A certificate hasn't been issued yet, but since the cache id\n # exists, another thread has requested the certificate.\n if not item.response and item.lock:\n raise CertificateNotReadyError()\n # If the other thread failed to get the certificate, we need to\n # ensure that this thread attempts to fetch a certificate.\n return item.response", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Subscription':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = dict()\n\n __props__[\"accessed_at\"] = None\n __props__[\"auto_delete_on_idle\"] = None\n __props__[\"count_details\"] = None\n __props__[\"created_at\"] = None\n __props__[\"dead_lettering_on_filter_evaluation_exceptions\"] = None\n __props__[\"dead_lettering_on_message_expiration\"] = None\n __props__[\"default_message_time_to_live\"] = None\n __props__[\"enable_batched_operations\"] = None\n __props__[\"entity_availability_status\"] = None\n __props__[\"is_read_only\"] = None\n __props__[\"location\"] = None\n __props__[\"lock_duration\"] = None\n __props__[\"max_delivery_count\"] = None\n __props__[\"message_count\"] = None\n __props__[\"name\"] = None\n __props__[\"requires_session\"] = None\n __props__[\"status\"] = None\n __props__[\"type\"] = None\n __props__[\"updated_at\"] = None\n return Subscription(resource_name, opts=opts, __props__=__props__)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n arn: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n license_count: Optional[pulumi.Input[int]] = None,\n license_count_hard_limit: Optional[pulumi.Input[bool]] = None,\n 
license_counting_type: Optional[pulumi.Input[str]] = None,\n license_rules: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n name: Optional[pulumi.Input[str]] = None,\n owner_account_id: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'LicenseConfiguration':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _LicenseConfigurationState.__new__(_LicenseConfigurationState)\n\n __props__.__dict__[\"arn\"] = arn\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"license_count\"] = license_count\n __props__.__dict__[\"license_count_hard_limit\"] = license_count_hard_limit\n __props__.__dict__[\"license_counting_type\"] = license_counting_type\n __props__.__dict__[\"license_rules\"] = license_rules\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"owner_account_id\"] = owner_account_id\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"tags_all\"] = tags_all\n return LicenseConfiguration(resource_name, opts=opts, __props__=__props__)", "def credential_get(uniqueID: str):\n\n cert = safeisland.certificate(uniqueID)\n return {\"payload\": cert}", "def get_ssl_certificate() :", "def lookup(job_id: str) -> JobState:\n job = JobState(job_id)\n job.update()\n return job", "def get_by_id(c_id):\n return cr.get_by_id(c_id)" ]
[ "0.6610114", "0.63369817", "0.6177877", "0.6108327", "0.574682", "0.5677764", "0.5492402", "0.5390177", "0.5345001", "0.5200239", "0.51293224", "0.5123964", "0.508775", "0.50864744", "0.504956", "0.5040337", "0.50251144", "0.5013605", "0.49793026", "0.49746943", "0.49307314", "0.49166772", "0.48898554", "0.4884491", "0.4880234", "0.48757252", "0.48556778", "0.4838704", "0.47963178", "0.47802937" ]
0.70121515
0
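The SSLCertificate.get classmethod in the row above rebuilds a resource's state from an existing id instead of provisioning a new certificate. A minimal usage sketch, assuming this is the pulumi-gcp compute SSLCertificate resource; the resource name, project, and certificate id values are illustrative placeholders.

```python
import pulumi
import pulumi_gcp as gcp

# Adopt an already-provisioned certificate by id instead of creating a new one;
# the id format and names here are placeholders, not real resources.
existing_cert = gcp.compute.SSLCertificate.get(
    "imported-cert",
    id="projects/my-project/global/sslCertificates/my-cert",
)

pulumi.export("certExpireTime", existing_cert.expire_time)
pulumi.export("certSelfLink", existing_cert.self_link)
```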
Check whether the user wants to finish, performing some verification of the input.
def check_if_user_has_finished(): ok_to_finish = True user_input_accepted = False while not user_input_accepted: user_input = input("Do you want to finish (y/n): ").lower() if user_input == 'y': user_input_accepted = True elif user_input == 'n': ok_to_finish = False user_input_accepted = True else: print('Response must be (y/n), please try again') return ok_to_finish
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_or_exit(msg):\n while True:\n user_input = raw_input(\"%s (y/n): \" % msg).lower()\n if user_input in ['y', 'yes']:\n print\n return\n if user_input in ['n', 'no']:\n print\n print_warning(\"Please complete the required steps and then \"\n \"re-run the script.\")\n sys.exit(1)", "def end_input(self):\n inp = input()\n if inp.upper() == \"Q\":\n return False\n if inp == \"\" \\\n \"\":\n return True\n return self.end_input", "def stop(self):\n command = input(\"Enter anything to finish (or 'exit' to cancel)>>>\")\n return command != 'exit'", "def checkInput(userInput):\n if userInput == 'exit':\n return 0\n return 1", "def continue_exit(data):\n print('-' * 80)\n print(\"You can return to the main menu to review a different section,\")\n print(\"or you can terminate the programme.\\n\")\n\n while True:\n result = input(\"Would you like to continue? 'y'/'n': \\n\")\n if validate_choice(result):\n if result == 'y' or result == 'Y':\n print(f\"You typed '{result}'. Returning you to the menu.\\n\")\n print(\"Reloading menu...\")\n print(\"\")\n print('-' * 80)\n return True\n else:\n print(f\"You typed '{result}'. Programme will terminate...\\n\")\n print(\"Thanks for using Promotional Sales Review System...\\n\")\n print(\"Have a nice day! :-)\\n\")\n print('-' * 80)\n return False\n else:\n print(\"Returning to User choice...\")\n print('-' * 80)", "def keep_going(text=\"Do you wish to continue? Answer Y or N.\"):\n answer = input(text)\n\n if answer == 'Y':\n print(\"The script is now running....\")\n else:\n print(\"You have chosen to quit this program\")\n raise SystemExit", "def isFinished():", "def isFinished():", "def isFinished():", "def _check_for_incomplete_input(self):\n pass", "def handle_inputs(self):\n user_input = \"\"\n while user_input != \"exit\":\n self.print_divider()\n user_input = input()\n self.do_action_for_input(user_input)", "def _check_results(self):\n if not 'EXECUTION OF GAMESS TERMINATED NORMALLY' in self.file_dic['output']:\n print self.job_name + \" didn't finish\"\n raise TypeError('Calculation didn\\'t finish')", "def _is_user_wants_to_continue(self):\n\n # dummy value to get in while\n user_input = -1\n while user_input != 1 and user_input != 2:\n\n try:\n # convert the string into int\n user_input = int(input())\n except ValueError:\n print(\"Please enter a number\")\n continue\n except Exception as e:\n print(\"something went wrong please try again \" + str(e))\n continue\n\n # check if the user_input was one of the options\n # if not present a error massage and try again\n if user_input != 1 and user_input != 2:\n print(\"Please enter a valid number(1-2)\")\n continue\n\n return user_input == 1", "def is_done():\n return False", "def wait_for_input(self):\n pass", "def user_input():\n ans = input('Continue? : y/n ')\n if ans == 'n':\n return False\n else:\n return True", "def wait() -> None:\n\n process_input(input())", "def main():\n user_input = user_input_state()\n check_user_input(user_input)", "def finish():\n pass", "def done(self) -> bool:", "def validate_action(self, message=\"This action may delete data from the database. This action cannot be undone.\\nDo you wish to continue? (Y/N): \"):\n \n while True:\n print('\\n\\n')\n inp = input(message)\n \n if (inp.upper() == 'Y'):\n return True\n elif (inp.upper() == 'N'):\n return False\n \n print(\"Invalid input. 
Try again\")", "def wait_for_user_input():\n\n input(\"Pulse ENTER para continuar...\")", "def successful_unlock_eng(self):\n choice = input(\"Do you want to start the repair now? [Y/N]: \")\n if choice.lower() == 'y':\n print(\"Repair in process..\")\n else:\n self.successful_unlock_eng()", "def _is_done(self):\n pass", "def check_game_end(self):\r\n\r\n if np.all(self.remaining == -1): # end of game\r\n self.show_results() # show the final results\r\n sys.exit() # exit the program\r", "def continue_with_outgoing():\n\n print('If you proceed with the release, they will be included in the '\n 'release and pushed.')\n print('Are you sure about continuing the release process?')\n\n while True:\n choice = raw_input('Please choose (yes / no): ').lower().strip()\n\n if choice == 'yes':\n return True\n if choice == 'no':\n return False", "def finish():", "def finish():", "def finish():", "def finish():" ]
[ "0.7116285", "0.6960877", "0.6919499", "0.65994936", "0.65895295", "0.65874827", "0.65681756", "0.65681756", "0.65681756", "0.65426457", "0.6500775", "0.649839", "0.6491906", "0.6482516", "0.6437172", "0.64225864", "0.6373759", "0.63676196", "0.63667154", "0.634533", "0.62921405", "0.6264746", "0.6259476", "0.62191117", "0.6191152", "0.61731815", "0.6152138", "0.6152138", "0.6152138", "0.6152138" ]
0.792566
0
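A small, self-contained sketch of how a confirmation loop like check_if_user_has_finished in the row above is typically consumed; the per-iteration work step is a placeholder assumption.

```python
def ask_if_finished(prompt="Do you want to finish (y/n): "):
    # Same validation loop as check_if_user_has_finished above.
    while True:
        answer = input(prompt).lower()
        if answer in ("y", "n"):
            return answer == "y"
        print("Response must be (y/n), please try again")

def run_session():
    finished = False
    while not finished:
        print("...one round of work would happen here...")  # placeholder step
        finished = ask_if_finished()
    print("Done.")

if __name__ == "__main__":
    run_session()
```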
Generate image from Blender scene file (.blend)
def generate_blenderimage(scene_file, output=None, script_file=None, frame=1): cmd = [BLENDER, "-b", scene_file, "-y"] previous_wd = os.getcwd() os.chdir(os.path.dirname(os.path.abspath(scene_file))) if script_file: cmd.append("-P") cmd.append(script_file) if output: outbase, ext = os.path.splitext(output) cmd.append("-o") cmd.append(output) print(ext) print(ext[1:]) if ext: cmd.append("-F") cmd.append(ext[1:].upper()) cmd.append("-noaudio") cmd.append("-f") cmd.append(str(frame)) print(cmd) exec_cmd(cmd) os.chdir(previous_wd)
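generate_blenderimage above assembles a Blender background-mode command line and executes it. A hedged sketch of the equivalent direct invocation via subprocess; the executable name, scene path, output pattern, and frame number are illustrative assumptions, with blender assumed to be on PATH.

```python
import subprocess

# Roughly the command line generate_blenderimage assembles, run directly;
# "scene.blend", the output pattern, and the frame number are placeholders.
cmd = [
    "blender", "-b", "scene.blend", "-y",
    "-o", "/tmp/render_####",
    "-F", "PNG",
    "-noaudio",
    "-f", "1",
]
subprocess.run(cmd, check=True)
```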
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def filesToBlender(context, prefix, max_blocks=200):\n # Get reference matrix\n refMatrix = None\n if context.scene.maps_models_importer_is_ref_matrix_valid:\n values = context.scene.maps_models_importer_ref_matrix\n refMatrix = Matrix((values[0:4], values[4:8], values[8:12], values[12:16]))\n\n drawcallId = 0\n while max_blocks <= 0 or drawcallId < max_blocks:\n if not os.path.isfile(\"{}{:05d}-indices.bin\".format(prefix, drawcallId)):\n break\n\n try:\n indices, positions, uvs, img, constants = loadData(prefix, drawcallId)\n except FileNotFoundError as err:\n print(\"Skipping ({})\".format(err))\n continue\n\n uvOffsetScale, matrix, refMatrix = extractUniforms(constants, refMatrix)\n\n # Make triangles from triangle strip index buffer\n n = len(indices)\n tris = [ [ indices[i+j] for j in [[0,1,2],[0,2,1]][i%2] ] for i in range(n - 3)]\n tris = [ t for t in tris if t[0] != t[1] and t[0] != t[2] and t[1] != t[2] ]\n verts = [ [ p[0], p[1], p[2] ] for p in positions ]\n\n [ou, ov, su, sv] = uvOffsetScale\n uvs = [ [ (floor(u * 65535.0 + 0.5) + ou) * su, (floor(v * 65535.0 + 0.5) + ov) * sv ] for u, v in uvs ]\n \n if len(indices) == 0:\n continue\n\n mesh_name = \"BuildingMesh-{:05d}\".format(drawcallId)\n obj = addMesh(context, mesh_name, verts, tris, uvs)\n obj.matrix_world = matrix\n\n mat_name = \"BuildingMat-{:05d}\".format(drawcallId)\n addImageMaterial(mat_name, obj, img)\n\n drawcallId += 1\n\n # Save reference matrix\n if refMatrix:\n values = sum([list(v) for v in refMatrix], [])\n context.scene.maps_models_importer_ref_matrix = values\n context.scene.maps_models_importer_is_ref_matrix_valid = True", "def generate_img_with_params(scene_file, script_name=\"tmp.py\", xres=800,\n yres=600, crop=None, use_compositing=False,\n output=None, frame=1):\n\n if crop is None:\n crop = [0, 1, 0, 1]\n\n crop_file_src = generate_blender_crop_file([xres, yres], [crop[0], crop[1]],\n [crop[2], crop[3]], use_compositing)\n\n scene_dir = os.path.dirname(os.path.abspath(scene_file))\n new_scriptpath = os.path.join(scene_dir, script_name)\n\n with open(new_scriptpath, 'w') as f:\n f.write(crop_file_src)\n\n generate_blenderimage(scene_file, output, new_scriptpath, frame)", "def setup_scene_for_rgb_render(scene, outdir):\n # Use node rendering for python control\n scene.use_nodes = True\n tree = scene.node_tree\n links = tree.links\n\n # Make sure there are no existing nodes\n for node in tree.nodes:\n tree.nodes.remove(node)\n\n # Set up a renderlayer and plug it into our remapping layer\n inp = tree.nodes.new('CompositorNodeRLayers')\n\n if (bpy.app.version[1] >= 70): # Don't apply color transformation -- changed in Blender 2.70\n scene.view_settings.view_transform = 'Raw'\n scene.sequencer_colorspace_settings.name = 'Non-Color'\n\n # Save it out\n if outdir:\n out = tree.nodes.new('CompositorNodeOutputFile')\n ident = str(uu.uuid4())\n out.file_slots[0].path = ident\n out.base_path = outdir\n # out.format.color_mode = 'BW'\n # out.format.color_depth = settings.DEPTH_BITS_PER_CHANNEL\n out.format.color_mode = 'RGB'\n out.format.color_depth = settings.COLOR_BITS_PER_CHANNEL\n out.format.file_format = settings.PREFERRED_IMG_EXT.upper()\n links.new(inp.outputs[0], out.inputs[0])\n ext = utils.img_format_to_ext[settings.PREFERRED_IMG_EXT.lower()]\n temp_filename = \"{0}0001.{1}\".format(ident, ext)\n return os.path.join(outdir, temp_filename)\n else:\n out = tree.nodes.new('CompositorNodeComposite')\n links.new(inp.outputs[0], out.inputs[0])\n return None", "def snapshot(self, components=4):\n fbo = 
self.fbo\n data = fbo.read(components=3)\n from PIL import Image\n return Image.frombytes('RGB', fbo.size, data).transpose(Image.FLIP_TOP_BOTTOM)", "def get_texture(filename, x, y):\r\n return arcade.load_texture(filename, x * 64, y * 64, width=64, height=64)", "def get_texture(filename, x, y):\r\n return arcade.load_texture(filename, x * 64, y * 64, width=64, height=64)", "def uvSnapshot(arg=None):\n\n FILE_NAME = 'uv.jpg'\n RESOLUTION = 4096\n\n sel = getListSelection()\n ac = autoConnect.AutoConnect()\n\n if ac.PHOTOSHOP_PATH is None:\n raise WindowsError('Couldn\\'t find Adobe Photoshop.')\n\n for s in sel:\n shaderName = shaderUtility.customStringToShaderName(s)\n if [f for f in ac.DATA.keys() if shaderName == f] == []:\n print '# Shader doesn\\'t have an AutoConnect setup.'\n continue\n\n # Launch photoshop process\n\n editTexture()\n\n usedBy = shaderUtility.data[shaderName]['usedBy']\n parents = []\n for u in usedBy:\n parent = cmds.listRelatives(u, allParents=True,\n path=True)[0]\n parents.append(parent)\n cmds.select(parents)\n\n p = path.normpath(path.join(ac.workspace, ac.sourceImages,\n shaderName))\n if os.path.isdir(p) is not True:\n os.mkdir(p)\n path.normpath(path.join(p, FILE_NAME))\n cmds.uvSnapshot(\n name=path.normpath(path.join(p, FILE_NAME)),\n overwrite=True,\n antiAliased=True,\n fileFormat='jpg',\n xResolution=RESOLUTION,\n yResolution=RESOLUTION,\n )\n\n # Let's call Photoshop\n\n script = psCommand.script\n PS_SCRIPT = script.replace('<UV_Image_Path>',\n path.normpath(path.join(p,\n FILE_NAME)).replace('\\\\', '\\\\\\\\'\n )).replace('<Texture_PSD_Name>',\n '%s.psd' % shaderName)\n\n tempDir = tempfile.gettempdir()\n scriptFile = 'psScript.jsx'\n\n p = path.join(tempDir, scriptFile)\n f = open(p, 'w')\n f.write(PS_SCRIPT)\n f.close()\n\n cmd = '\"%s\" \"%s\"' % (path.normpath(ac.PHOTOSHOP_PATH),\n path.normpath(p))\n process = QProcess()\n process.startDetached(cmd)", "def _create_image(self):\n if hasattr(self, '_image') and self._image:\n return self._image\n try:\n command = \"tex2im -b transparent -t cyan\"\n subprocess.run([*command.split(), self._formula])\n except Exception as e:\n import traceback\n print(traceback.format_exc())\n return None\n # tex2im converts to out.png by default\n img = Image.open('out.png').convert('RGBA')\n # create a new rgba image to blend the latex with the alpha\n subprocess.run([\"rm\", \"out.png\"])\n return img", "def render_sample(latents, material_names, include_lights, output_filename, save_scene):\n\n # set output path\n bpy.context.scene.render.filepath = output_filename\n\n # set objects and lights\n update_objects_and_lights(latents, material_names, include_lights)\n\n rgba_background = colorsys.hsv_to_rgb(latents[9] / (2.0 * np.pi), 0.60, 1.0) + (\n 1.0,\n )\n render_utils.change_material(\n bpy.data.objects[\"Ground\"].data.materials[-1], Color=rgba_background\n )\n\n # set scene background\n bpy.ops.render.render(write_still=True)\n\n if save_scene:\n # just for debugging\n bpy.ops.wm.save_as_mainfile(\n filepath=f\"scene_{os.path.basename(output_filename)}.blend\"\n )", "def tex(self):\r\n self.load_opengl()\r\n return self._tex", "def _export_texture_type_image(self, bo, layer, slot):\n texture = slot.texture\n layer_props = texture.plasma_layer\n mipmap = texture.use_mipmap\n\n # Does the image have any alpha at all?\n if texture.image is not None:\n has_alpha = texture.use_calculate_alpha or slot.use_stencil or self._test_image_alpha(texture.image)\n if (texture.image.use_alpha and texture.use_alpha) and not 
has_alpha:\n warning = \"'{}' wants to use alpha, but '{}' is opaque\".format(texture.name, texture.image.name)\n self._exporter().report.warn(warning, indent=3)\n else:\n has_alpha = True\n\n # First, let's apply any relevant flags\n state = layer.state\n if not slot.use_stencil and not slot.use_map_normal:\n # mutually exclusive blend flags\n if texture.use_alpha and has_alpha:\n if slot.blend_type == \"ADD\":\n state.blendFlags |= hsGMatState.kBlendAlphaAdd\n elif slot.blend_type == \"MULTIPLY\":\n state.blendFlags |= hsGMatState.kBlendAlphaMult\n else:\n state.blendFlags |= hsGMatState.kBlendAlpha\n\n if texture.invert_alpha and has_alpha:\n state.blendFlags |= hsGMatState.kBlendInvertAlpha\n\n if texture.extension in {\"CLIP\", \"EXTEND\"}:\n state.clampFlags |= hsGMatState.kClampTexture\n\n # Now, let's export the plBitmap\n # If the image is None (no image applied in Blender), we assume this is a plDynamicTextMap\n # Otherwise, we toss this layer and some info into our pending texture dict and process it\n # when the exporter tells us to finalize all our shit\n if texture.image is None:\n dtm = self._mgr.find_create_object(plDynamicTextMap, name=\"{}_DynText\".format(layer.key.name), bl=bo)\n dtm.hasAlpha = texture.use_alpha\n # if you have a better idea, let's hear it...\n dtm.visWidth, dtm.visHeight = 1024, 1024\n layer.texture = dtm.key\n else:\n detail_blend = TEX_DETAIL_ALPHA\n if layer_props.is_detail_map and mipmap:\n if slot.blend_type == \"ADD\":\n detail_blend = TEX_DETAIL_ADD\n elif slot.blend_type == \"MULTIPLY\":\n detail_blend = TEX_DETAIL_MULTIPLY\n\n # Herp, derp... Detail blends are all based on alpha\n if layer_props.is_detail_map and not state.blendFlags & hsGMatState.kBlendMask:\n state.blendFlags |= hsGMatState.kBlendDetail\n\n allowed_formats = {\"DDS\"} if mipmap else {\"PNG\", \"BMP\"}\n self.export_prepared_image(texture=texture, owner=layer,\n use_alpha=has_alpha, force_calc_alpha=slot.use_stencil,\n is_detail_map=layer_props.is_detail_map,\n detail_blend=detail_blend,\n detail_fade_start=layer_props.detail_fade_start,\n detail_fade_stop=layer_props.detail_fade_stop,\n detail_opacity_start=layer_props.detail_opacity_start,\n detail_opacity_stop=layer_props.detail_opacity_stop,\n mipmap=mipmap, allowed_formats=allowed_formats,\n indent=3)", "def render_and_save():\n\n rendering_config = configuration.get_config()\n rendering_config = ml_collections.FrozenConfigDict(rendering_config)\n aspect_ratio = rendering_config.aspect_ratio\n height = rendering_config.height\n width = int(aspect_ratio * height)\n\n scene_camera = build_camera(rendering_config, aspect_ratio)\n world = build_world(rendering_config)\n\n # Render.\n logging.info(\"Tracing rays...\")\n render_image_fn = jax.jit(\n render.generate_image,\n static_argnames=[\"height\", \"width\", \"config\"])\n image = render_image_fn(height, width, scene_camera, world, rendering_config)\n\n image = render.correct_gamma(image, gamma=rendering_config.gamma_correction)\n\n logging.info(\"Saving to file...\")\n output.export_as_ppm(image, rendering_config.output_file)\n\n return image", "def main():\n viewer = Viewer()\n shader = Shader(\"Shaders/poisson.vert\", \"Shaders/poisson.frag\")\n shaderLight = Shader(\"Shaders/phong.vert\", \"Shaders/phong.frag\")\n\n # Création du terrain\n node_terrain = Node(transform=translate((-250,-11.7,-250)))\n node_terrain.add(Terrain(\"Terrain/sand.jpg\",viewer.depth))\n viewer.add(node_terrain)\n\n # Chargement de tous les objets\n # Chaque objet est mis dans un node qui 
permet de définir\n # sa position, rotation et taille et aussi de se rattacher\n # à une autre entité\n\n barnabe_obj = \"Fish/Fish/WhaleShark/WhaleShark.obj\"\n barnabe_png = \"Fish/Fish/WhaleShark/WhaleShark_Base_Color.png\"\n\n hector_obj = \"Fish/Fish/ReefFish5/ReefFish5.obj\"\n hector_png = \"Fish/Fish/ReefFish5/ReefFish5_Base_Color.png\"\n\n susie_obj = \"Fish/Fish/SeaHorse/SeaHorse.obj\"\n susie_png = \"Fish/Fish/SeaHorse/SeaHorse_Base_Color.png\"\n\n edgar_obj = \"Fish/Fish/BlueTang/BlueTang.obj\"\n edgar_png = \"Fish/Fish/BlueTang/BlueTang_Base_Color.png\"\n\n nemo_obj = \"Fish/Fish/ClownFish2/ClownFish2.obj\"\n nemo_png = \"Fish/Fish/ClownFish2/Clownfish2_Base_Color.png\"\n\n caroline_obj = \"Fish/Fish/Turtle/Turtle.obj\"\n caroline_png = \"Fish/Fish/Turtle/Turtle.jpg\"\n\n corail_obj = \"Fish/Fish/Corail/Corail.obj\"\n corail_png = \"Fish/Fish/Corail/Corail.jpg\"\n\n sebastien_obj = \"Fish/Fish/Crab/Crab.obj\"\n sebastien_png = \"Fish/Fish/Crab/Crab.jpg\"\n\n star_obj = \"Fish/Fish/BlueStarfish/BluieStarfish.obj\"\n star_png = \"Fish/Fish/BlueStarfish/BlueStarfish_Base_Color.png\"\n\n cube_obj = \"Fish/Fish/Cube/cube.obj\"\n cube_png = \"Fish/Fish/Cube/cube.png\"\n\n suzanne_obj = \"Fish/Fish/Suzanne/Suzanne.obj\"\n\n barnabe_node = Node(2)\n meshes = load_shadowed_texture(barnabe_obj, shader, viewer.depth, barnabe_png, 2)\n for mesh in meshes:\n barnabe_node.add(mesh)\n\n edgar_node = Node(0, transform=translate((1.5, 0.0, 1.5)) @ scale((0.1, 0.1, 0.1)))\n meshes = load_shadowed_texture(edgar_obj, shader, viewer.depth, edgar_png, 0)\n for mesh in meshes:\n edgar_node.add(mesh)\n barnabe_node.add(edgar_node)\n viewer.add(barnabe_node)\n\n cube_node = Node(transform=translate((5.0,-5.0,0.0)))\n meshes = load_shadowed_texture(cube_obj, shader,viewer.depth, cube_png, 1)\n for mesh in meshes:\n cube_node.add(mesh)\n\n suzanne_bubble_node = Node(transform=translate((-0.4,-0.25,0.65)))\n suzanne_node = Node(transform=scale((0.25,0.25,0.25)) @ rotate((0,1,1),-30))\n meshes = load_phong_mesh(suzanne_obj,shaderLight,viewer.depth)\n for mesh in meshes:\n suzanne_node.add(mesh)\n suzanne_bubble_node.add(suzanne_node)\n\n bubble_translate = {0: vec(-0.15,-0.17,0.25), 3: vec(-0.2,0,0.25), 5: vec(-0.15, 0.15, 0.25), 7: vec(-0.175, 0.27, 0.25)}\n bubble_rotate = {0: quaternion()}\n bubble_scale = {0: 0.02, 7: 0.06}\n bubble_node = KeyFrameControlNode(bubble_translate, bubble_rotate, bubble_scale)\n bubble_node.add(Bubble(15))\n suzanne_bubble_node.add(bubble_node)\n cube_node.add(suzanne_bubble_node)\n\n susie_trans = {0: vec(1.2, 1, 0), 2: vec(1.2, 2, 0), 5: vec(1.2, 1, 0)}\n susie_scale = {0: 0.03}\n susie_rotate = {0: quaternion_from_axis_angle((0, 1, 0), degrees=-45)}\n susie_node = KeyFrameControlNode(susie_trans, susie_rotate, susie_scale)\n meshes = load_shadowed_texture(susie_obj, shader, viewer.depth, susie_png, 1)\n for mesh in meshes:\n susie_node.add(mesh)\n cube_node.add(susie_node)\n\n susie2_trans = {0: vec(1, 2, 0), 3: vec(1, 1.5, 0), 5: vec(1, 2, 0)}\n susie2_scale = {0: 0.05}\n susie2_rotate = {0: quaternion_from_axis_angle((0, 1, 0), degrees=45)}\n susie2_node = KeyFrameControlNode(susie2_trans, susie2_rotate, susie2_scale)\n meshes = load_shadowed_texture(susie_obj, shader, viewer.depth, susie_png, 1)\n for mesh in meshes:\n susie2_node.add(mesh)\n cube_node.add(susie2_node)\n\n nemo_trans = {0: vec(-25, 2, 25), 22: vec(-2, 2, 2), 40: vec(20, 2, -20)}\n nemo_scale = {0: 0.1}\n nemo_rotate = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = 
KeyFrameControlNode(nemo_trans, nemo_rotate, nemo_scale)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans2 = {0: vec(-28, 2, 26), 20: vec(0, 2, 3), 40: vec(20, 2, -23)}\n nemo_scale2 = {0: 0.07}\n nemo_rotate2 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans2, nemo_rotate2, nemo_scale2)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans3 = {0: vec(-22, 2, 21), 41: vec(20, 2, -20)}\n nemo_scale3 = {0: 0.07}\n nemo_rotate3 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans3, nemo_rotate3, nemo_scale3)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans4 = {0: vec(-22, 2.3, 21), 39: vec(20, 2.5, -20)}\n nemo_scale4 = {0: 0.07}\n nemo_rotate4 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans4, nemo_rotate4, nemo_scale4)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans5 = {0: vec(-22, 2.2, 21), 36: vec(30, 2.2, -20)}\n nemo_scale5 = {0: 0.1}\n nemo_rotate5 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans5, nemo_rotate5, nemo_scale5)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans6 = {0: vec(-20, 1.7, 21), 38: vec(30, 2, -20)}\n nemo_scale6 = {0: 0.1}\n nemo_rotate6 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans6, nemo_rotate6, nemo_scale6)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n star_node = Node(transform=translate((0,0.5,0)) @ scale((0.3, 0.3, 0.3)))\n meshes = load_shadowed_texture(star_obj, shader, viewer.depth, star_png, 1)\n for mesh in meshes:\n star_node.add(mesh)\n cube_node.add(star_node)\n\n translate_keys = {0: vec(1,-0.5,0.5), 10: vec(1,-0.5,-0.5), 20: vec(1,-0.5,0.5)}\n rotate_keys = {0: quaternion_mul(quaternion_from_axis_angle((1,0,0), degrees=-90), quaternion_from_axis_angle((0,0,1), degrees=90))}\n scale_keys = {0: 0.02}\n sebastien_node = KeyFrameControlNode(translate_keys, rotate_keys, scale_keys)\n meshes = load_shadowed_texture(sebastien_obj, shader, viewer.depth, sebastien_png, 1)\n for mesh in meshes:\n sebastien_node.add(mesh)\n cube_node.add(sebastien_node)\n viewer.add(cube_node)\n\n corail_turtle_node = Node(transform=translate((2.5, -5.0, -5.0)))\n corail_node = Node(transform=scale((0.01,0.01,0.01)))\n meshes = load_shadowed_texture(corail_obj, shader, viewer.depth, corail_png, 1)\n for mesh in meshes:\n corail_node.add(mesh)\n corail_turtle_node.add(corail_node)\n\n hector_trans = {0: vec(-0.5, 1, 0.5)}\n hector_scale = {0: 0.07}\n hector_rotate = {0: quaternion_from_axis_angle((0,1,0), degrees=-90), 1: quaternion_from_axis_angle((0,1,0), degrees=-180), 2: quaternion_from_axis_angle((0,1,0), degrees=-270), 3: quaternion_from_axis_angle((0,1,0), degrees=-360), 4: quaternion_from_axis_angle((0,1,0), degrees=-90)}\n hector_node = 
KeyFrameControlNode(hector_trans, hector_rotate, hector_scale)\n meshes = load_shadowed_texture(hector_obj, shader,viewer.depth, hector_png, 3)\n for mesh in meshes:\n hector_node.add(mesh)\n corail_turtle_node.add(hector_node)\n\n caroline_node = Node(transform=translate((-0.5, 0.5, 0.0)) @ scale((0.01,0.01,0.01)) @ rotate((1,0,0), 270) @ rotate((0,0,1),315) @ rotate((0,1,0), 45))\n meshes = load_shadowed_texture(caroline_obj, shader,viewer.depth,caroline_png, 0)\n for mesh in meshes:\n caroline_node.add(mesh)\n corail_turtle_node.add(caroline_node)\n viewer.add(corail_turtle_node)\n\n # Commande de clavier\n print(\"\\n\\n ----------------- Les commandes de clavier sont les flèches ------------------- \\n\\n\")\n\n # start rendering loop\n viewer.run()", "def toPng(self):\n\t\tif self.isPng:\n\t\t\treturn self\n\t\telse:\n\t\t\treturn textureFile( self.path.replace( self.extension, '.png' ) )", "def atlas_load(filename):\n\tt = pyglet.image.load(os.path.join('textures','ground.png'))\n\ttex = atlas.add(t)\n\treturn tex", "def render_rgb_img(scene, save_path):\n save_path_dir, img_filename = os.path.split(save_path)\n with Profiler(\"Render\") as prf:\n\n utils.set_preset_render_settings(scene, presets=['BASE', 'NON-COLOR'])\n render_save_path = setup_scene_for_rgb_render(scene, save_path_dir)\n prf.step(\"Setup\")\n\n bpy.ops.render.render()\n prf.step(\"Render\")\n\n with Profiler(\"Saving\") as prf:\n shutil.move(render_save_path, save_path)", "def export_image(self, name):\n\t\tred = Color(\"red\")\n\t\tblue = Color(\"blue\")\n\t\twhite = Color(\"white\")\n\t\tblack = Color(\"black\")\n\t\tgold = Color(\"gold\")\n\t\trgb_gold = []\n\t\tfor part in gold.rgb:\n\t\t\tpart = part * 255\n\t\t\trgb_gold.append(part)\n\t\trgb_black = []\n\t\tfor part in black.rgb:\n\t\t\tpart = part * 255\n\t\t\trgb_black.append(part)\n\t\trgb_white = []\n\t\tfor part in white.rgb:\n\t\t\tpart = part * 255\n\t\t\trgb_white.append(part)\n\t\tcolours = list(red.range_to(blue, int(self.grains)))\n\t\timage = np.zeros([self.space.shape[1],self.space.shape[0], 3], dtype=np.uint(8))\n\t\tfor grain in range(self.grains+1):\n\t\t\trgb = []\n\t\t\tfor part in colours[grain-1].rgb:\n\t\t\t\tpart = part * 255\n\t\t\t\trgb.append(part)\n\t\t\tfor cell in self.space.flat:\n\t\t\t\tif cell.state == grain:\n\t\t\t\t\tx,y = cell.find_id()\n\t\t\t\t\timage[x,y] = rgb\n\t\t\t\tif cell.state == 999:\n\t\t\t\t\tx,y = cell.find_id()\n\t\t\t\t\timage[x,y] = rgb_black\n\t\t\t\tif cell.state == 500:\n\t\t\t\t\tx,y = cell.find_id()\n\t\t\t\t\timage[x,y] = rgb_gold\n\t\timg = Image.fromarray(image.astype('uint8'))\n\t\timg = img.resize((self.space.shape[1]*3,self.space.shape[0]*3))\n\t\timg.save('./static/temp/'+str(name)+'.png')", "def TextureFiles():\n import shutil\n\n # first convert the .psd files to .png\n\n FbmDir = glo.outputFolder + '.fbm'\n\n for d1, d2, filenames in os.walk(FbmDir):\n for filename in filenames:\n \"\"\"filename: vitrin_diffuse.psd\n \"\"\"\n # print \"TextureFiles():\", filename\n if filename[-4:].upper() == '.PSD':\n #print \" -- FbmDir:\" , FbmDir\n #print \" -- in the if clause with filename:\" , filename\n #print \" -- glo.outputFolder\" , glo.outputFolder\n # FbmDir = '../fbx/simplelifeembedmedia.fbm'\n # filename = 'shelves_light.PSD'\n PsdToPngConverter(FbmDir, filename)\n\n # Move only the .png file to the ../png/ directory\n filename = filename[:-4] + '.png'\n src = os.path.join(FbmDir, filename)\n elif filename[0] != '.':\n src = os.path.join(FbmDir, filename)\n pass\n\n shutil.copy(src, 
glo.outputFolder)\n print os.path.join(glo.outputFolder, filename), \"\\n\"\n sys.stdout.flush()\n # for d1, d2, files in os.walk(glo.outputFolder):\n # if not filename in files:\n # #print \"moving: \", files, filename, not filename in files\n # shutil.copy(src, glo.outputFolder)\n # print os.path.join(glo.outputFolder, filename), \"\\n\"\n # else:\n # print \"%s/%s already exists. File not moved\" % (glo.outputFolder,filename)", "def generate_image(self):\n pass", "def import_scene(file_path):\n\n pass", "def construct_blend_dict(assets):\r\n\tblend_dict = {}\r\n\tfor blend in assets:\r\n\t\tblend_dict[blend] = []\r\n\r\n\t\tfile = open(blend, 'rb')\r\n\t\tdata = str(file.read())\r\n\t\tfile.close()\r\n\r\n\t\tparts = data.split(\"OB\")\r\n\t\tfor n in range(len(parts)):\r\n\t\t\tobj_name = \"\"\r\n\t\t\tt = 0\r\n\r\n\t\t\tif n != 0 and parts[n - 1][-2:] == \"00\":\r\n\t\t\t\tif parts[n][:4] != \"JECT\":\r\n\t\t\t\t\tif parts[n][t] not in [\"\\\\\", \" \", \"?\", \".\", \")\", \"(\"]:\r\n\t\t\t\t\t\twhile (parts[n][t] not in [\"\\\\\", \" \"]):\r\n\t\t\t\t\t\t\tobj_name += parts[n][t]\r\n\t\t\t\t\t\t\tt += 1\r\n\r\n\t\t\tif obj_name:\r\n\t\t\t\t#blend_dict[obj_name] = blend\r\n\t\t\t\tblend_dict[blend].append(obj_name)\r\n\r\n\treturn blend_dict", "def render_image(obj_path, occlusion_vec):\n img_path = obj_path.replace('.obj','.png')\n #Adi: Saving the occlusion state in a pickle file as well\n occlusion_path = obj_path.replace('.obj', '')\n with open(occlusion_path, 'wb') as fp:\n pickle.dump(occlusion_vec, fp, protocol=2)\n\n bpy.ops.render.render()\n bpy.data.images['Render Result'].save_render(filepath=img_path)", "def loadImage():\n\timageName = \"images/velazquez_texture_256.jpg\" \n\t# PIL defines an \"open\" method which is Image specific!\n\tim = open(imageName)\n\ttry:\n\t\tix, iy, image = im.size[0], im.size[1], im.tobytes(\"raw\", \"RGBA\", 0, -1)\n\texcept (SystemError, ValueError):\n\t\tix, iy, image = im.size[0], im.size[1], im.tobytes(\"raw\", \"RGBX\", 0, -1)\n\texcept AttributeError:\n\t\tix, iy, image = im.size[0], im.size[1], im.tostring(\"raw\", \"RGBX\", 0, -1)\n\n\t# Generate a texture ID\n\tID = glGenTextures(1)\n\n\t# Make our new texture ID the current 2D texture\n\tglBindTexture(GL_TEXTURE_2D, ID)\n\tglPixelStorei(GL_UNPACK_ALIGNMENT,1)\n\n\t# Copy the texture data into the current texture ID\n\tglTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, ix, iy, 0, GL_RGBA, GL_UNSIGNED_BYTE, image)\n\n\t# Note that only the ID is returned, no reference to the image object or the \n\t# string data is stored in user space. \n\t# The data is only present within the GL after this call exits.\n\treturn ID", "def load_obj_render_BSR(objfile):\n #load a 3d model and render it in 2D\n obj = object3d() \n obj.load(objfile)\n \n #obj.scale_pts( (.1,.2,.1) )\n #obj.rotate_pts( (.1,.1,.1) )\n\n #obj2 = object3d() \n #obj2.load('objects/monkey.obj')\n\n bloody_simple_2drender('2d_render.png', obj=[obj], gridsize=100)", "def rsMakeComp(args):\n\n pathControlSelection = cmds.optionMenu('%s_optionMenu05'\n % windowID, query=True, value=True)\n if cmds.objExists('camera') is False:\n print '# Couldn\\'t find \\'camera\\' #'\n raise RuntimeError('Couldn\\'t find the camera. 
Make sure the main camera is called \\'camera\\''\n )\n\n si = autoConnect.SceneInfo()\n\n if si.isSceneSaved is False:\n print '# Scene has not been saved yet #'\n raise RuntimeError('_\\n%s' % 'Scene hasn\\'t been saved')\n\n DATA = {}\n\n BASE_PATH = si.renders\n START_FRAME = si.startFrame\n END_FRAME = si.endFrame\n DURATION = si.duration\n currentTime = si.currentTime\n currentWidth = si.currentWidth\n currentHeight = si.currentHeight\n FRAME_RATE = si.frameRate\n EXTENSION = 'exr'\n IMAGE_PATH = None\n\n sn = cmds.file(query=True, sn=True, shortName=True)\n if sn:\n # removes the versioning // Studio AKA specific setting.\n SCENE_NAME = (sn.split('.')[0])[:-4]\n else:\n SCENE_NAME = 'untitled_maya_scene'\n OUTPUT_OPTION = [w for w in renderOutput.SIZE_TEMPLATE\n if currentWidth == w['width'] and currentHeight\n == w['height']]\n # Check output templates\n if OUTPUT_OPTION == []:\n raise RuntimeError(\n 'The current output size is not one of the defined templates. This is unsupported.\\n\\\n To continue, select one of the templated output formats.'\n )\n TEMPLATE = None\n IMAGE_PATHS = []\n FOOTAGE_NAMES = []\n MAYA_CAMERA = None\n\n TEMPLATE = OUTPUT_OPTION[0]['suffix']\n\n if pathControlSelection == renderOutput.OUTPUT_TEMPLATES[0]:\n print '# Output path not yet set #'\n raise RuntimeError('Path template is not set. To continue, select one of the output path templates.')\n\n LAYER_NAME = renderSetup.instance().getVisibleRenderLayer().name()\n VERSION = cmds.optionMenu('%s_outputVersionMenu' % windowID,\n query=True, value=True)\n\n # Decode the exr template file\n decoded_exr_template_file = base64.b64decode(templates.EXR_TEMPLATES[TEMPLATE])\n PADDING = str(int(START_FRAME)).zfill(4)\n\n BASE_PATH = rsRenderOutput.pathStr(\n LAYER_NAME,\n long=True\n )\n IMAGE_PATH = '{path}_{padding}.{ext}'.format(\n path=BASE_PATH,\n padding=PADDING,\n ext=EXTENSION\n )\n\n if 'layout' in LAYER_NAME:\n IMAGE_PATH = '{path}.{padding}.{ext}'.format(\n path=BASE_PATH,\n padding=PADDING,\n ext='jpg'\n )\n IMAGE_PATH = os.path.normpath(IMAGE_PATH)\n\n\n def capture_layout():\n multiSample = cmds.getAttr(\n 'hardwareRenderingGlobals.multiSampleEnable'\n )\n ssao = cmds.getAttr(\n 'hardwareRenderingGlobals.ssaoEnable'\n )\n\n window = autoConnect.captureWindow(\n int(currentWidth) * 0.50, int(currentHeight) * 0.50 + 30\n )\n\n # Tying to force Maya to retain this setting...\n # Set image format to jpg\n cmds.setAttr('%s.imageFormat'\n % renderOutput.DEFAULTS_NODE, 8)\n\n # Make pers non-renderable\n cmds.setAttr('perspShape.renderable', 0)\n\n # Make camera renderable, if exists.\n cmds.setAttr('cameraShape.renderable', 1)\n image_path = IMAGE_PATH.replace('.{}.jpg'.format(PADDING), '')\n image_path = os.path.normpath(IMAGE_PATH.replace('<AOV>', ''))\n cmds.playblast( # compression=compression,\n format='image',\n percent=int(100),\n viewer=False,\n startTime=int(START_FRAME),\n endTime=int(END_FRAME),\n showOrnaments=True,\n forceOverwrite=True,\n filename=image_path,\n widthHeight=[int(currentWidth),\n int(currentHeight)],\n rawFrameNumbers=True,\n framePadding=int(4),\n )\n\n cmds.setAttr(\n 'hardwareRenderingGlobals.multiSampleEnable', multiSample)\n cmds.setAttr('hardwareRenderingGlobals.ssaoEnable',\n ssao)\n window.close()\n\n def confirm_overwrite(aov):\n message = '{layer} - {aov}: Render images already exists at the current location.\\n'\n message += 'If you choose \\'Overwrite\\' they will be replaced with a blank placeholder sequence.\\n\\n'\n message += 'Otherwise click \\'Import 
Existing\\' to import the existing sequence (recommended).\\n'\n message += 'Image Path: {path}'\n\n message = message.format(\n layer=LAYER_NAME,\n aov=aov,\n path=IMAGE_PATH\n )\n\n return cmds.confirmDialog(\n title='Warning',\n message=message,\n button=['Import Existing', 'Overwrite'],\n defaultButton='Import Existing',\n cancelButton='Import Existing',\n dismissString='Import Existing',\n )\n\n def write_exrs(aov):\n for n in xrange(DURATION):\n IMAGE_PATH = '{path}_{padding}.{ext}'.format(\n path=BASE_PATH,\n padding=str(int(START_FRAME) + int(n)).zfill(4),\n ext=EXTENSION\n )\n\n if 'layout' in LAYER_NAME:\n image_path = os.path.normpath(IMAGE_PATH.replace('<AOV>', ''))\n else:\n image_path = os.path.normpath(IMAGE_PATH.replace('<AOV>', aov))\n\n if not os.path.exists(os.path.dirname(image_path)):\n os.makedirs(os.path.dirname(image_path))\n\n with open(image_path, 'w') as exr_file:\n exr_file = open(image_path, 'w')\n exr_file.write(decoded_exr_template_file)\n\n\n # LOOP THROUGH AOVS\n AOVs = rsRenderOutput.get_active_Arnold_AOVs()\n\n if not AOVs:\n image_path = os.path.normpath(IMAGE_PATH.replace('<AOV>', ''))\n if os.path.isfile(image_path) and (confirm_overwrite('(no AOVs)') == 'Overwrite'):\n if 'layout' in LAYER_NAME:\n capture_layout()\n else:\n write_exrs(None)\n elif not os.path.isfile(image_path):\n if 'layout' in LAYER_NAME:\n capture_layout()\n else:\n write_exrs(None)\n\n if os.path.isfile(image_path):\n IMAGE_PATHS.append(str(image_path))\n FOOTAGE_NAMES.append(\n '{layer}_{aov}_{version}'.format(\n layer=LAYER_NAME.replace('_rsLayer', ''),\n aov='beauty',\n version=VERSION\n )\n )\n\n for aov in AOVs:\n if 'layout' in LAYER_NAME:\n image_path = os.path.normpath(IMAGE_PATH.replace('<AOV>', ''))\n else:\n image_path = os.path.normpath(IMAGE_PATH.replace('<AOV>', aov))\n\n if os.path.isfile(image_path) and (confirm_overwrite(aov) == 'Overwrite'):\n if 'layout' in LAYER_NAME:\n capture_layout()\n else:\n write_exrs(aov)\n elif not os.path.isfile(image_path):\n if 'layout' in LAYER_NAME:\n capture_layout()\n else:\n write_exrs(aov)\n\n\n if os.path.isfile(image_path):\n IMAGE_PATHS.append(str(image_path))\n if 'layout' in LAYER_NAME:\n if os.path.isfile(image_path):\n FOOTAGE_NAMES.append(\n '{layer}_{aov}_{version}'.format(\n layer=LAYER_NAME.replace('_rsLayer', ''),\n aov='beauty',\n version=VERSION\n )\n )\n break\n if os.path.isfile(image_path):\n FOOTAGE_NAMES.append(\n '{layer}_{aov}_{version}'.format(\n layer=LAYER_NAME.replace('_rsLayer', ''),\n aov=aov,\n version=VERSION\n )\n )\n\n # House cleaning\n rsUtility.removeMissingSelections()\n\n # Export Camera from scene\n MAYA_CAMERA = autoConnect.exportCamera()\n if MAYA_CAMERA:\n pass\n else:\n raise RuntimeError('Couldn\\'t export maya camera.')\n\n # ############################################################\n # Time to call Ater Effects!\n\n if IMAGE_PATHS:\n pass\n else:\n raise RuntimeError('No image path could be found to export.')\n\n ac = autoConnect.AutoConnect()\n aePath = ac.AFTER_EFFECTS_PATH\n if aePath:\n pass\n else:\n raise RuntimeError('Couldn\\'t find After Effects.')\n\n tempfile.gettempdir()\n scriptPath = os.path.normpath(os.path.join(tempfile.gettempdir(),\n 'aeCommand.jsx'))\n\n # #############################################\n # Script file\n\n script = aeCommand.script\n AE_SCRIPT = script.replace(\n '<Name>', str(SCENE_NAME)\n ).replace(\n '<Width>', str(currentWidth)\n ).replace(\n '<Height>', str(currentHeight)\n ).replace(\n '<Pixel_Aspect>',str(1)\n ).replace(\n '<Duration>', 
str(float(DURATION) / float(FRAME_RATE))\n ).replace(\n '<Frame_Rate>', str(float(FRAME_RATE))\n ).replace(\n '<Image_Paths>', '{}'.format(IMAGE_PATHS)\n ).replace(\n '<Footage_Names>', str(FOOTAGE_NAMES)\n ).replace(\n '<Maya_Camera>', MAYA_CAMERA.replace('\\\\', '\\\\\\\\')\n )\n\n # #############################################\n\n with open(scriptPath, 'w') as AE_SCRIPT_FILE:\n AE_SCRIPT_FILE.write(str(AE_SCRIPT))\n AE_SCRIPT_FILE.close()\n\n cmd = '\"%s\" -r \"%s\"' % (ac.AFTER_EFFECTS_PATH, scriptPath)\n process = QProcess()\n process.startDetached(cmd)", "def main():\n parser = argparse.ArgumentParser(description=\"Process a wavefront object file\")\n parser.add_argument('--width', help=\"Width of output image\", dest='width',\n type=int, default=800)\n parser.add_argument('--height', help=\"Height of output image\", dest='height',\n type=int, default=600)\n parser.add_argument('--out', help=\"Name of output image file\", dest='output', type=str,\n default='output.png')\n parser.add_argument('filename', help=\"Alias Wavefront file to read as input\", type=str)\n args = parser.parse_args()\n\n back_buffer = Buffer(args.width, args.height)\n screen = Screen(back_buffer)\n\n obj = Wavefront(args.filename)\n print(\"Processing {}\".format(args.filename))\n\n max_extent = max(obj.v_extent[0], obj.v_extent[1])\n scale = min(args.width / max_extent, args.height / max_extent)\n translate = ((args.width / 2) + obj.v_min[0] + obj.v_max[0],\n (args.height / 2) + obj.v_min[1] + obj.v_max[1])\n print(\"Using scale: {}\".format(scale))\n\n def conv_x(x):\n \"\"\" Converts x coordinate.\"\"\"\n return int(x * scale + translate[0])\n def conv_y(y):\n \"\"\" Converts y coordinate.\"\"\"\n return int(y * scale + translate[1])\n\n for f in obj.f:\n v1 = obj.v[f[0]]\n v2 = obj.v[f[1]]\n v3 = obj.v[f[2]]\n screen.draw_line((conv_x(v1[0]), conv_y(v1[1])),\n (conv_x(v2[0]), conv_y(v2[1])),\n (255, 255, 255))\n screen.draw_line((conv_x(v2[0]), conv_y(v2[1])),\n (conv_x(v3[0]), conv_y(v3[1])),\n (255, 255, 255))\n screen.draw_line((conv_x(v3[0]), conv_y(v3[1])),\n (conv_x(v1[0]), conv_y(v1[1])),\n (255, 255, 255))\n\n print(\"Minimum vector: {}\".format(obj.v_min))\n print(\"Maximum vector: {}\".format(obj.v_max))\n print(\"Calculated extent: {}\".format(obj.v_extent))\n\n back_buffer.write_to_png(args.output)\n print(\"Written output to {}\".format(args.output))", "def gen(self):\n for path, bg_idx, bbox in zip(self.img_paths, self.bgs, self.bbox):\n img = cv2.imread(self.background[bg_idx])\n for alpha, obj, box in zip(self.alphas, self.objects, bbox):\n img, mask = self.alpha_blend(img, obj, box, alpha)\n yield path, img, mask", "def render_image(camera, scene, lights, nx, ny):\n # TODO A5 copy implementation from A4\n img = np.zeros((ny, nx, 3), np.float32)\n\n for x in range(0, nx):\n for y in range(0, ny):\n u = (x + 0.5) / nx\n v = (y + 0.5) / ny\n ray = camera.generate_ray((u, v))\n hit = scene.intersect(ray)\n img[y][x] = shade(ray, hit, scene, lights)\n\n return img", "def loadImage( self, imageName = \"nehe_wall.bmp\" ):\n try:\n from PIL.Image import open\n except ImportError, err:\n from Image import open\n glActiveTexture(GL_TEXTURE0_ARB);\n return texture.Texture( open(imageName) )", "def load(file):\n img = open(file)\n\n try:\n ix, iy, image = img.size[0], img.size[1], img.tobytes(\"raw\", \"RGBA\", 0, -1)\n except SystemError:\n ix, iy, image = img.size[0], img.size[1], img.tobytes(\"raw\", \"RGBX\", 0, -1)\n\n imgID = glGenTextures(1)\n\n glBindTexture(GL_TEXTURE_2D, imgID)\n 
glPixelStorei(GL_UNPACK_ALIGNMENT, 1)\n\n glTexImage2D(GL_TEXTURE_2D, 0, 3, ix, iy, 0, GL_RGBA, GL_UNSIGNED_BYTE, image)\n\n return imgID" ]
[ "0.62464625", "0.61263806", "0.5809615", "0.5739291", "0.57354367", "0.57354367", "0.5628557", "0.5620815", "0.5601936", "0.5595058", "0.5571719", "0.55669874", "0.5565276", "0.5536535", "0.54908586", "0.547654", "0.5472655", "0.5457408", "0.54366314", "0.54189646", "0.54175854", "0.539162", "0.5364308", "0.5351393", "0.53460014", "0.5340002", "0.5334007", "0.53288335", "0.5327352", "0.5324157" ]
0.77711815
0
Generate image from blender scene file(.blend) with changed parameters
def generate_img_with_params(scene_file, script_name="tmp.py", xres=800, yres=600, crop=None, use_compositing=False, output=None, frame=1): if crop is None: crop = [0, 1, 0, 1] crop_file_src = generate_blender_crop_file([xres, yres], [crop[0], crop[1]], [crop[2], crop[3]], use_compositing) scene_dir = os.path.dirname(os.path.abspath(scene_file)) new_scriptpath = os.path.join(scene_dir, script_name) with open(new_scriptpath, 'w') as f: f.write(crop_file_src) generate_blenderimage(scene_file, output, new_scriptpath, frame)
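A minimal usage sketch for the document entry above, assuming only what is visible in its signature and body; the scene path, output path, and frame number are hypothetical placeholders, and the axis ordering of the crop fractions is inferred from the default [0, 1, 0, 1] rather than confirmed by the source.

    # Hypothetical call: render frame 10 of "scene.blend" at 1920x1080,
    # restricting the crop region to the first half of each crop axis and
    # enabling compositing. The helper script "crop_tmp.py" is written next
    # to the scene file by generate_img_with_params itself before rendering.
    generate_img_with_params(
        "scene.blend",              # hypothetical path to a .blend file
        script_name="crop_tmp.py",  # name given to the generated crop script
        xres=1920,
        yres=1080,
        crop=[0.0, 0.5, 0.0, 0.5],  # two [min, max] fraction pairs; default is [0, 1, 0, 1]
        use_compositing=True,
        output="/tmp/frame_10.png", # hypothetical output path passed through to the renderer
        frame=10,
    )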
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_blenderimage(scene_file, output=None, script_file=None, frame=1):\n cmd = [BLENDER, \"-b\", scene_file, \"-y\"]\n previous_wd = os.getcwd()\n os.chdir(os.path.dirname(os.path.abspath(scene_file)))\n if script_file:\n cmd.append(\"-P\")\n cmd.append(script_file)\n if output:\n outbase, ext = os.path.splitext(output)\n cmd.append(\"-o\")\n cmd.append(output)\n print(ext)\n print(ext[1:])\n if ext:\n cmd.append(\"-F\")\n cmd.append(ext[1:].upper())\n cmd.append(\"-noaudio\")\n cmd.append(\"-f\")\n cmd.append(str(frame))\n print(cmd)\n exec_cmd(cmd)\n os.chdir(previous_wd)", "def filesToBlender(context, prefix, max_blocks=200):\n # Get reference matrix\n refMatrix = None\n if context.scene.maps_models_importer_is_ref_matrix_valid:\n values = context.scene.maps_models_importer_ref_matrix\n refMatrix = Matrix((values[0:4], values[4:8], values[8:12], values[12:16]))\n\n drawcallId = 0\n while max_blocks <= 0 or drawcallId < max_blocks:\n if not os.path.isfile(\"{}{:05d}-indices.bin\".format(prefix, drawcallId)):\n break\n\n try:\n indices, positions, uvs, img, constants = loadData(prefix, drawcallId)\n except FileNotFoundError as err:\n print(\"Skipping ({})\".format(err))\n continue\n\n uvOffsetScale, matrix, refMatrix = extractUniforms(constants, refMatrix)\n\n # Make triangles from triangle strip index buffer\n n = len(indices)\n tris = [ [ indices[i+j] for j in [[0,1,2],[0,2,1]][i%2] ] for i in range(n - 3)]\n tris = [ t for t in tris if t[0] != t[1] and t[0] != t[2] and t[1] != t[2] ]\n verts = [ [ p[0], p[1], p[2] ] for p in positions ]\n\n [ou, ov, su, sv] = uvOffsetScale\n uvs = [ [ (floor(u * 65535.0 + 0.5) + ou) * su, (floor(v * 65535.0 + 0.5) + ov) * sv ] for u, v in uvs ]\n \n if len(indices) == 0:\n continue\n\n mesh_name = \"BuildingMesh-{:05d}\".format(drawcallId)\n obj = addMesh(context, mesh_name, verts, tris, uvs)\n obj.matrix_world = matrix\n\n mat_name = \"BuildingMat-{:05d}\".format(drawcallId)\n addImageMaterial(mat_name, obj, img)\n\n drawcallId += 1\n\n # Save reference matrix\n if refMatrix:\n values = sum([list(v) for v in refMatrix], [])\n context.scene.maps_models_importer_ref_matrix = values\n context.scene.maps_models_importer_is_ref_matrix_valid = True", "def setup_scene_for_rgb_render(scene, outdir):\n # Use node rendering for python control\n scene.use_nodes = True\n tree = scene.node_tree\n links = tree.links\n\n # Make sure there are no existing nodes\n for node in tree.nodes:\n tree.nodes.remove(node)\n\n # Set up a renderlayer and plug it into our remapping layer\n inp = tree.nodes.new('CompositorNodeRLayers')\n\n if (bpy.app.version[1] >= 70): # Don't apply color transformation -- changed in Blender 2.70\n scene.view_settings.view_transform = 'Raw'\n scene.sequencer_colorspace_settings.name = 'Non-Color'\n\n # Save it out\n if outdir:\n out = tree.nodes.new('CompositorNodeOutputFile')\n ident = str(uu.uuid4())\n out.file_slots[0].path = ident\n out.base_path = outdir\n # out.format.color_mode = 'BW'\n # out.format.color_depth = settings.DEPTH_BITS_PER_CHANNEL\n out.format.color_mode = 'RGB'\n out.format.color_depth = settings.COLOR_BITS_PER_CHANNEL\n out.format.file_format = settings.PREFERRED_IMG_EXT.upper()\n links.new(inp.outputs[0], out.inputs[0])\n ext = utils.img_format_to_ext[settings.PREFERRED_IMG_EXT.lower()]\n temp_filename = \"{0}0001.{1}\".format(ident, ext)\n return os.path.join(outdir, temp_filename)\n else:\n out = tree.nodes.new('CompositorNodeComposite')\n links.new(inp.outputs[0], out.inputs[0])\n return None", "def 
render_and_save():\n\n rendering_config = configuration.get_config()\n rendering_config = ml_collections.FrozenConfigDict(rendering_config)\n aspect_ratio = rendering_config.aspect_ratio\n height = rendering_config.height\n width = int(aspect_ratio * height)\n\n scene_camera = build_camera(rendering_config, aspect_ratio)\n world = build_world(rendering_config)\n\n # Render.\n logging.info(\"Tracing rays...\")\n render_image_fn = jax.jit(\n render.generate_image,\n static_argnames=[\"height\", \"width\", \"config\"])\n image = render_image_fn(height, width, scene_camera, world, rendering_config)\n\n image = render.correct_gamma(image, gamma=rendering_config.gamma_correction)\n\n logging.info(\"Saving to file...\")\n output.export_as_ppm(image, rendering_config.output_file)\n\n return image", "def render_sample(latents, material_names, include_lights, output_filename, save_scene):\n\n # set output path\n bpy.context.scene.render.filepath = output_filename\n\n # set objects and lights\n update_objects_and_lights(latents, material_names, include_lights)\n\n rgba_background = colorsys.hsv_to_rgb(latents[9] / (2.0 * np.pi), 0.60, 1.0) + (\n 1.0,\n )\n render_utils.change_material(\n bpy.data.objects[\"Ground\"].data.materials[-1], Color=rgba_background\n )\n\n # set scene background\n bpy.ops.render.render(write_still=True)\n\n if save_scene:\n # just for debugging\n bpy.ops.wm.save_as_mainfile(\n filepath=f\"scene_{os.path.basename(output_filename)}.blend\"\n )", "def main():\n viewer = Viewer()\n shader = Shader(\"Shaders/poisson.vert\", \"Shaders/poisson.frag\")\n shaderLight = Shader(\"Shaders/phong.vert\", \"Shaders/phong.frag\")\n\n # Création du terrain\n node_terrain = Node(transform=translate((-250,-11.7,-250)))\n node_terrain.add(Terrain(\"Terrain/sand.jpg\",viewer.depth))\n viewer.add(node_terrain)\n\n # Chargement de tous les objets\n # Chaque objet est mis dans un node qui permet de définir\n # sa position, rotation et taille et aussi de se rattacher\n # à une autre entité\n\n barnabe_obj = \"Fish/Fish/WhaleShark/WhaleShark.obj\"\n barnabe_png = \"Fish/Fish/WhaleShark/WhaleShark_Base_Color.png\"\n\n hector_obj = \"Fish/Fish/ReefFish5/ReefFish5.obj\"\n hector_png = \"Fish/Fish/ReefFish5/ReefFish5_Base_Color.png\"\n\n susie_obj = \"Fish/Fish/SeaHorse/SeaHorse.obj\"\n susie_png = \"Fish/Fish/SeaHorse/SeaHorse_Base_Color.png\"\n\n edgar_obj = \"Fish/Fish/BlueTang/BlueTang.obj\"\n edgar_png = \"Fish/Fish/BlueTang/BlueTang_Base_Color.png\"\n\n nemo_obj = \"Fish/Fish/ClownFish2/ClownFish2.obj\"\n nemo_png = \"Fish/Fish/ClownFish2/Clownfish2_Base_Color.png\"\n\n caroline_obj = \"Fish/Fish/Turtle/Turtle.obj\"\n caroline_png = \"Fish/Fish/Turtle/Turtle.jpg\"\n\n corail_obj = \"Fish/Fish/Corail/Corail.obj\"\n corail_png = \"Fish/Fish/Corail/Corail.jpg\"\n\n sebastien_obj = \"Fish/Fish/Crab/Crab.obj\"\n sebastien_png = \"Fish/Fish/Crab/Crab.jpg\"\n\n star_obj = \"Fish/Fish/BlueStarfish/BluieStarfish.obj\"\n star_png = \"Fish/Fish/BlueStarfish/BlueStarfish_Base_Color.png\"\n\n cube_obj = \"Fish/Fish/Cube/cube.obj\"\n cube_png = \"Fish/Fish/Cube/cube.png\"\n\n suzanne_obj = \"Fish/Fish/Suzanne/Suzanne.obj\"\n\n barnabe_node = Node(2)\n meshes = load_shadowed_texture(barnabe_obj, shader, viewer.depth, barnabe_png, 2)\n for mesh in meshes:\n barnabe_node.add(mesh)\n\n edgar_node = Node(0, transform=translate((1.5, 0.0, 1.5)) @ scale((0.1, 0.1, 0.1)))\n meshes = load_shadowed_texture(edgar_obj, shader, viewer.depth, edgar_png, 0)\n for mesh in meshes:\n edgar_node.add(mesh)\n 
barnabe_node.add(edgar_node)\n viewer.add(barnabe_node)\n\n cube_node = Node(transform=translate((5.0,-5.0,0.0)))\n meshes = load_shadowed_texture(cube_obj, shader,viewer.depth, cube_png, 1)\n for mesh in meshes:\n cube_node.add(mesh)\n\n suzanne_bubble_node = Node(transform=translate((-0.4,-0.25,0.65)))\n suzanne_node = Node(transform=scale((0.25,0.25,0.25)) @ rotate((0,1,1),-30))\n meshes = load_phong_mesh(suzanne_obj,shaderLight,viewer.depth)\n for mesh in meshes:\n suzanne_node.add(mesh)\n suzanne_bubble_node.add(suzanne_node)\n\n bubble_translate = {0: vec(-0.15,-0.17,0.25), 3: vec(-0.2,0,0.25), 5: vec(-0.15, 0.15, 0.25), 7: vec(-0.175, 0.27, 0.25)}\n bubble_rotate = {0: quaternion()}\n bubble_scale = {0: 0.02, 7: 0.06}\n bubble_node = KeyFrameControlNode(bubble_translate, bubble_rotate, bubble_scale)\n bubble_node.add(Bubble(15))\n suzanne_bubble_node.add(bubble_node)\n cube_node.add(suzanne_bubble_node)\n\n susie_trans = {0: vec(1.2, 1, 0), 2: vec(1.2, 2, 0), 5: vec(1.2, 1, 0)}\n susie_scale = {0: 0.03}\n susie_rotate = {0: quaternion_from_axis_angle((0, 1, 0), degrees=-45)}\n susie_node = KeyFrameControlNode(susie_trans, susie_rotate, susie_scale)\n meshes = load_shadowed_texture(susie_obj, shader, viewer.depth, susie_png, 1)\n for mesh in meshes:\n susie_node.add(mesh)\n cube_node.add(susie_node)\n\n susie2_trans = {0: vec(1, 2, 0), 3: vec(1, 1.5, 0), 5: vec(1, 2, 0)}\n susie2_scale = {0: 0.05}\n susie2_rotate = {0: quaternion_from_axis_angle((0, 1, 0), degrees=45)}\n susie2_node = KeyFrameControlNode(susie2_trans, susie2_rotate, susie2_scale)\n meshes = load_shadowed_texture(susie_obj, shader, viewer.depth, susie_png, 1)\n for mesh in meshes:\n susie2_node.add(mesh)\n cube_node.add(susie2_node)\n\n nemo_trans = {0: vec(-25, 2, 25), 22: vec(-2, 2, 2), 40: vec(20, 2, -20)}\n nemo_scale = {0: 0.1}\n nemo_rotate = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans, nemo_rotate, nemo_scale)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans2 = {0: vec(-28, 2, 26), 20: vec(0, 2, 3), 40: vec(20, 2, -23)}\n nemo_scale2 = {0: 0.07}\n nemo_rotate2 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans2, nemo_rotate2, nemo_scale2)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans3 = {0: vec(-22, 2, 21), 41: vec(20, 2, -20)}\n nemo_scale3 = {0: 0.07}\n nemo_rotate3 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans3, nemo_rotate3, nemo_scale3)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans4 = {0: vec(-22, 2.3, 21), 39: vec(20, 2.5, -20)}\n nemo_scale4 = {0: 0.07}\n nemo_rotate4 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans4, nemo_rotate4, nemo_scale4)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans5 = {0: vec(-22, 2.2, 21), 36: vec(30, 2.2, -20)}\n nemo_scale5 = {0: 0.1}\n nemo_rotate5 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans5, nemo_rotate5, nemo_scale5)\n meshes = 
load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans6 = {0: vec(-20, 1.7, 21), 38: vec(30, 2, -20)}\n nemo_scale6 = {0: 0.1}\n nemo_rotate6 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans6, nemo_rotate6, nemo_scale6)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n star_node = Node(transform=translate((0,0.5,0)) @ scale((0.3, 0.3, 0.3)))\n meshes = load_shadowed_texture(star_obj, shader, viewer.depth, star_png, 1)\n for mesh in meshes:\n star_node.add(mesh)\n cube_node.add(star_node)\n\n translate_keys = {0: vec(1,-0.5,0.5), 10: vec(1,-0.5,-0.5), 20: vec(1,-0.5,0.5)}\n rotate_keys = {0: quaternion_mul(quaternion_from_axis_angle((1,0,0), degrees=-90), quaternion_from_axis_angle((0,0,1), degrees=90))}\n scale_keys = {0: 0.02}\n sebastien_node = KeyFrameControlNode(translate_keys, rotate_keys, scale_keys)\n meshes = load_shadowed_texture(sebastien_obj, shader, viewer.depth, sebastien_png, 1)\n for mesh in meshes:\n sebastien_node.add(mesh)\n cube_node.add(sebastien_node)\n viewer.add(cube_node)\n\n corail_turtle_node = Node(transform=translate((2.5, -5.0, -5.0)))\n corail_node = Node(transform=scale((0.01,0.01,0.01)))\n meshes = load_shadowed_texture(corail_obj, shader, viewer.depth, corail_png, 1)\n for mesh in meshes:\n corail_node.add(mesh)\n corail_turtle_node.add(corail_node)\n\n hector_trans = {0: vec(-0.5, 1, 0.5)}\n hector_scale = {0: 0.07}\n hector_rotate = {0: quaternion_from_axis_angle((0,1,0), degrees=-90), 1: quaternion_from_axis_angle((0,1,0), degrees=-180), 2: quaternion_from_axis_angle((0,1,0), degrees=-270), 3: quaternion_from_axis_angle((0,1,0), degrees=-360), 4: quaternion_from_axis_angle((0,1,0), degrees=-90)}\n hector_node = KeyFrameControlNode(hector_trans, hector_rotate, hector_scale)\n meshes = load_shadowed_texture(hector_obj, shader,viewer.depth, hector_png, 3)\n for mesh in meshes:\n hector_node.add(mesh)\n corail_turtle_node.add(hector_node)\n\n caroline_node = Node(transform=translate((-0.5, 0.5, 0.0)) @ scale((0.01,0.01,0.01)) @ rotate((1,0,0), 270) @ rotate((0,0,1),315) @ rotate((0,1,0), 45))\n meshes = load_shadowed_texture(caroline_obj, shader,viewer.depth,caroline_png, 0)\n for mesh in meshes:\n caroline_node.add(mesh)\n corail_turtle_node.add(caroline_node)\n viewer.add(corail_turtle_node)\n\n # Commande de clavier\n print(\"\\n\\n ----------------- Les commandes de clavier sont les flèches ------------------- \\n\\n\")\n\n # start rendering loop\n viewer.run()", "def snapshot(self, components=4):\n fbo = self.fbo\n data = fbo.read(components=3)\n from PIL import Image\n return Image.frombytes('RGB', fbo.size, data).transpose(Image.FLIP_TOP_BOTTOM)", "def rsMakeComp(args):\n\n pathControlSelection = cmds.optionMenu('%s_optionMenu05'\n % windowID, query=True, value=True)\n if cmds.objExists('camera') is False:\n print '# Couldn\\'t find \\'camera\\' #'\n raise RuntimeError('Couldn\\'t find the camera. 
Make sure the main camera is called \\'camera\\''\n )\n\n si = autoConnect.SceneInfo()\n\n if si.isSceneSaved is False:\n print '# Scene has not been saved yet #'\n raise RuntimeError('_\\n%s' % 'Scene hasn\\'t been saved')\n\n DATA = {}\n\n BASE_PATH = si.renders\n START_FRAME = si.startFrame\n END_FRAME = si.endFrame\n DURATION = si.duration\n currentTime = si.currentTime\n currentWidth = si.currentWidth\n currentHeight = si.currentHeight\n FRAME_RATE = si.frameRate\n EXTENSION = 'exr'\n IMAGE_PATH = None\n\n sn = cmds.file(query=True, sn=True, shortName=True)\n if sn:\n # removes the versioning // Studio AKA specific setting.\n SCENE_NAME = (sn.split('.')[0])[:-4]\n else:\n SCENE_NAME = 'untitled_maya_scene'\n OUTPUT_OPTION = [w for w in renderOutput.SIZE_TEMPLATE\n if currentWidth == w['width'] and currentHeight\n == w['height']]\n # Check output templates\n if OUTPUT_OPTION == []:\n raise RuntimeError(\n 'The current output size is not one of the defined templates. This is unsupported.\\n\\\n To continue, select one of the templated output formats.'\n )\n TEMPLATE = None\n IMAGE_PATHS = []\n FOOTAGE_NAMES = []\n MAYA_CAMERA = None\n\n TEMPLATE = OUTPUT_OPTION[0]['suffix']\n\n if pathControlSelection == renderOutput.OUTPUT_TEMPLATES[0]:\n print '# Output path not yet set #'\n raise RuntimeError('Path template is not set. To continue, select one of the output path templates.')\n\n LAYER_NAME = renderSetup.instance().getVisibleRenderLayer().name()\n VERSION = cmds.optionMenu('%s_outputVersionMenu' % windowID,\n query=True, value=True)\n\n # Decode the exr template file\n decoded_exr_template_file = base64.b64decode(templates.EXR_TEMPLATES[TEMPLATE])\n PADDING = str(int(START_FRAME)).zfill(4)\n\n BASE_PATH = rsRenderOutput.pathStr(\n LAYER_NAME,\n long=True\n )\n IMAGE_PATH = '{path}_{padding}.{ext}'.format(\n path=BASE_PATH,\n padding=PADDING,\n ext=EXTENSION\n )\n\n if 'layout' in LAYER_NAME:\n IMAGE_PATH = '{path}.{padding}.{ext}'.format(\n path=BASE_PATH,\n padding=PADDING,\n ext='jpg'\n )\n IMAGE_PATH = os.path.normpath(IMAGE_PATH)\n\n\n def capture_layout():\n multiSample = cmds.getAttr(\n 'hardwareRenderingGlobals.multiSampleEnable'\n )\n ssao = cmds.getAttr(\n 'hardwareRenderingGlobals.ssaoEnable'\n )\n\n window = autoConnect.captureWindow(\n int(currentWidth) * 0.50, int(currentHeight) * 0.50 + 30\n )\n\n # Tying to force Maya to retain this setting...\n # Set image format to jpg\n cmds.setAttr('%s.imageFormat'\n % renderOutput.DEFAULTS_NODE, 8)\n\n # Make pers non-renderable\n cmds.setAttr('perspShape.renderable', 0)\n\n # Make camera renderable, if exists.\n cmds.setAttr('cameraShape.renderable', 1)\n image_path = IMAGE_PATH.replace('.{}.jpg'.format(PADDING), '')\n image_path = os.path.normpath(IMAGE_PATH.replace('<AOV>', ''))\n cmds.playblast( # compression=compression,\n format='image',\n percent=int(100),\n viewer=False,\n startTime=int(START_FRAME),\n endTime=int(END_FRAME),\n showOrnaments=True,\n forceOverwrite=True,\n filename=image_path,\n widthHeight=[int(currentWidth),\n int(currentHeight)],\n rawFrameNumbers=True,\n framePadding=int(4),\n )\n\n cmds.setAttr(\n 'hardwareRenderingGlobals.multiSampleEnable', multiSample)\n cmds.setAttr('hardwareRenderingGlobals.ssaoEnable',\n ssao)\n window.close()\n\n def confirm_overwrite(aov):\n message = '{layer} - {aov}: Render images already exists at the current location.\\n'\n message += 'If you choose \\'Overwrite\\' they will be replaced with a blank placeholder sequence.\\n\\n'\n message += 'Otherwise click \\'Import 
Existing\\' to import the existing sequence (recommended).\\n'\n message += 'Image Path: {path}'\n\n message = message.format(\n layer=LAYER_NAME,\n aov=aov,\n path=IMAGE_PATH\n )\n\n return cmds.confirmDialog(\n title='Warning',\n message=message,\n button=['Import Existing', 'Overwrite'],\n defaultButton='Import Existing',\n cancelButton='Import Existing',\n dismissString='Import Existing',\n )\n\n def write_exrs(aov):\n for n in xrange(DURATION):\n IMAGE_PATH = '{path}_{padding}.{ext}'.format(\n path=BASE_PATH,\n padding=str(int(START_FRAME) + int(n)).zfill(4),\n ext=EXTENSION\n )\n\n if 'layout' in LAYER_NAME:\n image_path = os.path.normpath(IMAGE_PATH.replace('<AOV>', ''))\n else:\n image_path = os.path.normpath(IMAGE_PATH.replace('<AOV>', aov))\n\n if not os.path.exists(os.path.dirname(image_path)):\n os.makedirs(os.path.dirname(image_path))\n\n with open(image_path, 'w') as exr_file:\n exr_file = open(image_path, 'w')\n exr_file.write(decoded_exr_template_file)\n\n\n # LOOP THROUGH AOVS\n AOVs = rsRenderOutput.get_active_Arnold_AOVs()\n\n if not AOVs:\n image_path = os.path.normpath(IMAGE_PATH.replace('<AOV>', ''))\n if os.path.isfile(image_path) and (confirm_overwrite('(no AOVs)') == 'Overwrite'):\n if 'layout' in LAYER_NAME:\n capture_layout()\n else:\n write_exrs(None)\n elif not os.path.isfile(image_path):\n if 'layout' in LAYER_NAME:\n capture_layout()\n else:\n write_exrs(None)\n\n if os.path.isfile(image_path):\n IMAGE_PATHS.append(str(image_path))\n FOOTAGE_NAMES.append(\n '{layer}_{aov}_{version}'.format(\n layer=LAYER_NAME.replace('_rsLayer', ''),\n aov='beauty',\n version=VERSION\n )\n )\n\n for aov in AOVs:\n if 'layout' in LAYER_NAME:\n image_path = os.path.normpath(IMAGE_PATH.replace('<AOV>', ''))\n else:\n image_path = os.path.normpath(IMAGE_PATH.replace('<AOV>', aov))\n\n if os.path.isfile(image_path) and (confirm_overwrite(aov) == 'Overwrite'):\n if 'layout' in LAYER_NAME:\n capture_layout()\n else:\n write_exrs(aov)\n elif not os.path.isfile(image_path):\n if 'layout' in LAYER_NAME:\n capture_layout()\n else:\n write_exrs(aov)\n\n\n if os.path.isfile(image_path):\n IMAGE_PATHS.append(str(image_path))\n if 'layout' in LAYER_NAME:\n if os.path.isfile(image_path):\n FOOTAGE_NAMES.append(\n '{layer}_{aov}_{version}'.format(\n layer=LAYER_NAME.replace('_rsLayer', ''),\n aov='beauty',\n version=VERSION\n )\n )\n break\n if os.path.isfile(image_path):\n FOOTAGE_NAMES.append(\n '{layer}_{aov}_{version}'.format(\n layer=LAYER_NAME.replace('_rsLayer', ''),\n aov=aov,\n version=VERSION\n )\n )\n\n # House cleaning\n rsUtility.removeMissingSelections()\n\n # Export Camera from scene\n MAYA_CAMERA = autoConnect.exportCamera()\n if MAYA_CAMERA:\n pass\n else:\n raise RuntimeError('Couldn\\'t export maya camera.')\n\n # ############################################################\n # Time to call Ater Effects!\n\n if IMAGE_PATHS:\n pass\n else:\n raise RuntimeError('No image path could be found to export.')\n\n ac = autoConnect.AutoConnect()\n aePath = ac.AFTER_EFFECTS_PATH\n if aePath:\n pass\n else:\n raise RuntimeError('Couldn\\'t find After Effects.')\n\n tempfile.gettempdir()\n scriptPath = os.path.normpath(os.path.join(tempfile.gettempdir(),\n 'aeCommand.jsx'))\n\n # #############################################\n # Script file\n\n script = aeCommand.script\n AE_SCRIPT = script.replace(\n '<Name>', str(SCENE_NAME)\n ).replace(\n '<Width>', str(currentWidth)\n ).replace(\n '<Height>', str(currentHeight)\n ).replace(\n '<Pixel_Aspect>',str(1)\n ).replace(\n '<Duration>', 
str(float(DURATION) / float(FRAME_RATE))\n ).replace(\n '<Frame_Rate>', str(float(FRAME_RATE))\n ).replace(\n '<Image_Paths>', '{}'.format(IMAGE_PATHS)\n ).replace(\n '<Footage_Names>', str(FOOTAGE_NAMES)\n ).replace(\n '<Maya_Camera>', MAYA_CAMERA.replace('\\\\', '\\\\\\\\')\n )\n\n # #############################################\n\n with open(scriptPath, 'w') as AE_SCRIPT_FILE:\n AE_SCRIPT_FILE.write(str(AE_SCRIPT))\n AE_SCRIPT_FILE.close()\n\n cmd = '\"%s\" -r \"%s\"' % (ac.AFTER_EFFECTS_PATH, scriptPath)\n process = QProcess()\n process.startDetached(cmd)", "def init():\n \n # General parameters\n exp_path = '/home/laura/Documents/stacks tif/1705_regMovie.tif' # experimental tif stack (grayscale)\n bin_path = '/home/laura/Documents/stacks tif/1705/1705_binarizedMovie.tif' # binarized tif stack\n vect_path = '/home/laura/Documents/STAGE3/1705_NET/' # gpickle directory\n dest_path = '/home/laura/Documents/STAGE3/1705_NET/superposition' # output directory\n verbose = True\n debug = True\n invert = True \n main_params = [exp_path, bin_path, vect_path, dest_path, verbose, debug, invert]\n \n # Output options\n doImg = -1 # image index\n doStack = False \n doVideo = False \n compress = 3 # advice: no more than 5\n output_params = [doImg, doStack, doVideo, compress]\n \n # Drawing options (colors as BGR)\n line = True # edges drawing\n line_color = (0, 255, 0) # green \n line_size = 1 \n apex_color = (0, 0, 255) # red\n apex_size = 5\n node_color = (255, 0, 0) # blue\n node_size = 5\n body_color = (0, 255, 0) # green\n body_size = 3\n drawing_params = [line, line_color, line_size, apex_color, apex_size,\n node_color, node_size, body_color, body_size]\n \n return main_params, output_params, drawing_params", "def _export_texture_type_image(self, bo, layer, slot):\n texture = slot.texture\n layer_props = texture.plasma_layer\n mipmap = texture.use_mipmap\n\n # Does the image have any alpha at all?\n if texture.image is not None:\n has_alpha = texture.use_calculate_alpha or slot.use_stencil or self._test_image_alpha(texture.image)\n if (texture.image.use_alpha and texture.use_alpha) and not has_alpha:\n warning = \"'{}' wants to use alpha, but '{}' is opaque\".format(texture.name, texture.image.name)\n self._exporter().report.warn(warning, indent=3)\n else:\n has_alpha = True\n\n # First, let's apply any relevant flags\n state = layer.state\n if not slot.use_stencil and not slot.use_map_normal:\n # mutually exclusive blend flags\n if texture.use_alpha and has_alpha:\n if slot.blend_type == \"ADD\":\n state.blendFlags |= hsGMatState.kBlendAlphaAdd\n elif slot.blend_type == \"MULTIPLY\":\n state.blendFlags |= hsGMatState.kBlendAlphaMult\n else:\n state.blendFlags |= hsGMatState.kBlendAlpha\n\n if texture.invert_alpha and has_alpha:\n state.blendFlags |= hsGMatState.kBlendInvertAlpha\n\n if texture.extension in {\"CLIP\", \"EXTEND\"}:\n state.clampFlags |= hsGMatState.kClampTexture\n\n # Now, let's export the plBitmap\n # If the image is None (no image applied in Blender), we assume this is a plDynamicTextMap\n # Otherwise, we toss this layer and some info into our pending texture dict and process it\n # when the exporter tells us to finalize all our shit\n if texture.image is None:\n dtm = self._mgr.find_create_object(plDynamicTextMap, name=\"{}_DynText\".format(layer.key.name), bl=bo)\n dtm.hasAlpha = texture.use_alpha\n # if you have a better idea, let's hear it...\n dtm.visWidth, dtm.visHeight = 1024, 1024\n layer.texture = dtm.key\n else:\n detail_blend = TEX_DETAIL_ALPHA\n if 
layer_props.is_detail_map and mipmap:\n if slot.blend_type == \"ADD\":\n detail_blend = TEX_DETAIL_ADD\n elif slot.blend_type == \"MULTIPLY\":\n detail_blend = TEX_DETAIL_MULTIPLY\n\n # Herp, derp... Detail blends are all based on alpha\n if layer_props.is_detail_map and not state.blendFlags & hsGMatState.kBlendMask:\n state.blendFlags |= hsGMatState.kBlendDetail\n\n allowed_formats = {\"DDS\"} if mipmap else {\"PNG\", \"BMP\"}\n self.export_prepared_image(texture=texture, owner=layer,\n use_alpha=has_alpha, force_calc_alpha=slot.use_stencil,\n is_detail_map=layer_props.is_detail_map,\n detail_blend=detail_blend,\n detail_fade_start=layer_props.detail_fade_start,\n detail_fade_stop=layer_props.detail_fade_stop,\n detail_opacity_start=layer_props.detail_opacity_start,\n detail_opacity_stop=layer_props.detail_opacity_stop,\n mipmap=mipmap, allowed_formats=allowed_formats,\n indent=3)", "def calc_source_blend_params(params,log):\n\n source = photometry_classes.Star()\n\n source.fs_g = params['f_s_g']\n source.sig_fs_g = params['sig_f_s_g']\n (source.g, source.sig_g) = flux_to_mag_pylima(source.fs_g,source.sig_fs_g)\n\n source.fs_r = params['f_s_r']\n source.sig_fs_r = params['sig_f_s_r']\n (source.r, source.sig_r) = flux_to_mag_pylima(source.fs_r,source.sig_fs_r)\n\n source.fs_i = params['f_s_i']\n source.sig_fs_i = params['sig_f_s_i']\n (source.i, source.sig_i) = flux_to_mag_pylima(source.fs_i,source.sig_fs_i)\n\n source.compute_colours(use_inst=True)\n source.transform_to_JohnsonCousins()\n\n log.info('\\n')\n log.info('Source measured photometry:')\n log.info(source.summary(show_mags=True))\n log.info(source.summary(show_mags=False,show_colours=True))\n log.info(source.summary(show_mags=False,johnsons=True))\n\n blend = photometry_classes.Star()\n\n blend.fs_g = params['f_b_g']\n blend.sig_fs_g = params['sig_f_b_g']\n (blend.g, blend.sig_g) = flux_to_mag_pylima(blend.fs_g,blend.sig_fs_g)\n\n blend.fs_r = params['f_b_r']\n blend.sig_fs_r = params['sig_f_b_r']\n (blend.r, blend.sig_r) = flux_to_mag_pylima(blend.fs_r,blend.sig_fs_r)\n\n blend.fs_i = params['f_b_i']\n blend.sig_fs_i = params['sig_f_b_i']\n (blend.i, blend.sig_i) = flux_to_mag_pylima(blend.fs_i,blend.sig_fs_i)\n\n blend.compute_colours(use_inst=True)\n blend.transform_to_JohnsonCousins()\n\n log.info('\\n')\n log.info('Blend measured photometry:')\n log.info(blend.summary(show_mags=True))\n log.info(blend.summary(show_mags=False,show_colours=True))\n log.info(blend.summary(show_mags=False,johnsons=True))\n\n return source, blend", "def __init__(self): # image, scale):\n super().__init__()\n self.textures = []\n self.center_x = random.randrange(SCREEN_WIDTH)\n self.center_y = random.randrange(SCREEN_HEIGHT)\n coin_img_path = \"img/silver_coins/Silver_%i.png\"\n for y in range(1, 11):\n self.textures.append(arcade.load_texture(coin_img_path % y, scale=0.1))\n self.cur_texture_index = random.randrange(len(self.textures))", "def create_target_image(self, builder, target, base_image, parameters):", "def uvSnapshot(arg=None):\n\n FILE_NAME = 'uv.jpg'\n RESOLUTION = 4096\n\n sel = getListSelection()\n ac = autoConnect.AutoConnect()\n\n if ac.PHOTOSHOP_PATH is None:\n raise WindowsError('Couldn\\'t find Adobe Photoshop.')\n\n for s in sel:\n shaderName = shaderUtility.customStringToShaderName(s)\n if [f for f in ac.DATA.keys() if shaderName == f] == []:\n print '# Shader doesn\\'t have an AutoConnect setup.'\n continue\n\n # Launch photoshop process\n\n editTexture()\n\n usedBy = shaderUtility.data[shaderName]['usedBy']\n parents = 
[]\n for u in usedBy:\n parent = cmds.listRelatives(u, allParents=True,\n path=True)[0]\n parents.append(parent)\n cmds.select(parents)\n\n p = path.normpath(path.join(ac.workspace, ac.sourceImages,\n shaderName))\n if os.path.isdir(p) is not True:\n os.mkdir(p)\n path.normpath(path.join(p, FILE_NAME))\n cmds.uvSnapshot(\n name=path.normpath(path.join(p, FILE_NAME)),\n overwrite=True,\n antiAliased=True,\n fileFormat='jpg',\n xResolution=RESOLUTION,\n yResolution=RESOLUTION,\n )\n\n # Let's call Photoshop\n\n script = psCommand.script\n PS_SCRIPT = script.replace('<UV_Image_Path>',\n path.normpath(path.join(p,\n FILE_NAME)).replace('\\\\', '\\\\\\\\'\n )).replace('<Texture_PSD_Name>',\n '%s.psd' % shaderName)\n\n tempDir = tempfile.gettempdir()\n scriptFile = 'psScript.jsx'\n\n p = path.join(tempDir, scriptFile)\n f = open(p, 'w')\n f.write(PS_SCRIPT)\n f.close()\n\n cmd = '\"%s\" \"%s\"' % (path.normpath(ac.PHOTOSHOP_PATH),\n path.normpath(p))\n process = QProcess()\n process.startDetached(cmd)", "def system_7(seed_img_file, in_dir, out_dir, threshold, num_frames, num_prev_frames, blend_coef, blur=(3,3), as_numeric=True, stretched=True):\n pass", "def create_base_image(self, builder, template, parameters):", "def photorealistic(case_config_filepath, render_config_filepath):\n\n # Load config file with all common directory names\n dirname_config = configparser.ConfigParser()\n dirname_config.read(\"dirname.cfg\")\n\n # Get information from config files\n cconfd = load_config.get_config_params(case_config_filepath) # Case file\n rconfd = load_config.get_config_params(render_config_filepath) # Render file\n\n # Main output directory\n case_output = dirname_config[\"DIRECTORIES\"][\"RenderOutput\"] + cconfd[\"case_name\"] + \"/\"\n \n # Determine interface geometry output dir\n geometry_output_dir = case_output + dirname_config[\"DIRECTORIES\"][\"ply\"]\n\n # Determine individual frame output dir\n image_output_dir_spec = dircheck.count_png_dirs(case_output + dirname_config[\"DIRECTORIES\"][\"tstep_sequence_photorealistic\"])\n dircheck.check_make(image_output_dir_spec) # Make it if nonexistent\n \n # Write Blender config file\n blender_config_filedir = case_output + rconfd[\"render_name\"] + \"_blender.cfg\"\n load_config.write_config_file(config_filedir=blender_config_filedir,\n config_dict={\"image_output_dir_spec\": image_output_dir_spec,\n \"ply_input_dir\": geometry_output_dir,\n \"interface_material_name\": \"WaterMaterial5\",\n \"bg_image_filepath\": rconfd[\"bg_image_filepath\"],\n \"view_fraction\": cconfd[\"dropd\"]/rconfd[\"droplet_scale\"],\n \"render_scale\": 10,\n \"resolution_percentage\": rconfd[\"resolution_percentage\"],\n \"xres\": cconfd[\"xres\"], \"yres\": cconfd[\"yres\"], \"zres\": cconfd[\"zres\"],\n \"tres\": cconfd[\"tres\"],\n \"interface_half_enabled\": rconfd[\"interface_half_enabled\"],\n \"fog_enabled\": rconfd[\"fog_enabled\"],\n \"camera_azimuth_angle\": rconfd[\"camera_azimuth_angle\"],\n \"camera_elevation_angle\": rconfd[\"camera_elevation_angle\"],\n \"bg_color_1\": rconfd[\"bg_color_1\"], \"bg_color_2\": rconfd[\"bg_color_2\"]})\n\n # Extract droplet interface geometry\n convert_data.conv_ply(h5dns_path=cconfd[\"h5dns_path\"], output_dir=geometry_output_dir, tres=int(cconfd[\"tres\"]))\n\n # Extract vapor fog (YV) if enabled\n if rconfd[\"fog_enabled\"]:\n # Determine individual fog dir and make it if necessary\n fog_halved = rconfd[\"fog_half_enabled\"]\n fog_dir_specifier = str(rconfd[\"fog_vapor_min\"]) + \"halved\" + str(fog_halved) + \"/\"\n 
bvox_output_dir_spec = case_output + dirname_config[\"DIRECTORIES\"][\"bvox\"] + fog_dir_specifier\n dircheck.check_make(bvox_output_dir_spec)\n # Convert fog data\n convert_data.conv_bvox(h5dns_path=cconfd[\"h5dns_path\"], output_dir=bvox_output_dir_spec, tres=int(cconfd[\"tres\"]), vapor_min=float(rconfd[\"fog_vapor_min\"]), fog_halved=fog_halved)\n # Add fog dir to Blender config file\n load_config.write_config_file(config_filedir=blender_config_filedir, config_dict={\"bvox_input_dir\": bvox_output_dir_spec}, append_config=True)\n \n # Launch Blender to perform rendering\n blender_launcher.launch_blender_new(blender_config_filedir=blender_config_filedir, python_name=\"droplet_render.py\", blend_name=\"droplet_render.blend\")", "def example_BSR():\n pts = [(1,1),(2,2),(3,3)]\n lines = [ [ (1,1), (1,2), (2,1)], [ (6,1), (1,6), (5,-1)] ]\n\n bloody_simple_2drender('2d_render.png', pts=pts, vecs=pts, lines=lines )", "def _create_image(self):\n if hasattr(self, '_image') and self._image:\n return self._image\n try:\n command = \"tex2im -b transparent -t cyan\"\n subprocess.run([*command.split(), self._formula])\n except Exception as e:\n import traceback\n print(traceback.format_exc())\n return None\n # tex2im converts to out.png by default\n img = Image.open('out.png').convert('RGBA')\n # create a new rgba image to blend the latex with the alpha\n subprocess.run([\"rm\", \"out.png\"])\n return img", "def main():\n parser = argparse.ArgumentParser(description=\"Process a wavefront object file\")\n parser.add_argument('--width', help=\"Width of output image\", dest='width',\n type=int, default=800)\n parser.add_argument('--height', help=\"Height of output image\", dest='height',\n type=int, default=600)\n parser.add_argument('--out', help=\"Name of output image file\", dest='output', type=str,\n default='output.png')\n parser.add_argument('filename', help=\"Alias Wavefront file to read as input\", type=str)\n args = parser.parse_args()\n\n back_buffer = Buffer(args.width, args.height)\n screen = Screen(back_buffer)\n\n obj = Wavefront(args.filename)\n print(\"Processing {}\".format(args.filename))\n\n max_extent = max(obj.v_extent[0], obj.v_extent[1])\n scale = min(args.width / max_extent, args.height / max_extent)\n translate = ((args.width / 2) + obj.v_min[0] + obj.v_max[0],\n (args.height / 2) + obj.v_min[1] + obj.v_max[1])\n print(\"Using scale: {}\".format(scale))\n\n def conv_x(x):\n \"\"\" Converts x coordinate.\"\"\"\n return int(x * scale + translate[0])\n def conv_y(y):\n \"\"\" Converts y coordinate.\"\"\"\n return int(y * scale + translate[1])\n\n for f in obj.f:\n v1 = obj.v[f[0]]\n v2 = obj.v[f[1]]\n v3 = obj.v[f[2]]\n screen.draw_line((conv_x(v1[0]), conv_y(v1[1])),\n (conv_x(v2[0]), conv_y(v2[1])),\n (255, 255, 255))\n screen.draw_line((conv_x(v2[0]), conv_y(v2[1])),\n (conv_x(v3[0]), conv_y(v3[1])),\n (255, 255, 255))\n screen.draw_line((conv_x(v3[0]), conv_y(v3[1])),\n (conv_x(v1[0]), conv_y(v1[1])),\n (255, 255, 255))\n\n print(\"Minimum vector: {}\".format(obj.v_min))\n print(\"Maximum vector: {}\".format(obj.v_max))\n print(\"Calculated extent: {}\".format(obj.v_extent))\n\n back_buffer.write_to_png(args.output)\n print(\"Written output to {}\".format(args.output))", "def generate_image(self):\n pass", "def setRenderSettings(filePath):\n cache.values[\"engine\"] = bpy.context.scene.render.engine\n cache.values[\"transparent\"] = bpy.context.scene.render.film_transparent\n\n cache.values[\"filepath\"] = bpy.context.scene.render.filepath\n cache.values[\"format\"] = 
bpy.context.scene.render.image_settings.file_format\n cache.values[\"mode\"] = bpy.context.scene.render.image_settings.color_mode\n cache.values[\"depth\"] = bpy.context.scene.render.image_settings.color_depth\n\n cache.values[\"resolutionX\"] = bpy.context.scene.render.resolution_x\n cache.values[\"resolutionY\"] = bpy.context.scene.render.resolution_y\n cache.values[\"percentage\"] = bpy.context.scene.render.resolution_percentage\n cache.values[\"aspectX\"] = bpy.context.scene.render.pixel_aspect_x\n cache.values[\"aspectY\"] = bpy.context.scene.render.pixel_aspect_y\n\n # Define the necessary render settings.\n bpy.context.scene.render.engine = 'BLENDER_EEVEE'\n bpy.context.scene.render.film_transparent = True\n\n bpy.context.scene.render.filepath = filePath\n bpy.context.scene.render.image_settings.file_format = 'PNG'\n bpy.context.scene.render.image_settings.color_mode = 'RGBA'\n bpy.context.scene.render.image_settings.color_depth = '8'\n\n bpy.context.scene.render.resolution_x = IMAGE_SIZE\n bpy.context.scene.render.resolution_y = IMAGE_SIZE\n bpy.context.scene.render.resolution_percentage = 100\n bpy.context.scene.render.pixel_aspect_x = 1.0\n bpy.context.scene.render.pixel_aspect_y = 1.0\n\n # Store the current world.\n cache.values[\"world\"] = bpy.context.scene.world", "def __init__(self, name):\r\n super(OffScreenTexture, self).__init__(name)\r\n from pi3d.Display import Display\r\n self.ix, self.iy = Display.INSTANCE.width, Display.INSTANCE.height\r\n self.im = Image.new(\"RGBA\",(self.ix, self.iy))\r\n self.image = self.im.convert(\"RGBA\").tostring('raw', \"RGBA\")\r\n self.alpha = True\r\n self.blend = False\r\n\r\n self._tex = ctypes.c_int()\r\n self.framebuffer = (ctypes.c_int * 1)()\r\n opengles.glGenFramebuffers(1, self.framebuffer)\r\n self.depthbuffer = (ctypes.c_int * 1)()\r\n opengles.glGenRenderbuffers(1, self.depthbuffer)", "def load_obj_render_BSR(objfile):\n #load a 3d model and render it in 2D\n obj = object3d() \n obj.load(objfile)\n \n #obj.scale_pts( (.1,.2,.1) )\n #obj.rotate_pts( (.1,.1,.1) )\n\n #obj2 = object3d() \n #obj2.load('objects/monkey.obj')\n\n bloody_simple_2drender('2d_render.png', obj=[obj], gridsize=100)", "def create_dataset():\n with open(\"/root/config.json\", \"r\") as f:\n config = json.load(f)\n\n # create environmental variables\n for (key, value) in config.items():\n os.environ[key] = str(value)\n\n # run blender\n command = '/usr/lib/blender/blender {} --python {} --background'.\\\n format(\"/root/models/default.blend\", \"/root/rendering.py\")\n os.system(command)\n\n # post processing\n post_processing()", "def construct_blend_dict(assets):\r\n\tblend_dict = {}\r\n\tfor blend in assets:\r\n\t\tblend_dict[blend] = []\r\n\r\n\t\tfile = open(blend, 'rb')\r\n\t\tdata = str(file.read())\r\n\t\tfile.close()\r\n\r\n\t\tparts = data.split(\"OB\")\r\n\t\tfor n in range(len(parts)):\r\n\t\t\tobj_name = \"\"\r\n\t\t\tt = 0\r\n\r\n\t\t\tif n != 0 and parts[n - 1][-2:] == \"00\":\r\n\t\t\t\tif parts[n][:4] != \"JECT\":\r\n\t\t\t\t\tif parts[n][t] not in [\"\\\\\", \" \", \"?\", \".\", \")\", \"(\"]:\r\n\t\t\t\t\t\twhile (parts[n][t] not in [\"\\\\\", \" \"]):\r\n\t\t\t\t\t\t\tobj_name += parts[n][t]\r\n\t\t\t\t\t\t\tt += 1\r\n\r\n\t\t\tif obj_name:\r\n\t\t\t\t#blend_dict[obj_name] = blend\r\n\t\t\t\tblend_dict[blend].append(obj_name)\r\n\r\n\treturn blend_dict", "def export_image(self, name):\n\t\tred = Color(\"red\")\n\t\tblue = Color(\"blue\")\n\t\twhite = Color(\"white\")\n\t\tblack = Color(\"black\")\n\t\tgold = Color(\"gold\")\n\t\trgb_gold 
= []\n\t\tfor part in gold.rgb:\n\t\t\tpart = part * 255\n\t\t\trgb_gold.append(part)\n\t\trgb_black = []\n\t\tfor part in black.rgb:\n\t\t\tpart = part * 255\n\t\t\trgb_black.append(part)\n\t\trgb_white = []\n\t\tfor part in white.rgb:\n\t\t\tpart = part * 255\n\t\t\trgb_white.append(part)\n\t\tcolours = list(red.range_to(blue, int(self.grains)))\n\t\timage = np.zeros([self.space.shape[1],self.space.shape[0], 3], dtype=np.uint(8))\n\t\tfor grain in range(self.grains+1):\n\t\t\trgb = []\n\t\t\tfor part in colours[grain-1].rgb:\n\t\t\t\tpart = part * 255\n\t\t\t\trgb.append(part)\n\t\t\tfor cell in self.space.flat:\n\t\t\t\tif cell.state == grain:\n\t\t\t\t\tx,y = cell.find_id()\n\t\t\t\t\timage[x,y] = rgb\n\t\t\t\tif cell.state == 999:\n\t\t\t\t\tx,y = cell.find_id()\n\t\t\t\t\timage[x,y] = rgb_black\n\t\t\t\tif cell.state == 500:\n\t\t\t\t\tx,y = cell.find_id()\n\t\t\t\t\timage[x,y] = rgb_gold\n\t\timg = Image.fromarray(image.astype('uint8'))\n\t\timg = img.resize((self.space.shape[1]*3,self.space.shape[0]*3))\n\t\timg.save('./static/temp/'+str(name)+'.png')", "def read_layout(outFile=None, linked=False, append=False):\n from cgl.plugins.blender.lumbermill import scene_object, LumberObject, import_file\n from cgl.core.utils.read_write import load_json\n import bpy\n\n if outFile == None:\n outFileObject = scene_object().copy(ext='json', task='lay', user='publish').latest_version()\n outFileObject.set_attr(filename='%s_%s_%s.%s' % (outFileObject.seq,\n outFileObject.shot,\n outFileObject.task,\n 'json'\n ))\n outFile = outFileObject.path_root\n # outFile = scene_object().path_root.replace(scene_object().ext, 'json')\n\n\n\n data = load_json(outFile)\n\n for p in data:\n print(p)\n data_path = data[p]['source_path']\n blender_transform = data[p]['blender_transform']\n\n transform_data = []\n for value in blender_transform:\n transform_data.append(value)\n\n print(transform_data)\n\n pathToFile = os.path.join(scene_object().root, data_path)\n lumberObject = LumberObject(pathToFile)\n\n\n\n if lumberObject.filename in bpy.data.libraries:\n lib = bpy.data.libraries[lumberObject.filename]\n bpy.data.batch_remove(ids=([lib]))\n import_file(lumberObject.path_root, linked=linked, append=append)\n else:\n import_file(lumberObject.path_root, linked=linked, append=append)\n\n if p not in bpy.context.collection.objects:\n obj = bpy.data.objects.new(p, None)\n bpy.context.collection.objects.link(obj)\n obj.instance_type = 'COLLECTION'\n obj.instance_collection = bpy.data.collections[lumberObject.asset]\n obj.location = (transform_data[0], transform_data[1], transform_data[2])\n obj.rotation_euler = (transform_data[3], transform_data[4], transform_data[5])\n obj.scale = (transform_data[6], transform_data[7], transform_data[8])\n\n bpy.ops.file.make_paths_relative()", "def render_save(scene, cam, globalIdx, trajDir, camDir, NI=1280, NJ=720):\n #render image/convert to bimg\n expimg = scene.render(cam, NI, NJ);\n bimg = convert_image(expimg); \n exp_fname = trajDir + \"/exp_%(#)06d.png\"%{\"#\":globalIdx};\n save_image(bimg, exp_fname); \n\n #save cam\n cam_name = camDir + \"/cam_%(#)06d.txt\"%{\"#\":globalIdx}\n save_perspective_camera(cam, cam_name)\n remove_from_db([cam, expimg, bimg])", "def img_map_bg(wts):\n tex = bpy.data.textures[wts.texture]\n image_mapBG = \"\"\n # texture_coords refers to the mapping of world textures:\n if wts.texture_coords in [\"VIEW\", \"GLOBAL\"]:\n image_mapBG = \" map_type 0 \"\n elif wts.texture_coords == \"ANGMAP\":\n image_mapBG = \" map_type 1 \"\n elif 
wts.texture_coords == \"TUBE\":\n image_mapBG = \" map_type 2 \"\n\n if tex.use_interpolation:\n image_mapBG += \" interpolate 2 \"\n if tex.extension == \"CLIP\":\n image_mapBG += \" once \"\n # image_mapBG += \"}\"\n # if wts.mapping == 'CUBE':\n # image_mapBG += \"warp { cubic } rotate <-90,0,180>\"\n # no direct cube type mapping. Though this should work in POV 3.7\n # it doesn't give that good results(best suited to environment maps?)\n # if image_mapBG == \"\":\n # print(\" No background texture image found \")\n return image_mapBG" ]
[ "0.77292585", "0.6207573", "0.5900598", "0.57756275", "0.57517344", "0.5726966", "0.5690768", "0.56802726", "0.56633824", "0.5639718", "0.5580907", "0.5566535", "0.55639535", "0.5551473", "0.5547673", "0.55438566", "0.5523262", "0.5518169", "0.551783", "0.5502739", "0.5483207", "0.54776984", "0.5476126", "0.5472224", "0.544397", "0.54403526", "0.543519", "0.54298383", "0.5406926", "0.5395843" ]
0.6494286
1
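Several negatives in the row that ends here cache Blender render settings, override them for a square transparent PNG render, and keep the old values so they can be restored afterwards. The sketch below is a minimal, standalone version of that save/override/restore pattern against Blender's bpy API; it is not taken from any row in this dataset, and the render_thumbnail name, IMAGE_SIZE constant, and output path are placeholders introduced here.

import bpy

IMAGE_SIZE = 256  # placeholder thumbnail resolution


def render_thumbnail(file_path):
    """Render a square, transparent PNG preview and restore the scene settings."""
    render = bpy.context.scene.render

    # Save the settings we are about to override.
    saved = {
        "filepath": render.filepath,
        "file_format": render.image_settings.file_format,
        "color_mode": render.image_settings.color_mode,
        "resolution_x": render.resolution_x,
        "resolution_y": render.resolution_y,
        "resolution_percentage": render.resolution_percentage,
        "film_transparent": render.film_transparent,
    }

    try:
        # Override for the preview render.
        render.filepath = file_path
        render.image_settings.file_format = 'PNG'
        render.image_settings.color_mode = 'RGBA'
        render.film_transparent = True
        render.resolution_x = IMAGE_SIZE
        render.resolution_y = IMAGE_SIZE
        render.resolution_percentage = 100

        bpy.ops.render.render(write_still=True)
    finally:
        # Put everything back the way we found it.
        render.filepath = saved["filepath"]
        render.image_settings.file_format = saved["file_format"]
        render.image_settings.color_mode = saved["color_mode"]
        render.resolution_x = saved["resolution_x"]
        render.resolution_y = saved["resolution_y"]
        render.resolution_percentage = saved["resolution_percentage"]
        render.film_transparent = saved["film_transparent"]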
This is a bigram collocation finder.
def collocationFinder(document,nbest=4):
    chain = lambda x : list(itertools.chain(*pos.tokenize_words(pos.tokenize_sents(x))))
    stopset = set(stopwords.words('english'))
    filter_stops = lambda w: len(w) < 3 or w in stopset
    bcf = BigramCollocationFinder.from_words(chain(document))
    bcf.apply_word_filter(filter_stops)
    return bcf.nbest(BigramAssocMeasures.likelihood_ratio, 4)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bigram_finder(self):\n return BigramCollocationFinder(self.word_fd, self.bigram_fd)", "def __init__(self, word_fd, bigram_fd, window_size=2):\n AbstractCollocationFinder.__init__(self, word_fd, bigram_fd)\n self.window_size = window_size", "def extract_collocations(records, num_collocations, collocation_window, compare_collocations = False):\r\n bigram_measures = BigramAssocMeasures()\r\n bigram_finder = BigramCollocationFinder.from_words(records, window_size=collocation_window)\r\n bigram_finder.apply_freq_filter(min_freq=3)\r\n best_collocations = bigram_finder.nbest(bigram_measures.raw_freq, num_collocations)\r\n print(\"=====The {:d} Most Frequent Collocations=====\".format(num_collocations))\r\n pprint.pprint(best_collocations)\r\n if compare_collocations:\r\n print(\"=====The {:d} Best Collocations (Pointwise Mutual Information)=====\".format(num_collocations))\r\n pprint.pprint(bigram_finder.nbest(bigram_measures.pmi, num_collocations))\r\n print(\"=====The {:d} Best Collocations (Student's t test)=====\".format(num_collocations))\r\n pprint.pprint(bigram_finder.nbest(bigram_measures.student_t, num_collocations))\r\n print(\"=====The {:d} Best Collocations (Chi-square test)=====\".format(num_collocations))\r\n pprint.pprint(bigram_finder.nbest(bigram_measures.chi_sq, num_collocations))\r\n print(\"=====The {:d} Best Collocations (Mutual Information)=====\".format(num_collocations))\r\n pprint.pprint(bigram_finder.nbest(bigram_measures.mi_like, num_collocations))\r\n print(\"=====The {:d} Best Collocations (Likelihood Ratios)=====\".format(num_collocations))\r\n pprint.pprint(bigram_finder.nbest(bigram_measures.likelihood_ratio, num_collocations))\r\n print(\"=====The {:d} Best Collocations (Poisson Stirling)=====\".format(num_collocations))\r\n pprint.pprint(bigram_finder.nbest(bigram_measures.poisson_stirling, num_collocations))\r\n print(\"=====The {:d} Best Collocations (Jaccard Index)=====\".format(num_collocations))\r\n pprint.pprint(bigram_finder.nbest(bigram_measures.jaccard, num_collocations))\r\n print(\"=====The {:d} Best Collocations (Phi-square test)=====\".format(num_collocations))\r\n pprint.pprint(bigram_finder.nbest(bigram_measures.phi_sq, num_collocations))\r\n print(\"=====The {:d} Best Collocations (Fisher's Exact Test)=====\".format(num_collocations))\r\n pprint.pprint(bigram_finder.nbest(bigram_measures.fisher, num_collocations))\r\n print(\"=====The {:d} Best Collocations (Dice's Coefficient)=====\".format(num_collocations))\r\n pprint.pprint(bigram_finder.nbest(bigram_measures.dice, num_collocations))\r\n return best_collocations", "def findChar(self, position, spaceLength ):\n leer=0 ## numeator of empty column\n Queue=[] ##this will help in serching for neighbours of pixels\n PiksList=[] ##list of balck piksels, of with consist the charakter\n length, high = self.getSize()\n \n while (position < length and self.vLineHistogram(position)==0): #serching for a first not empty line, for given position\n position+=1\n leer+=1\n if position == length: ## check if it is Space or it is End of line\n return position, \"Enter\", 0\n elif leer>=spaceLength:\n return position, \"Space\", 0\n else:\n for i in range(0,high): ##extracting all black pixels from this line\n if self.getPixel(position, i)<128:\n Queue.append((position, i))\n PiksList.append((position, i))\n\n while len(Queue)>0:\n Piksel=Queue.pop(0) ##geting firs element from Queue\n neighbourhood=[(Piksel[0]-1, Piksel[1]+1),(Piksel[0]-1, Piksel[1]),(Piksel[0]-1, Piksel[1]-1),(Piksel[0], 
Piksel[1]+1),(Piksel[0], Piksel[1]-1),(Piksel[0]+1, Piksel[1]+1),(Piksel[0]+1, Piksel[1]),(Piksel[0]+1, Piksel[1]-1)]\n ##to co wyzej to lista współrzędnych sąsiadów Piksela\n\n for neighbour in neighbourhood: ##cheking neighbourhood of each pixel\n if not(neighbour in PiksList) and (neighbour[0] in range(0,length)) and (neighbour[1] in range(0,high)) and self.getPixel(neighbour[0],neighbour[1])==0:\n Queue.append(neighbour)\n PiksList.append(neighbour)\n \n PiksList.sort() ##sorts list with number of column\n\n \n PiksList=self.addHigherPiks(PiksList) ##adds all piksel over finden pixels\n PiksList.sort()\n position1,High1=PiksList[0]\n position2,High2=PiksList[len(PiksList)-1] ## geting number of smalest and biggest column in group\n charLength=position2-position1\n if len(PiksList)>5: ##checkin if there are more then 5 piksels in group to eliminate case, when there are single pixels not eliminated by initial fomating\n if charLength<high: ##check if the length of finden group of pixels isn't bigger then length of tile\n newPosition= position1+(charLength/2) ##new position in the center of finden char to eliminate case, when one char is over the second\n Char=CharFrame(high,high) ##create new CrarFrame object\n \n for el in PiksList: ##making all pixels in PiksList black in ChatFrame object and white in self(LineFrame object)\n Char.putPixel(el[0]-position1,el[1])\n self.makeWhite(el[0],el[1])\n \n Char.reScale(30,30) #scaling CharFrame to the ening size\n \n return newPosition, Char, charLength/2\n\n else: ##length of goup of pixels is too big\n PiksList, Char = reconChar(PiksList,high) ## finding where to divide group of pixels\n for Piks in PiksList:\n self.makeWhite(Piks[0],Piks[1])\n position1,High1=PiksList[0]\n position2,High2=PiksList[len(PiksList)-1] ## geting number of smalest and biggest column in group\n charLength=position2-position1\n newPosition= position1+(charLength/2) ##new position in the center of finden char to eliminate case, when one char is over the second\n return newPosition, Char, charLength/2\n else: ##if there is less then 5 pixels in group\n for el in PiksList: ##making all pixels in PiksList white in self(LineFrame object)\n self.makeWhite(el[0],el[1])\n newPosition= position1+(charLength/2)\n return newPosition, \"None\", charLength/2", "def collocation_list(self, num=20, window_size=2):\n if not (\n \"_collocations\" in self.__dict__\n and self._num == num\n and self._window_size == window_size\n ):\n self._num = num\n self._window_size = window_size\n\n # print(\"Building collocations list\")\n from nltk.corpus import stopwords\n\n ignored_words = stopwords.words(\"english\")\n finder = BigramCollocationFinder.from_words(self.tokens, window_size)\n finder.apply_freq_filter(2)\n finder.apply_word_filter(lambda w: len(w) < 3 or w.lower() in ignored_words)\n bigram_measures = BigramAssocMeasures()\n self._collocations = list(\n finder.nbest(bigram_measures.likelihood_ratio, num)\n )\n return self._collocations", "def find_tile(self, query='*'):\n for pos, char in self:\n if char == query:\n return pos", "def getBigram(self, word1, word2):\n \n bigram = Bigram(\"NULL_DNE\", \"NULL_DNE\", 0, 0)\n boolFound = False \n \n if word1 in self.words:\n if word2 in self.words[word1].bigrams:\n bigram = self.words[word1].bigrams[word2]\n boolFound = True \n \n return bigram, boolFound", "def challenge2(self):\n # Let's try an octree-type approach\n # For each grid cube we should be able to find whether a nanobot:\n # 1) is not in range (is outside grid cube and not in 
range of nearest face)\n # 2) is in range of whole cube (all 8 corners are in range)\n # 3) is in range of part of the cube (i.e. not 1 or 2)\n # Root node: figure out extent of whole space\n mins = []\n maxs = []\n for axis in range(3):\n mins.append(min(self.nanobots, key=lambda n: n.coord[axis]).coord[axis])\n maxs.append(max(self.nanobots, key=lambda n: n.coord[axis]).coord[axis])\n\n for count in range(len(self.nanobots), 0, -1):\n results = self.search_coord_with_max_nanobots(mins, maxs, [], self.nanobots, count)\n if results and results[0].count >= count:\n break\n\n print(f\"Found {len(results)} octree search results with {results[0].count} nanobots in range.\")\n\n # Find result coord closest to origin\n closest_dist = np.iinfo(np.int32).max\n best_coord = None\n for result in results:\n for corner in itertools.product(*zip(result.mins, result.maxs)):\n d = manhattan_dist(corner, (0, 0, 0))\n if d < closest_dist:\n closest_dist = d\n best_coord = corner\n\n print(f\"Best coord: {best_coord} (dist={manhattan_dist(best_coord, (0, 0, 0))})\")", "def find_word(self,word):\r\n self.start_pos = []\r\n #check each row\r\n for i in range(0,len(self.wordsearch)):\r\n #check each column\r\n for j in range(0, len(self.wordsearch[i])):\r\n #find all coordinates which have the first letter of the word and store them\r\n if self.wordsearch[i][j] == self.word[0]:\r\n self.start_pos.append([i,j])\r\n \r\n \r\n #print(count)\r\n for pos in self.start_pos:\r\n if self.check_start(self.word, pos):\r\n \r\n return", "def location_of(self, c: str) -> tuple:\n\n c = c.upper()\n if c == 'J': c = 'I'\n\n row = 0\n while row < 5:\n col = self.key[row].find(c)\n\n if col != -1:\n return (row, col)\n\n row += 1\n\n raise ValueError(\"couldn't find letter %r in matrix %r\" % (c, self.key))", "def translate_to_grid(location):\n\n columns = 'abcdefghi'\n return [int(columns.index(location[0].lower())), int(location[1:])-1]", "def find_base_match(char, matrix):\n base_matches = [(row_index, column_index) for row_index, row in enumerate(matrix)\n for column_index, column in enumerate(row)\n if char == column]\n\n return base_matches", "def research_pos(self, map_list, character): \n list_pos = []\n for y in range(15): \n for x, c in enumerate(map_list[y]):\n if character in c and c == character:\n list_pos.append((x*50, y*50)) \n return list_pos", "def getSearchSpaceCoords(self):", "def __init__(self, word_fd, bigram_fd, wildcard_fd, trigram_fd):\n AbstractCollocationFinder.__init__(self, word_fd, trigram_fd)\n self.wildcard_fd = wildcard_fd\n self.bigram_fd = bigram_fd", "def _findBottom(self,col):\n min = GAME_HEIGHT\n mpos = 0\n for x in range(self.getLengthAlien()):\n if self._aliens[x][col] != None and self._aliens[x][col].y < min:\n min = self._aliens[x][col].y\n mpos = x\n return mpos", "def toindex(col, row):\n a2z = 'ABCDEFGHIJLKMNOPQRSTUVWXYZ'\n\n total = 0\n mult = 0\n for char in col:\n total += (a2z.find(char) + (26 * mult))\n mult += 1\n\n return total, row - 1", "def __init__(self, word_fd, quadgram_fd, ii, iii, ixi, ixxi, iixi, ixii):\n AbstractCollocationFinder.__init__(self, word_fd, quadgram_fd)\n self.iii = iii\n self.ii = ii\n self.ixi = ixi\n self.ixxi = ixxi\n self.iixi = iixi\n self.ixii = ixii", "def find_word(target):\n results = []\n string = \"\"\n\n for a in range(0, len(grid)):\n for b in range(0, len(grid[a])):\n # Create strings on rows in the grid.\n string += grid[a][b]\n # Is the target is his string?\n if target in string:\n # Find the target by index in the string.\n index = 
string.index(target)\n # The target string was found at the row and index.\n results += [(a, index)]\n string = \"\"\n\n for b in range(0, len(grid[0])):\n for a in range(0, len(grid)):\n # Create strings based on the columns of the grid.\n string += grid[a][b]\n # Is the target in this string?\n if target in string:\n # Find the target by index in the string.\n index = string.index(target)\n # The target string was found at the index and column.\n results += [(index, b)]\n string = \"\"\n\n return results", "def make_collocation_graph(self, target_word, top = 15, before = 4, after = 4, limit = 1000, exp=1):\n\n self.collocations(target_word, before=before, after=after, limit=limit)\n coll = self.sort_collocations(target_word, exp = exp)\n target_graf = dict()\n edges = []\n for word in coll[:top].index:\n edges.append((target_word, word))\n if word.isalpha():\n self.collocations(word, before=before, after=after, limit=limit)\n for w in self.sort_collocations(word, exp = exp)[:top].index:\n if w.isalpha():\n edges.append((word, w)) \n\n target_graph = nx.Graph()\n target_graph.add_edges_from(edges)\n self.coll_graph[target_word] = target_graph\n return target_graph", "def bigram_representation(data):\r\n vec = CountVectorizer(ngram_range=(1,2))\r\n vec = vec.fit(data)\r\n return vec", "def main(board, word):\r\n for i, row in enumerate(board):\r\n for j, square in enumerate(row):\r\n if square == word[0]:\r\n res = neighbors(board, word, i, j)\r\n if res:\r\n return True\r\n return False", "def test_double_word_coombe_martin(self):\n result = location.lookup_location('Combe Martin GB')\n\n self.assertEqual(result['country'], 'GB')", "def _local_search(self):\n\n # Set occupancies of rigid cluster and its direct neighboring atoms to\n # 1 for clash detection and MIQP\n selection = self.ligand._selection\n self.ligand._active[selection] = True\n center = self.ligand.coor[self._cluster].mean(axis=0)\n new_coor_set = []\n new_bs = []\n for coor, b in zip(self._coor_set, self._bs):\n self.ligand._coor[selection] = coor\n self.ligand._b[selection] = b\n rotator = GlobalRotator(self.ligand, center=center)\n for rotmat in RotationSets.get_local_set():\n rotator(rotmat)\n translator = Translator(self.ligand)\n iterator = itertools.product(\n *[np.arange(*trans) for trans in self._trans_box]\n )\n for translation in iterator:\n translator(translation)\n new_coor = self.ligand.coor\n if self.options.remove_conformers_below_cutoff:\n values = self.xmap.interpolate(new_coor)\n mask = self.ligand.e != \"H\"\n if np.min(values[mask]) < self.options.density_cutoff:\n continue\n if self.options.external_clash:\n if not self._cd() and not self.ligand.clashes():\n if new_coor_set:\n delta = np.array(new_coor_set) - np.array(new_coor)\n if (\n np.sqrt(\n min(np.square((delta)).sum(axis=2).sum(axis=1))\n )\n >= self.options.rmsd_cutoff\n ):\n new_coor_set.append(new_coor)\n new_bs.append(b)\n else:\n new_coor_set.append(new_coor)\n new_bs.append(b)\n elif not self.ligand.clashes():\n if new_coor_set:\n delta = np.array(new_coor_set) - np.array(new_coor)\n if (\n np.sqrt(min(np.square((delta)).sum(axis=2).sum(axis=1)))\n >= self.options.rmsd_cutoff\n ):\n new_coor_set.append(new_coor)\n new_bs.append(b)\n else:\n new_coor_set.append(new_coor)\n new_bs.append(b)\n self.ligand._active[self.ligand._selection] = False\n selection = self.ligand._selection[self._cluster]\n self.ligand._active[selection] = True\n for atom in self._cluster:\n atom_sel = self.ligand._selection[self.ligand.connectivity[atom]]\n 
self.ligand._active[atom_sel] = True\n self.conformer = self.ligand\n self._coor_set = new_coor_set\n self._bs = new_bs\n if len(self._coor_set) < 1:\n logger.warning(\n f\"{self.ligand.resn[0]}: \"\n f\"Local search {self._cluster_index}: {len(self._coor_set)} conformers\"\n )\n return\n\n # QP score conformer occupancy\n logger.debug(\"Converting densities.\")\n self._convert()\n self._solve_qp()\n logger.debug(\"Updating conformers\")\n self._update_conformers()\n if self.options.write_intermediate_conformers:\n self._write_intermediate_conformers(prefix=\"localsearch_ligand_qp\")\n if len(self._coor_set) < 1:\n logger.warning(\n f\"{self.ligand.resn[0]}: \"\n f\"Local search QP {self._cluster_index}: {len(self._coor_set)} conformers\"\n )\n return\n\n # MIQP score conformer occupancy\n self._convert()\n self._solve_miqp(\n threshold=self.options.threshold, cardinality=self.options.cardinality\n )\n self._update_conformers()\n if self.options.write_intermediate_conformers:\n self._write_intermediate_conformers(prefix=\"localsearch_ligand_miqp\")", "def column_similarity (self, row, col):\n my_number = self.board[row][col]\n \n for i in range (9):\n if (i,col) == (row,col):\n continue\n elif self.board[i][col] == my_number:\n return [i, col, False] \n else:\n continue", "def big_fun_search(self, grid_size, pokemon_locations, index):\n queue = [index]\n discovered = [index]\n visible = []\n\n if self.get_game()[index] == FLAG:\n return queue\n\n number = self.number_at_cell(pokemon_locations, grid_size, index)\n if number != 0:\n return queue\n\n while queue:\n node = queue.pop()\n for neighbour in self.neighbour_directions(node, grid_size):\n if neighbour in discovered:\n continue\n\n discovered.append(neighbour)\n if self._game_board[neighbour] != FLAG:\n number = self.number_at_cell(pokemon_locations, grid_size, neighbour)\n if number == 0:\n queue.append(neighbour)\n visible.append(neighbour)\n return visible", "def find_center(garden):\n\n # Find center: j is 'row', i is 'col'\n # Initialize j and i, rows and cols\n j, i = -1, -1\n num_rows = len(garden)\n num_cols = len(garden[0])\n\n # This section should be cleaned up before pushing\n if num_rows % 2 != 0:\n j = num_rows // 2\n else:\n j1, j2 = num_rows // 2, num_rows // 2 - 1\n\n if j != -1:\n if num_cols % 2 != 0:\n i = num_cols // 2\n else:\n # Find the most carrots near the center of the row j\n i = garden[j].index(max(garden[j][num_cols//2], garden[j][num_cols//2]-1))\n\n else:\n if num_cols % 2 != 0:\n i1 = garden[j1][num_cols//2]\n i2 = garden[j2][num_cols//2]\n else:\n i1 = max(garden[j1][num_cols//2], garden[j1][num_cols//2]-1)\n i2 = max(garden[j2][num_cols//2], garden[j2][num_cols//2]-1)\n\n ival = max(i1, i2)\n if ival == i1:\n j = j1\n else:\n j = j2\n\n i = garden[j].index(ival)\n\n return (j, i)", "def matched_neighbors(coord, second_char, matrix, row_length, column_length):\n row_number, column_number = coord\n neighbors_coordinates = [(row, column) for row in xrange(row_number - 1, row_number + 2)\n for column in xrange(column_number - 1, column_number + 2)\n if row_length > row >= 0 and column_length > column >= 0\n and coord_char((row, column), matrix) == second_char\n and not (row, column) == coord]\n\n return neighbors_coordinates", "def _getCountForBigram(self, word1, word2):\n return self.bigrams[(word1,word2)]", "def search_clues(self):\r\n print(\"\\n************Searching Clues************\\n\")\r\n for word_id in self.words.keys():\r\n if not self.words[word_id].see and not self.words[word_id].wth:\r\n 
clue = pop_backslash(self.words[word_id].clue)\r\n temp = word_domain(\"allintext:\" + clue +' -crossword',self.words[word_id].length)\r\n temp2 = temp + word_domain(clue +' -crossword',self.words[word_id].length)\r\n domain = temp2 + data_muse(clue, self.words[word_id].length)\r\n unique_list = []\r\n for x in domain: \r\n y = x.upper()\r\n # check if exists in unique_list or not \r\n if y not in unique_list: \r\n unique_list.append(y) \r\n \r\n self.words[word_id].assign_word_domain(unique_list)\r\n print(\"\\nSearch is done...\")" ]
[ "0.7552091", "0.55829513", "0.55793124", "0.55691856", "0.54569393", "0.5451188", "0.5396927", "0.5359675", "0.5291153", "0.51844424", "0.51592106", "0.50812316", "0.5008657", "0.4991801", "0.498291", "0.49716583", "0.49510488", "0.49447197", "0.49431813", "0.49225318", "0.4891009", "0.4866482", "0.48599923", "0.4854896", "0.4850726", "0.4847701", "0.48473254", "0.4847179", "0.4825331", "0.48232284" ]
0.6254757
1
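The row that closes here pairs the collocation-finder query with a document built on NLTK's BigramCollocationFinder. As a rough illustration of how that API is typically driven, the sketch below reproduces the same filter-and-score pattern using plain NLTK tokenizers; the sample text, the top_n parameter, and the top_collocations name are placeholders introduced here, and the 'punkt' and 'stopwords' NLTK data packages are assumed to be installed.

import itertools

from nltk.collocations import BigramAssocMeasures, BigramCollocationFinder
from nltk.corpus import stopwords
from nltk.tokenize import sent_tokenize, word_tokenize


def top_collocations(text, top_n=4):
    """Return the top_n bigram collocations of a raw text, mirroring the
    document function in the row above but with standard NLTK tokenizers."""
    # Flatten the text into a single token stream.
    tokens = list(itertools.chain(*(word_tokenize(s) for s in sent_tokenize(text))))

    # Drop short tokens and stopwords before scoring.
    stopset = set(stopwords.words("english"))
    finder = BigramCollocationFinder.from_words(tokens)
    finder.apply_word_filter(lambda w: len(w) < 3 or w.lower() in stopset)

    return finder.nbest(BigramAssocMeasures.likelihood_ratio, top_n)


if __name__ == "__main__":
    sample = "The quick brown fox jumps over the lazy dog. The quick brown fox naps."
    print(top_collocations(sample))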
Given a set of tagger classes and conll2000 training sentences, this function returns a good backoff POS tagger.
def backoff_tagger(train_sents, tagger_classes, backoff=None):
    for cls in tagger_classes:
        backoff = cls(train_sents,backoff=backoff)

    return backoff
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_backoff_tagger():\n\n\treturn backoff_tagger(treebank.tagged_sents(), \n\t\t[UnigramTagger, BigramTagger, TrigramTagger],\n\t\tbackoff=DefaultTagger('NN'))", "def naiveBayes(train_set, train_labels, dev_set, smoothing_parameter, pos_prior):\r\n # TODO: Write your code here\r\n # return predicted labels of development set\r\n retval = []\r\n smoothing_parameter = 0.0055\r\n # Generate a unigram BOW for both positive and negative reviews, choose the top 2500 words\r\n pos_bow, neg_bow = generate_unigram_BOW(train_set, train_labels)\r\n sorted_pos = sorted(pos_bow.items(), key=lambda x: x[1], reverse = True)\r\n sorted_neg = sorted(neg_bow.items(), key=lambda x: x[1], reverse = True)\r\n pos_words = sorted_pos[:].copy()\r\n neg_words = sorted_neg[:].copy()\r\n\r\n pos_bi_bow, neg_bi_bow = generate_bigram_BOW(train_set, train_labels)\r\n sorted_bi_pos = sorted(pos_bi_bow.items(), key=lambda x: x[1], reverse = True)\r\n sorted_bi_neg = sorted(neg_bi_bow.items(), key=lambda x: x[1], reverse = True)\r\n bi_pos_words = sorted_bi_pos[:].copy()\r\n bi_neg_words = sorted_bi_neg[:].copy()\r\n\r\n # Calculate the log probabilities each word given type\r\n pos_count = sum(pair[1] for pair in pos_words)\r\n neg_count = sum(pair[1] for pair in neg_words)\r\n bi_pos_count = sum(pair[1] for pair in bi_pos_words)\r\n bi_neg_count = sum(pair[1] for pair in bi_neg_words)\r\n\r\n log_probability_pos = {} #(word)->P(word|positive)\r\n log_probability_neg = {} #(word)->P(word|negative)\r\n log_prob_bi_pos = {}\r\n log_prob_bi_neg = {}\r\n\r\n for pair in pos_words:\r\n pos_prob = np.log((pair[1]+smoothing_parameter)/(pos_count+smoothing_parameter*(len(pos_words) + 1)))\r\n log_probability_pos[pair[0]] = pos_prob\r\n\r\n for pair in neg_words:\r\n neg_prob = np.log((pair[1]+smoothing_parameter)/(neg_count+smoothing_parameter*(len(neg_words) + 1)))\r\n log_probability_neg[pair[0]] = neg_prob\r\n\r\n for pair in bi_pos_words:\r\n bi_pos_prob = np.log((pair[1]+smoothing_parameter)/(bi_pos_count+smoothing_parameter*(len(bi_pos_words) + 1)))\r\n log_prob_bi_pos[pair[0]] = bi_pos_prob\r\n\r\n for pair in bi_neg_words:\r\n bi_neg_prob = np.log((pair[1]+smoothing_parameter)/(bi_neg_count+smoothing_parameter*(len(bi_neg_words) + 1)))\r\n log_prob_bi_neg[pair[0]] = bi_neg_prob\r\n # Finished training\r\n\r\n # For each of the new reviews from development data\r\n for review in dev_set:\r\n uni_pos = np.log(pos_prior)\r\n uni_neg = np.log(1 - pos_prior)\r\n for word in review:\r\n if word in log_probability_pos:\r\n uni_pos += log_probability_pos[word]\r\n elif word not in log_probability_pos:\r\n uni_pos += np.log(smoothing_parameter/(pos_count+smoothing_parameter*(len(pos_words) + 1)))\r\n\r\n if word in log_probability_neg:\r\n uni_neg += log_probability_neg[word]\r\n elif word not in log_probability_neg:\r\n uni_neg += np.log(smoothing_parameter/(neg_count+smoothing_parameter*(len(neg_words) + 1)))\r\n\r\n bi_pos = np.log(pos_prior)\r\n bi_neg = np.log(1 - pos_prior)\r\n for i in range(len(review)-1):\r\n currTuple = (review[i], review[i+1])\r\n if currTuple in log_prob_bi_pos:\r\n bi_pos += log_prob_bi_pos[currTuple]\r\n elif currTuple not in log_prob_bi_pos:\r\n bi_pos += np.log(smoothing_parameter/(bi_pos_count+smoothing_parameter*(len(bi_pos_words) + 1)))\r\n\r\n if currTuple in log_prob_bi_neg:\r\n bi_neg += log_prob_bi_neg[currTuple]\r\n elif currTuple not in log_prob_bi_neg:\r\n bi_neg += np.log(smoothing_parameter/(bi_neg_count+smoothing_parameter*(len(bi_neg_words) + 1)))\r\n\r\n MAP_pos = 
(1-0.4)*uni_pos + 0.4*bi_pos\r\n MAP_neg = (1-0.4)*uni_neg + 0.4*bi_neg\r\n\r\n if MAP_pos >= MAP_neg:\r\n retval.append(1)\r\n else:\r\n retval.append(0)\r\n\r\n return retval", "def create_tagger():\n train_sents = brown.tagged_sents()\n\n # These regexes were lifted from the NLTK book tagger chapter.\n t0 = nltk.RegexpTagger(\n [(r'^-?[0-9]+(.[0-9]+)?$', 'CD'), # cardinal numbers\n (r'(The|the|A|a|An|an)$', 'AT'), # articles\n (r'.*able$', 'JJ'), # adjectives\n (r'.*ness$', 'NN'), # nouns formed from adjectives\n (r'.*ly$', 'RB'), # adverbs\n (r'.*s$', 'NNS'), # plural nouns\n (r'.*ing$', 'VBG'), # gerunds\n (r'.*ed$', 'VBD'), # past tense verbs\n (r'.*', 'NN') # nouns (default)\n ])\n t1 = nltk.UnigramTagger(train_sents, backoff=t0)\n t2 = nltk.BigramTagger(train_sents, backoff=t1)\n t3 = nltk.TrigramTagger(train_sents, backoff=t2)\n return t3", "def naiveBayes(train_set, train_labels, dev_set, smoothing_parameter, pos_prior):\n # TODO: Write your code here\n # return predicted labels of development set\n #\n ### len(train_set) 8000, len(dev) = 5000 P(pos) = 0.8 \n #### 0.55, 4.0, 0.30 ----------- 0.766\n #### 0.25 3.5 0.3 -------------- 0.766\n print(pos_prior)\n smoothing_parameter = 3.5\n pos_total_word = 0\n neg_total_word = 0\n pos_word_dict = {}\n neg_word_dict = {}\n dicts = [neg_word_dict, pos_word_dict]\n for i, sentence in enumerate(train_set):\n\n if train_labels[i] == 1: # positive reviews\n for word in sentence:\n pos_total_word += 1 \n if word in stop_words:\n continue\n if word in pos_word_dict:\n pos_word_dict[word] += 1\n else :\n pos_word_dict[word] = 1\n\n else: # negative reviews\n for word in sentence:\n neg_total_word += 1 \n if word in stop_words:\n continue\n if word in neg_word_dict:\n neg_word_dict[word] += 1\n else :\n neg_word_dict[word] = 1\n\n\n prob = {}\n denominator_pos = pos_total_word + smoothing_parameter * (len(pos_word_dict) + 1)\n denominator_neg = neg_total_word + smoothing_parameter * (len(neg_word_dict) + 1)\n de = [denominator_neg, denominator_pos]\n\n for t, dictionary in enumerate(dicts):\n for key, value in dictionary.items():\n if key not in prob:\n prob[key] = {0 : 0, 1 : 0}\n if smoothing_parameter != 0:\n prob[key][1 - t] = -1 * np.log(smoothing_parameter / de[t]) \n # print(prob[key][1 - t])\n\n prob[key][t] = -1 * np.log((value + smoothing_parameter) / de[t]) \n \n\n revised_prob = {}\n for key, value in prob.items():\n if np.abs(value[0] - value[1]) >= 0.25:\n revised_prob[key] = value \n\n print(len(revised_prob))\n\n dev_labels = []\n num_0 = 0\n for i, sentence in enumerate(dev_set):\n pos_odd = -1 * np.log(pos_prior)\n neg_odd = -1 * np.log(1.0 - pos_prior)\n for word in sentence:\n if word in revised_prob:\n pos_odd += revised_prob[word][1]\n neg_odd += revised_prob[word][0]\n \n if pos_odd > neg_odd:\n num_0 += 1\n dev_labels.append(1 if pos_odd <= neg_odd else 0)\n print(num_0)\n\n \n #### bigram model \n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n return dev_labels", "def naiveBayes(train_set, train_labels, dev_set, smoothing_parameter, pos_prior):\n # TODO: Write your code here\n # Bag of words model\n smoothing_parameter = .007 # override smoothing parameter\n posWordBag = {}\n negWordBag = {}\n for reviewIndex, review in enumerate(train_set):\n for word in review:\n if isWordTooCommon(word) == True: # If word is too common, skip the word\n continue\n else:\n if train_labels[reviewIndex] == 1: # Positive\n if word not in posWordBag.keys():\n posWordBag[word] = 1\n else:\n posWordBag[word] += 1\n elif train_labels[reviewIndex] == 0: # 
Negative\n if word not in negWordBag.keys():\n negWordBag[word] = 1\n else:\n negWordBag[word] += 1\n \n posProbList, posUNK = genProb(posWordBag, smoothing_parameter)\n negProbList, negUNK = genProb(negWordBag, smoothing_parameter)\n\n # Done with training. Now development with MAP\n dev_labels = []\n for devRev in dev_set:\n reviewIsPos = math.log10(pos_prior)\n reviewIsNeg = math.log10(1 - pos_prior)\n for word in devRev:\n if isWordTooCommon(word) == True: # If word is too common, skip the word\n continue\n else:\n if word in posProbList.keys():\n reviewIsPos += posProbList[word]\n else:\n reviewIsPos += posUNK\n if word in negProbList.keys():\n reviewIsNeg += negProbList[word]\n else:\n reviewIsNeg += negUNK\n if reviewIsPos < reviewIsNeg:\n dev_labels.append(0)\n else:\n dev_labels.append(1)\n\n # return predicted labels of development set\n return dev_labels", "def predicted_tags(classification): \n # translate classification into tag_ids and weights\n try:\n doc = [[tag_id, int(weight/classification_threshold)]\n for tag_id, weight in enumerate(classification)\n if weight > classification_threshold]\n\n # add contribution from all terms in all similar LDA topics\n tag_suggestions = defaultdict(int)\n for topic, weight in lda[doc]:\n for weight, term in lda.show_topic(topic):\n if \"class:\" not in term:\n tag_suggestions[term] += weight\n\n # turn weights into actual suggestions and take topN values\n return [tag for tag in sorted(tag_suggestions,\n key=tag_suggestions.get,\n reverse=True)\n if tag_suggestions[tag] > suggestion_threshold][:topN]\n except IndexError:\n return []", "def tag_ngram_123_backoff(self, untagged_string: str):\n untagged_tokens = wordpunct_tokenize(untagged_string)\n tagger = self._load_model(\"ngram_123_backoff\")\n tagged_text = tagger.tag(untagged_tokens)\n return tagged_text", "def tag(text, pos_tagger):\n features = [get_crf_features([word for word in sent]) for sent in text]\n tags = pos_tagger.predict(features)\n tagged_text = []\n for i in range(len(text)):\n tagged_sent = []\n for j in range(len(text[i])):\n tagged_sent.append((text[i][j], tags[i][j]))\n tagged_text.append(tagged_sent)\n #print(tags)\n return tags, tagged_text", "def train(self, corpus):\n self.tokens = []\n self.tags = []\n sentences = corpus.split(NEW_LINE)\n for sentence in sentences:\n start = START_SIGHT + SLASH + START_SIGHT + SPACE + START_SIGHT + SLASH + START_SIGHT + SPACE\n end = SPACE + END + SLASH + END\n sentence = start + sentence + end \n tokens = sentence.split(SPACE)\n for t in tokens:\n token = t.rsplit(SLASH, 1)\n if (len(token) > 1):\n self.tokens.append(token) \n self.tags.append(token[TAG_INDEX])\n \n nonsense_cases = set([(END, START_SIGHT), (START_SIGHT, END),\n (START_SIGHT, START_SIGHT, END),\n (END, START_SIGHT, START_SIGHT)])\n self.bigram_tags = [b for b in zip(self.tags[:-1], self.tags[1:]) if b not in nonsense_cases]\n self.trigram_tags = [t for t in zip(self.tags[:-1], self.tags[1:], self.tags[2:])\\\n if not (t[WORD_INDEX], t[TAG_INDEX]) in nonsense_cases and\\\n not (t[WORD_INDEX], t[TAG_INDEX]) in nonsense_cases]", "def naiveBayesMixture(train_set, train_labels, dev_set, bigram_lambda,unigram_smoothing_parameter, bigram_smoothing_parameter, pos_prior):\n\n # TODO: Write your code here\n # return predicted labels of development set\n\n # counters for Training Phase\n ham = Counter()\n ham_bi = Counter()\n spam = Counter()\n spam_bi = Counter()\n\n for string, label in zip(train_set, train_labels):\n for i in range(len(string)):\n word = string[i]\n 
if i != len(string)-1:\n word_bi = string[i] + ' ' + string[i+1]\n if label == 1:\n ham_bi.update({word_bi:1})\n else:\n spam_bi.update({word_bi:1})\n if label == 1:\n ham.update({word:1})\n else:\n spam.update({word:1})\n\n ham_len = 0\n for w in ham:\n ham_len += ham[w]\n spam_len = 0\n for w in spam:\n spam_len += spam[w]\n \n hambi_len = 0\n for w in ham_bi:\n hambi_len += ham_bi[w]\n spambi_len = 0\n for w in spam_bi:\n spambi_len += spam_bi[w]\n\n # labels for Development Phase\n dev_labels = []\n # dicts for P(word|ham) and P(word|spam)\n p_ham = {}\n p_spam = {}\n p_hambi = {}\n p_spambi = {}\n\n # develop likelihoods based on dev_set\n for word in ham:\n numerator = ham[word] + unigram_smoothing_parameter\n denominator = ham_len + unigram_smoothing_parameter*(len(ham))\n p_ham[word] = numerator / denominator\n for word in spam:\n numerator = spam[word] + unigram_smoothing_parameter\n denominator = spam_len + unigram_smoothing_parameter*(len(spam))\n p_spam[word] = numerator / denominator\n\n for word_bi in ham_bi:\n numerator = ham_bi[word_bi] + bigram_smoothing_parameter\n denominator = hambi_len + bigram_smoothing_parameter*(len(ham_bi))\n p_hambi[word_bi] = numerator / denominator\n for word_bi in spam_bi:\n numerator = spam_bi[word_bi] + bigram_smoothing_parameter\n denominator = spambi_len + bigram_smoothing_parameter*(len(spam_bi))\n p_spambi[word_bi] = numerator / denominator\n \n numerator = unigram_smoothing_parameter\n denominator = ham_len + unigram_smoothing_parameter*(len(ham))\n p_ham_zero = numerator / denominator\n denominator = spam_len + unigram_smoothing_parameter*(len(spam))\n p_spam_zero = numerator / denominator\n\n numerator = bigram_smoothing_parameter\n denominator = hambi_len + bigram_smoothing_parameter*(len(ham_bi))\n p_hambi_zero = numerator / denominator\n denominator = spambi_len + bigram_smoothing_parameter*(len(spam_bi))\n p_spambi_zero = numerator / denominator\n\n for string in dev_set:\n p_words_ham = math.log(pos_prior)\n p_words_spam = math.log(1 - pos_prior)\n\n p_words_hambi = math.log(pos_prior)\n p_words_spambi = math.log(1 - pos_prior)\n \n for i in range(len(string)):\n word = string[i]\n if word in p_ham:\n p_words_ham += math.log(p_ham[word])\n else:\n p_words_ham += math.log(p_ham_zero)\n if word in p_spam:\n p_words_spam += math.log(p_spam[word])\n else:\n p_words_spam += math.log(p_spam_zero)\n\n if i != len(string)-1:\n word_bi = string[i] + ' ' + string[i+1]\n if word_bi in p_hambi:\n p_words_hambi += math.log(p_hambi[word_bi])\n else:\n p_words_hambi += math.log(p_hambi_zero)\n if word_bi in p_spambi:\n p_words_spambi += math.log(p_spambi[word_bi])\n else:\n p_words_spambi += math.log(p_spambi_zero)\n\n p_ham_mix = p_words_ham*(1-bigram_lambda) + p_words_hambi*bigram_lambda\n p_spam_mix = p_words_spam*(1-bigram_lambda) + p_words_spambi*bigram_lambda\n\n dev_labels.append(p_ham_mix >= p_spam_mix)\n\n return dev_labels", "def pos_tag(\n words: List[str], engine: str = \"perceptron\", corpus: str = \"orchid\"\n) -> List[Tuple[str, str]]:\n _corpus = corpus\n _tag = []\n if corpus == \"orchid_ud\":\n corpus = \"orchid\"\n if not words:\n return []\n\n if engine == \"perceptron\":\n from .perceptron import tag as tag_\n elif engine == \"artagger\":\n tag_ = _artagger_tag\n else: # default, use \"unigram\" (\"old\") engine\n from .unigram import tag as tag_\n _tag = tag_(words, corpus=corpus)\n\n if _corpus == \"orchid_ud\":\n _tag = _orchid_to_ud(_tag)\n\n return _tag", "def tag_ngram_12_backoff(self, untagged_string: str):\n 
untagged_tokens = wordpunct_tokenize(untagged_string)\n tagger = self._load_model(\"ngram_12_backoff\")\n tagged_text = tagger.tag(untagged_tokens)\n return tagged_text", "def train(self, tagged_sentences: Iterator[Tuple[TokenSeq, PosSeq]]) -> Tuple[NDArray, NDArray]:\n #add tokens\n for sentence in tagged_sentences:\n tokens, pos_tags = sentence\n for pos in pos_tags:\n self.pos_tags.append(pos)\n pos_tags.insert(0, \"<s>\")\n pos_tags.pop(len(pos_tags) - 1)\n for i in range(0, len(tokens)):\n temp_dict = {}\n temp_dict = add_features(tokens,pos_tags[i],i, temp_dict)\n self.features.append(temp_dict)\n #print(self.features)\n feature_matrix = self.vectorizer.fit_transform(self.features)\n label_vector = self.le.fit_transform(self.pos_tags)\n for i in range(0, len(label_vector)):\n self.l[self.pos_tags[i]] = i\n \n self.feature_matrix = feature_matrix\n self.label_vector = label_vector\n self.clf.fit(self.feature_matrix, self.label_vector)\n\n return (self.feature_matrix, label_vector)", "def viterbi_tags (untagged_sentences, h):\n transitions = h[0]\n emissions = h[1]\n tags = h[2]\n maxtags = []\n #print tags\n\n for untaggedsent in untagged_sentences:\n #Create empty probtable\n words = untaggedsent.split()\n r = len(tags)\n c = len(words)\n probtable = [None]*r\n for i in range(r):\n probtable[i] = [None]*c\n for j in range(c):\n probtable[i][j] = [None]*2\n\n #Initialize zeroth column of probtable\n prevtag = '<START>'\n word = words[0]\n for i in range(r):\n tag = tags[i]\n\n transition = transitions[prevtag][tag]\n if word in emissions[tag]:\n emission = emissions[tag][word]\n else:\n emission = .0001*emissions[tag]['<UNKNOWN>']\n\n probtable[i][0][0] = transition*emission\n \n #Fill in probtable\n for j in range(1, c):\n word = words[j]\n for i in range(r):\n tag = tags[i]\n maxprob = 0\n maxtag = None\n\n if word in emissions[tag]:\n emission = emissions[tag][word]\n else:\n emission = .0001*emissions[tag]['<UNKNOWN>']\n\n for k in range(r):\n prevtag = tags[k]\n transition = transitions[prevtag][tag]\n prob = probtable[k][j-1][0]*transition*emission\n \n if (prob > maxprob):\n maxprob = prob\n maxtag = k\n\n probtable[i][j][0] = maxprob\n probtable[i][j][1] = maxtag\n\n #Find most likely sequence of POS tags of this sentence\n sentmaxtags = maxsequence(probtable, tags)\n maxtags.extend(sentmaxtags)\n\n #Return most likely sequence of POS tags of all sentences\n return maxtags", "def pos_tag(self, sentence):\n tags = []\n tokens = sentence.split(\" \")\n for i in range(len(tokens)):\n tags.append('')\n for i in range (len(tokens)):\n feat = []\n feat.append(self.features(tokens,tags,i))\n tag_predicted = self.postagger.predict(feat)[0]\n tags[i] = tag_predicted\n return tags", "def _train(self, tagged_corpus, cutoff=0, verbose=False):\n\n token_count = hit_count = 0\n\n # A context is considered 'useful' if it's not already tagged\n # perfectly by the backoff tagger.\n useful_contexts = set()\n\n # Count how many times each tag occurs in each context.\n fd = ConditionalFreqDist()\n for sentence in tagged_corpus:\n tokens, tags = zip(*sentence)\n for index, (token, tag) in enumerate(sentence):\n # Record the event.\n token_count += 1\n context = self.context(tokens, index, tags[:index])\n if context is None:\n continue\n fd[context].inc(tag)\n # If the backoff got it wrong, this context is useful:\n if (self.backoff is None or\n tag != self.backoff.tag_one(tokens, index, tags[:index])):\n useful_contexts.add(context)\n\n # Build the context_to_tag table -- for each context, 
figure\n # out what the most likely tag is. Only include contexts that\n # we've seen at least `cutoff` times.\n for context in useful_contexts:\n best_tag = fd[context].max()\n hits = fd[context][best_tag]\n if hits > cutoff:\n self._context_to_tag[context] = best_tag\n hit_count += hits", "def PredictClerLabel(sentence, model_cler, word2vec):\n \n tokenized_sample = word_tokenize(re.sub(\"-\",\" \",sentence))\n features = np.mean([word2vec.word_vec(w) for w in tokenized_sample if w in word2vec],axis=0)\n prediction = model_cler.predict_proba(features.reshape(1,-1))[0]\n return model_cler.classes_[prediction.argmax()]", "def tagging (tagged_dataset, sentence) :\n tagged = open(tagged_dataset, \"rb\")\n tagged_ingredients = pickle.load(tagged)\n\n back = nltk.DefaultTagger('COMMENT')\n unigram_tagger = nltk.UnigramTagger(tagged_ingredients,backoff=back)\n bigram_tagger = nltk.BigramTagger(tagged_ingredients, backoff=unigram_tagger)\n tagged_ing = unigram_tagger.tag(sentence)\n \n return tagged_ing", "def MostCommonClassBaseline(training_set, test_set):\n pos_counts_dict = defaultdict(dict)\n max_pos_dict = dict()\n test_common_tags = []\n\n # Dictionary \"pos_counts_dict\" stores a dictionary for each word that stores counts of each pos of the word\n # This loop runs for each sentence (word, pos) in \"training_set\"\n for sentence in training_set:\n # This loop runs for each tuple (word, pos) in sentence\n for word_pos in sentence:\n # if word (word_pos[0]) not in \"pos_counts_dict\"\n if word_pos[0] not in pos_counts_dict:\n pos_counts_dict[word_pos[0]] = defaultdict(int)\n # increment for each tuple (word, pos) in sentence\n pos_counts_dict[word_pos[0]][word_pos[1]] += 1\n\n # Find most frequent tag associated to each word and store it in \"max_pos_dict\"\n # This loop runs for each word in \"pos_counts_dict\"\n for word in pos_counts_dict:\n count = 0\n tag = str()\n\n # This loop runs for each tag of the word\n for pos in pos_counts_dict[word]:\n if pos_counts_dict[word][pos] > count:\n count = pos_counts_dict[word][pos]\n tag = pos\n max_pos_dict[word] = tag\n\n # Match tag in \"max_pos_dict\" for each word of \"test_set\" and store in \"test_common_tags\"\n\n # This loop runs for each sentence (word, pos) in \"test_set\"\n for sentence in test_set:\n temp_sentence = []\n # This loop runs for no. 
of tuples (word, pos) in sentence\n for i in xrange(len(sentence)):\n # if word is in \"pos_counts_dict\" then store tuple (word, max count) in \"temp_sentence\"\n if sentence[i][0] not in pos_counts_dict:\n print \"Word not in training_set:\", tup[0]\n else:\n temp_sentence.append((sentence[i][0], max_pos_dict[sentence[i][0]]))\n test_common_tags.append(temp_sentence)\n\n return test_common_tags", "def naiveBayes(train_set, train_labels, dev_set, smoothing_parameter, org_dev_labels):\r\n\r\n # set to false to use bigram implementation instead\r\n # isUnigram = True\r\n isUnigram = True\r\n\r\n # return predicted labels of development set\r\n spam_words, spam_wordcount = parseIntoWordList(train_set, train_labels, 1)\r\n ham_words, ham_wordcount = parseIntoWordList(train_set, train_labels, 0)\r\n\r\n spamWords, spamProbs, spamUNK = createProbabilitiesList(spam_words, spam_wordcount, smoothing_parameter)\r\n hamWords, hamProbs, hamUNK = createProbabilitiesList(ham_words, ham_wordcount, smoothing_parameter)\r\n\r\n loggedSpam = np.log(spamProbs)\r\n loggedSpamUNK = np.log(spamUNK)\r\n loggedHam = np.log(hamProbs)\r\n loggedHamUNK = np.log(hamUNK)\r\n\r\n # Unigram\r\n dev_spam = []\r\n dev_ham = []\r\n\r\n dev_labels = []\r\n\r\n if isUnigram:\r\n for i in range(len(dev_set)):\r\n probSpam = 0\r\n probHam = 0\r\n\r\n for word in dev_set[i]:\r\n if word in spamWords:\r\n index = spamWords.index(word)\r\n probSpam += loggedSpam[index]\r\n else:\r\n probSpam += loggedSpamUNK\r\n\r\n if word in hamWords:\r\n index = hamWords.index(word)\r\n probHam += loggedHam[index]\r\n else:\r\n probHam += loggedHamUNK\r\n\r\n if (probSpam > probHam):\r\n dev_labels.append(1)\r\n else:\r\n dev_labels.append(0)\r\n\r\n else:\r\n for i in range(len(dev_set)):\r\n probSpam = 0\r\n probHam = 0\r\n\r\n for word in dev_set[i]:\r\n if word in spamWords:\r\n index = spamWords.index(word)\r\n probSpam += loggedSpam[index]\r\n else:\r\n probSpam += loggedSpamUNK\r\n\r\n if word in hamWords:\r\n index = hamWords.index(word)\r\n probHam += loggedHam[index]\r\n else:\r\n probHam += loggedHamUNK\r\n dev_spam.append(probSpam)\r\n dev_ham.append(probHam)\r\n # BiGram\r\n bi_spam_words, bi_spam_count = parseIntoBigramList(train_set, train_labels, 1)\r\n bi_ham_words, bi_ham_count = parseIntoBigramList(train_set, train_labels, 0)\r\n\r\n biSpamWords, biSpamProbs, biSpamUNK = createProbabilitiesList(bi_spam_words, bi_spam_count, smoothing_parameter)\r\n biHamWords, biHamProbs, biHamUNK = createProbabilitiesList(bi_ham_words, bi_ham_count, smoothing_parameter)\r\n\r\n biLoggedSpam = np.log(biSpamProbs)\r\n biLoggedSpamUNK = np.log(biSpamUNK)\r\n biLoggedHam = np.log(biHamProbs)\r\n biLoggedHamUNK = np.log(biHamUNK)\r\n\r\n # Bigram\r\n bi_dev_spam = []\r\n bi_dev_ham = []\r\n\r\n for i in range(len(dev_set)):\r\n biProbSpam = 0\r\n biProbHam = 0\r\n curr_email = dev_set[i]\r\n\r\n for j in range(len(curr_email) - 1):\r\n if (j % 2 == 1):\r\n continue\r\n curr_bigram = curr_email[j] + ' ' + curr_email[j + 1]\r\n\r\n if curr_bigram in biSpamWords:\r\n index = biSpamWords.index(curr_bigram)\r\n biProbSpam += biLoggedSpam[index]\r\n else:\r\n biProbSpam += biLoggedSpamUNK\r\n\r\n if curr_bigram in biHamWords:\r\n index = biHamWords.index(curr_bigram)\r\n biProbHam += biLoggedHam[index]\r\n else:\r\n probHam += biLoggedHamUNK\r\n bi_dev_spam.append(probSpam)\r\n bi_dev_ham.append(probHam)\r\n\r\n # Weights the models (1-lambda) multiplier for unigram and lamba multiplier for bigram\r\n dev_labels = getBigram(bi_dev_ham, bi_dev_spam, 
dev_ham, dev_set, dev_spam, org_dev_labels)\r\n\r\n return dev_labels", "def train(self, documents):\n ###DONE\n\n #entire vocab in document set D\n vocab_sod = set()\n vocab_pop = set()\n \n #Calcuates prior probabilities\n priorSOD = 0 #how many docs are spam\n priorPOP = 0 #how many docs are ham\n \n #Cacluates Tct\n term_freq_sod = {} #{term:occur, term:occur}\n term_freq_pop = {}\n \n #Tct'\n Tct_sod = 0 #Tct' = sum of (every term occurence in class c + 1)\n Tct_pop = 0\n \n for doc in documents: \n if 'sod' in doc.label:\n priorSOD += 1\n for token in doc.tokens:\n Tct_sod += 1\n if token in term_freq_sod.keys():\n term_freq_sod[token] = term_freq_sod[token] + 1\n else:\n term_freq_sod[token] = 1\n vocab_sod.add(token) \n else:\n priorPOP += 1\n for token in doc.tokens:\n Tct_pop += 1\n if token in term_freq_pop.keys():\n term_freq_pop[token] = term_freq_pop[token] + 1\n else:\n term_freq_pop[token] = 1\n vocab_pop.add(token)\n \n \n #endfor\n # | is for set join\n self.vocab = vocab_sod | vocab_pop #gets rid of duplicate words (those in both 'ham' and 'spam') \n \n #Tct Primes\n #tct' = term freq of all terms in class c + 1*(total terms)\n Tct_sod = Tct_sod + len(self.vocab) \n Tct_pop = Tct_pop + len(self.vocab) \n \n \n print(\"PriorSod: \" + str(priorSOD))\n print(\"PriorPop: \" + str(priorPOP))\n print(\"LEN Docum: \" + str(len(documents)))\n \n self.priorSOD = priorSOD / len(documents)\n self.priorPOP = priorPOP / len(documents)\n \n for term in self.vocab:\n if term in term_freq_pop.keys():\n self.cond_prob_pop[term] = (term_freq_pop[term] + 1) / Tct_pop\n else:\n self.cond_prob_pop[term] = 1 / Tct_pop\n \n if term in term_freq_sod.keys():\n self.cond_prob_sod[term] = (term_freq_sod[term] + 1) / Tct_sod\n else:\n self.cond_prob_sod[term] = 1 / Tct_sod\n \n \n pass", "def train(self, corpus):\n for sentence in corpus.corpus:\n cleanSentence = sentence.cleanSentence()\n for datum in cleanSentence.data:\n token = datum.word\n self.unigramCounts[token] = self.unigramCounts[token] + 1\n self.total += 1\n\n i = 0\n while i < len(sentence.data) - 1:\n token = str(cleanSentence.get(i))\n self.followingWords[token].add(str(cleanSentence.get(i+1)))\n i += 1\n\n i = 1\n while i < len(sentence.data):\n bigram = str(cleanSentence.get(i-1)) + \" \" + str(cleanSentence.get(i))\n self.bigramCounts[bigram] = self.bigramCounts[bigram] + 1\n\n self.precedingWords[str(cleanSentence.get(i))].add(str(cleanSentence.get(i-1)))\n i += 1\n self.precedingWordsTotal = sum(map(lambda x: len(x), self.precedingWords.values()))\n\n i = 2\n while i < len(sentence.data):\n trigram = str(cleanSentence.get(i-2)) + \" \" + str(cleanSentence.get(i-1)) + \" \" + str(cleanSentence.get(i))\n self.trigramCounts[trigram] = self.trigramCounts[trigram] + 1\n i += 1\n\n #print('precedingWords')\n #print(self.precedingWords)\n #print('followingWords')\n #print(self.followingWords)\n #print('unigrams')\n #print(self.unigramCounts)\n #print('bigrams')\n #print(self.bigramCounts)\n\n #self.discount(self.trigramCounts)\n #self.discount(self.bigramCounts)\n #self.discount(self.unigramCounts)", "def build_vocab(text_list, threshold, vocab_path=\"word_vocab.pkl\", with_pos=True, tokenizer_type=\"mecab\"):\n\n def do_concurrent_tagging(start, end, text_list, counter):\n jpype.attachThreadToJVM()\n for i, text in enumerate(text_list[start:end]):\n text = text.strip()\n text = text.lower()\n\n try:\n if tokenizer_type == \"mecab\":\n tokens_ko = mecab.pos(text)\n else:\n tokens_ko = twitter.pos(text, norm=True) # , stem=True)\n if 
with_pos is True:\n tokens_ko = [str(pos[0]) + '/' + str(pos[1]) for pos in tokens_ko]\n else:\n tokens_ko = [str(pos[0]) for pos in tokens_ko]\n counter.update(tokens_ko)\n\n if i % 1000 == 0:\n logger.info(\"[%d/%d (total: %d)] Tokenized input text.\" % (\n start + i, start + len(text_list[start:end]), len(text_list)))\n\n except Exception as e: # for Out of memory\n print(e)\n continue\n\n counter = Counter()\n\n num_thread = 4\n thread_list = []\n n_x_text = len(text_list)\n for i in range(num_thread):\n thread_list.append(Thread(target=do_concurrent_tagging, args=(\n int(i * n_x_text / num_thread), int((i + 1) * n_x_text / num_thread), text_list, counter)))\n\n for thread in thread_list:\n thread.start()\n\n for thread in thread_list:\n thread.join()\n\n print(counter.most_common(10)) # print most common words\n words = [word for word, cnt in counter.items() if cnt >= threshold]\n\n vocab = Vocabulary()\n vocab.add_word(PAD)\n vocab.add_word(START_TOKEN)\n vocab.add_word(END_TOKEN)\n vocab.add_word(UNK)\n vocab.add_word(CLS)\n\n for i, word in enumerate(words):\n vocab.add_word(str(word))\n\n with open(vocab_path, 'wb') as f:\n pickle.dump(vocab, f)\n\n return vocab", "def make_thenThan_classifier(window=2, n_estimators=20):\n taggedSentTotal = len(brown.tagged_sents())\n\n thenSentTags = []\n thanSentTags = []\n\n for sentIndex, tagged_sent in enumerate(brown.tagged_sents()):\n sent = [x[0] for x in tagged_sent]\n if ('then' in sent):\n thenInd = sent.index('then')\n tags = [x[1] for x in tagged_sent[max(0,thenInd-window):\n min(thenInd+window+1,len(tagged_sent))\n ]\n ]\n #tags.extend(0)\n thenSentTags.append(tags)\n if ('than' in sent):\n thanInd = sent.index('than')\n tags = [x[1] for x in tagged_sent[max(0,thanInd-window):\n min(thanInd+window+1,len(tagged_sent))\n ]\n ]\n #tags.extend(1)\n thanSentTags.append(tags)\n\n # Convert the lists of then and than tag contexts to pandas dataframes, which we'll\n # then feed to our classifier for training\n\n thenData = pd.DataFrame(thenSentTags)\n thenData.columns = [\"Slot{}\".format(x-window) for x in thenData.columns]\n thenData['th{e|a}n'] = 0\n\n thanData = pd.DataFrame(thanSentTags)\n thanData.columns = [\"Slot{}\".format(x-window) for x in thanData.columns]\n thanData['th{e|a}n'] = 1\n\n allData = thenData.append(thanData)\n allData.drop('Slot0', axis=1, inplace=True)\n \n \"\"\"\n # convert categorical labels to one-hot encoding/dummy variables and specify the input\n # and output of the model\n\n dummyData = pd.get_dummies(allData)\n\n X = dummyData.loc[:,\"Slot-{}_'\".format(window):]\n y = dummyData['th{e|a}n']\n \n # now select and fit a model\n clf = RandomForestClassifier(n_estimators=n_estimators)\n clf.fit(X,y)\n \"\"\"\n \n # The block below is an attempt, following this SO post, to circumvent the nasty\n # dummy encoding wrangling I had to do previously\n # http://stackoverflow.com/questions/38574222/onehotencoded-features-causing-error-when-input-to-classifier/38587625#38587625\n \n X = allData.loc[:,:'Slot2']\n y = allData['th{e|a}n']\n clf = Pipeline([\n ('transformer', DictVectorizer()),\n ('estimator', RandomForestClassifier()),\n ]\n )\n print(y.head())\n print(X.head())\n clf.set_params(estimator__n_estimators=n_estimators).fit(X,y)\n \n return (clf, dummyData, allData)", "def predict(self, tokens: TokenSeq) -> PosSeq:\n _, pos_tags = self.predict_greedy(tokens)\n # _, _, pos_tags = self.predict_viterbi(tokens)\n return pos_tags", "def build_word_pos_vocab(text_list, threshold, word_vocab_path=\"word_vocab.pkl\", 
pos_vocab_path=\"pos_vocab.pkl\", tokenizer_type=\"mecab\"):\n\n def do_concurrent_tagging(start, end, text_list, word_counter, pos_counter):\n jpype.attachThreadToJVM()\n for i, text in enumerate(text_list[start:end]):\n text = text.strip()\n text = text.lower()\n\n try:\n if tokenizer_type == \"mecab\":\n tokens_ko = mecab.pos(text)\n else:\n tokens_ko = twitter.pos(text, norm=True) # , stem=True)\n\n word_tokens_ko = [str(pos[0]) for pos in tokens_ko]\n pos_tokens_ko = [str(pos[1]) for pos in tokens_ko]\n word_counter.update(word_tokens_ko)\n pos_counter.update(pos_tokens_ko)\n\n if i % 1000 == 0:\n print(\"[%d/%d (total: %d)] Tokenized input text.\" % (\n start + i, start + len(text_list[start:end]), len(text_list)))\n\n except Exception as e: # for Out of memory\n print(e)\n continue\n\n word_counter = Counter()\n pos_counter = Counter()\n\n num_thread = 4\n thread_list = []\n n_x_text = len(text_list)\n for i in range(num_thread):\n thread_list.append(Thread(target=do_concurrent_tagging, args=(\n int(i * n_x_text / num_thread), int((i + 1) * n_x_text / num_thread), text_list, word_counter, pos_counter)))\n\n for thread in thread_list:\n thread.start()\n\n for thread in thread_list:\n thread.join()\n\n print(word_counter.most_common(10)) # print most common words\n\n ## Word\n words = [word for word, cnt in word_counter.items() if cnt >= threshold]\n\n word_vocab = Vocabulary()\n word_vocab.add_word(PAD)\n word_vocab.add_word(START_TOKEN)\n word_vocab.add_word(END_TOKEN)\n word_vocab.add_word(UNK)\n word_vocab.add_word(CLS)\n\n for i, word in enumerate(words):\n word_vocab.add_word(str(word))\n\n with open(word_vocab_path, 'wb') as f:\n pickle.dump(word_vocab, f)\n\n ## Pos\n poss = [pos for pos, cnt in pos_counter.items() if cnt >= 0] # insert all pos tag\n pos_vocab = Vocabulary()\n pos_vocab.add_word(PAD)\n pos_vocab.add_word(START_TOKEN)\n pos_vocab.add_word(END_TOKEN)\n pos_vocab.add_word(UNK)\n pos_vocab.add_word(CLS)\n\n for i, pos in enumerate(poss):\n pos_vocab.add_word(str(pos))\n\n with open(pos_vocab_path, 'wb') as f:\n pickle.dump(pos_vocab, f)\n\n return word_vocab, pos_vocab", "def train(self, arg1=None, arg2=None, **kwargs):\n nltk.download('averaged_perceptron_tagger')\n nltk.download('wordnet')\n nltk.download('twitter_samples')\n nltk.download('punkt')\n nltk.download('stopwords')\n nltk.download('vader_lexicon')\n\n positive_tweets = twitter_samples.strings('positive_tweets.json')\n negative_tweets = twitter_samples.strings('negative_tweets.json')\n text = twitter_samples.strings('tweets.20150430-223406.json')\n tweet_tokens = twitter_samples.tokenized('positive_tweets.json')[0]\n\n stop_words = stopwords.words('english')\n\n positive_tweet_tokens = twitter_samples.tokenized('positive_tweets.json')\n negative_tweet_tokens = twitter_samples.tokenized('negative_tweets.json')\n\n positive_cleaned_tokens_list = []\n negative_cleaned_tokens_list = []\n\n for tokens in positive_tweet_tokens:\n positive_cleaned_tokens_list.append(self.remove_noise(tokens, stop_words))\n\n for tokens in negative_tweet_tokens:\n negative_cleaned_tokens_list.append(self.remove_noise(tokens, stop_words))\n\n all_pos_words = self.get_all_words(positive_cleaned_tokens_list)\n\n freq_dist_pos = FreqDist(all_pos_words)\n print(freq_dist_pos.most_common(20))\n\n positive_tokens_for_model = self.get_tweets_for_model(positive_cleaned_tokens_list)\n negative_tokens_for_model = self.get_tweets_for_model(negative_cleaned_tokens_list)\n\n positive_dataset = [(tweet_dict, \"Positive\")\n for tweet_dict 
in positive_tokens_for_model]\n\n negative_dataset = [(tweet_dict, \"Negative\")\n for tweet_dict in negative_tokens_for_model]\n\n dataset = positive_dataset + negative_dataset\n\n random.shuffle(dataset)\n\n train_data = dataset[:7000]\n test_data = dataset[7000:]\n\n self.classifier = NaiveBayesClassifier.train(train_data)", "def most_frequent_eval(test_set, pred_tags):\n gold_tag_seqs = []\n pred_tag_seqs = []\n for sent in test_set:\n words, true_tags = zip(*sent)\n gold_tag_seqs.append(true_tags)\n\n ### YOUR CODE HERE\n DEFAULT_TAG = 'O'\n \n pred_tags_list = []\n for word in words:\n tag = DEFAULT_TAG\n if word in pred_tags:\n tag = pred_tags[word]\n pred_tags_list.append(tag)\n pred_tag_seqs.append(tuple(pred_tags_list)) \n ### END CODE HERE\n\n return evaluate_ner(gold_tag_seqs, pred_tag_seqs)", "def classify(texts: List[str], params: Any) -> List[str]:\n\n alpha = 1\n token_probs_pos = params[\"token_probs_pos\"]\n token_probs_neg = params[\"token_probs_neg\"]\n all_words = params[\"all_words\"]\n M = len(all_words)\n cnt_pos_docs = params[\"cnt_pos_docs\"]\n cnt_neg_docs = params[\"cnt_neg_docs\"]\n\n sum_len_neg = params[\"sum_len_neg\"]\n sum_len_pos = params[\"sum_len_pos\"]\n pos_dict = params[\"pos_dict\"]\n neg_dict = params[\"neg_dict\"]\n\n\n test_texts = preprocessing(texts)\n test_tokenized_texts = text_to_tokens(test_texts)\n \n res = []\n log_pos_probablity = 0\n log_neg_probablity = 0\n i = 0\n for text in test_tokenized_texts:\n if (i % 5000 == 0):\n print(\"Classified\", i, \"texts\")\n i += 1\n log_pos_probablity = log(cnt_pos_docs)\n log_neg_probablity = log(cnt_neg_docs)\n for token in text:\n if (token_probs_pos[token] == 0):\n token_probs_pos[token] = alpha / (alpha * M + sum_len_pos)\n else:\n log_pos_probablity += log(token_probs_pos[token])\n if (token_probs_neg[token] == 0):\n token_probs_neg[token] = alpha / (alpha * M + sum_len_neg)\n else:\n log_neg_probablity += log(token_probs_neg[token])\n if (log_neg_probablity > log_pos_probablity):\n res.append(\"neg\")\n #for token in text:\n # all_words.add(token)\n # M = len(all_words)\n # neg_dict[token] += text[token]\n # sum_len_neg += text[token]\n # token_probs_neg[token] = (alpha + neg_dict[token]) / (alpha * M + sum_len_neg)\n\n else:\n res.append(\"pos\")\n #for token in text:\n # all_words.add(token)\n # M = len(all_words)\n # pos_dict[token] += text[token]\n # sum_len_pos += text[token]\n # token_probs_pos[token] = (alpha + pos_dict[token]) / (alpha * M + sum_len_pos)\n\n\n \n print('Predicted labels counts:')\n print(count_labels(res))\n return res", "def learn(self, docs, labels, alpha=1.0):\n assert len(docs)==len(labels)\n labelCounts = {l: 0 for l in self.CLASSES}\n wordCounts = {l: Counter() for l in self.CLASSES}\n totalWordCounts = {l: 0 for l in self.CLASSES}\n # iterate over documents in order to record\n for i in range(0, len(labels)):\n # count(y) in labelCounts\n l = labels[i]\n labelCounts[labels[i]] +=1\n # count(y,w) for all words in totalWordCounts\n totalWordCounts[labels[i]] += len(docs[i])\n words = docs[i]\n # count(y,word) in wordCounts,\n \n for word in words:\n wordCounts[labels[i]][word] += 1\n # and to store the training vocabulary in self.trainVocab\n self.trainVocab.add(word)\n # compute and store prior distribution over classes\n # (unsmoothed) in self.priorProbs\n print(\"Label,priorProbs,Label Count\", file=sys.stderr)\n for l in self.priorProbs:\n self.priorProbs[l] = np.divide(labelCounts[l], len(labels))\n print(l 
+\",\"+str(self.priorProbs[l])+\",\"+str(labelCounts[l]), file=sys.stderr) #This was for part one\n for word in self.trainVocab: \n self.likelihoodProbs[l][word] = np.divide(wordCounts[l][word]+self.ALPHA, totalWordCounts[l]+self.ALPHA*(len(self.trainVocab)+1))\n self.likelihoodProbs[l]['**OOV**'] = np.divide(self.ALPHA, totalWordCounts[l]+self.ALPHA*(len(self.trainVocab)+1))\n # Sanity checks--do not modify\n assert len(self.priorProbs)==len(self.likelihoodProbs)==len(self.CLASSES)>2\n assert .999 < sum(self.priorProbs.values()) < 1.001\n for y in self.CLASSES:\n assert .999 < sum(self.likelihoodProbs[y].values()) < 1.001,sum(self.likelihoodProbs[y].values())\n assert 0 <= self.likelihoodProbs[y]['**OOV**'] < 1.0,self.likelihoodProbs[y]['**OOV**']" ]
[ "0.6735268", "0.60845643", "0.5994172", "0.5816069", "0.58006746", "0.5712055", "0.55743456", "0.5564887", "0.54723716", "0.5455845", "0.5453202", "0.54045594", "0.5381295", "0.5375442", "0.5366399", "0.5317818", "0.5306032", "0.52813303", "0.5279574", "0.5199976", "0.51904786", "0.51879156", "0.5182076", "0.5175631", "0.51581466", "0.5140601", "0.5116432", "0.5115477", "0.5101463", "0.5086213" ]
0.7153057
0
Returns a backoff tagger that uses a UnigramTagger, BigramTagger, TrigramTagger, and a Default tagger that returns NN
def make_backoff_tagger(): return backoff_tagger(treebank.tagged_sents(), [UnigramTagger, BigramTagger, TrigramTagger], backoff=DefaultTagger('NN'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def backoff_tagger(train_sents, tagger_classes, backoff=None):\n\tfor cls in tagger_classes:\n\t\tbackoff = cls(train_sents,backoff=backoff)\n\treturn backoff", "def create_tagger():\n train_sents = brown.tagged_sents()\n\n # These regexes were lifted from the NLTK book tagger chapter.\n t0 = nltk.RegexpTagger(\n [(r'^-?[0-9]+(.[0-9]+)?$', 'CD'), # cardinal numbers\n (r'(The|the|A|a|An|an)$', 'AT'), # articles\n (r'.*able$', 'JJ'), # adjectives\n (r'.*ness$', 'NN'), # nouns formed from adjectives\n (r'.*ly$', 'RB'), # adverbs\n (r'.*s$', 'NNS'), # plural nouns\n (r'.*ing$', 'VBG'), # gerunds\n (r'.*ed$', 'VBD'), # past tense verbs\n (r'.*', 'NN') # nouns (default)\n ])\n t1 = nltk.UnigramTagger(train_sents, backoff=t0)\n t2 = nltk.BigramTagger(train_sents, backoff=t1)\n t3 = nltk.TrigramTagger(train_sents, backoff=t2)\n return t3", "def tag_ngram_123_backoff(self, untagged_string: str):\n untagged_tokens = wordpunct_tokenize(untagged_string)\n tagger = self._load_model(\"ngram_123_backoff\")\n tagged_text = tagger.tag(untagged_tokens)\n return tagged_text", "def tag_ngram_12_backoff(self, untagged_string: str):\n untagged_tokens = wordpunct_tokenize(untagged_string)\n tagger = self._load_model(\"ngram_12_backoff\")\n tagged_text = tagger.tag(untagged_tokens)\n return tagged_text", "def choose_tag(self, tokens, index, history):\n raise AssertionError('SequentialBackoffTagger is an abstract class')", "def TaggerModel(config):\n tclass = _TAGGERS.get(config.tagger_model)\n if tclass is None:\n raise ValueError('Unknown tagger: %s' % config.tagger_model)\n if not config.label_list and not config.wandb_model:\n raise ValueError('Must specify a label list!')\n\n if config.wandb_model:\n return tclass.load_existing(config)\n return tclass.from_config(config)", "def tagger():", "def tagger(self, include_edgelabels=True):\n if self._tagger:\n return self._tagger\n\n def constructor():\n tagged_sents = self.tagged_sents(include_edgelabels)\n unigram_tagger = nltk.UnigramTagger(tagged_sents)\n bigram_tagger = nltk.BigramTagger(tagged_sents, backoff=unigram_tagger)\n return bigram_tagger\n\n self._tagger = _cached(self._tagger, TigerCorpusReader.STORAGE_ROOT + u\"/\" + TigerCorpusReader.TAGGER_FILE, constructor)\n return self._tagger", "def viterbi_bigrams(transition_probabilities, label_matches, prev_tag, word, tag_possibilities):\r\n\tmax_prob = 0\r\n\tbest_tag = \"\"\r\n\ttag_counts = get_tag_counts(label_matches)\r\n\tfor tag in tag_possibilities:\r\n\t\temissions_probability = get_emissions_probability(label_matches, tag, word, tag_counts)\r\n\t\ttag_bigram = (prev_tag,tag)\r\n\t\ttransition_probability = transition_probabilities.get(tag_bigram, 0.000027)\r\n\t\tprob = emissions_probability * transition_probability\r\n\t\tif prob > max_prob:\r\n\t\t\tmax_prob = prob\r\n\t\t\tbest_tag = tag\r\n\tif best_tag == \"\":\r\n\t\tbest_tag = \"o\"\r\n\treturn best_tag", "def viterbi_trigrams(transition_probabilities, label_matches, prev_tag, prev_prev_tag, word, tag_possibilities):\r\n\tmax_prob = 0\r\n\tbest_tag = \"\"\r\n\ttag_counts = get_tag_counts(label_matches)\r\n\tfor tag in tag_possibilities:\r\n\t\temissions_probability = get_emissions_probability(label_matches, tag, word, tag_counts)\r\n\t\ttag_trigram = (prev_prev_tag, prev_tag,tag)\r\n\t\ttransition_probability = transition_probabilities.get(tag_trigram, 0.000027)\r\n\t\tprob = emissions_probability * transition_probability\r\n\t\tif prob > max_prob:\r\n\t\t\tmax_prob = prob\r\n\t\t\tbest_tag = tag\r\n\tif best_tag == \"\":\r\n\t\tbest_tag 
= \"o\"\r\n\treturn best_tag", "def tag_one(self, tokens, index, history):\n tag = None\n for tagger in self._taggers:\n tag = tagger.choose_tag(tokens, index, history)\n if tag is not None:\n break\n return tag", "def gettag(query, lemmatag = False):\n import re\n if lemmatag is False:\n tag = 'n' # same default as wordnet\n # attempt to find tag from tregex query\n tagfinder = re.compile(r'^[^A-Za-z]*([A-Za-z]*)')\n tagchecker = re.compile(r'^[A-Z]{1,4}$')\n treebank_tag = re.findall(tagfinder, query.replace(r'\\w', '').replace(r'\\s', '').replace(r'\\b', ''))\n if re.match(tagchecker, treebank_tag[0]):\n if treebank_tag[0].startswith('J'):\n tag = 'a'\n elif treebank_tag[0].startswith('V') or treebank_tag[0].startswith('M'):\n tag = 'v'\n elif treebank_tag[0].startswith('N'):\n tag = 'n'\n elif treebank_tag[0].startswith('R'):\n tag = 'r'\n elif lemmatag:\n tag = lemmatag\n tagchecker = re.compile(r'^[avrn]$')\n while not re.match(tagchecker, lemmatag):\n time = strftime(\"%H:%M:%S\", localtime())\n selection = raw_input('\\n%s: WordNet POS tag \"%s\" not recognised.\\n It must be:\\n\\n ' \\\n ' a: (adjective)' \\\n ' n: (noun)' \\\n ' r: (adverb)' \\\n ' v: (verb)\\n\\nYour selection: ' % (time, lemmatag))\n lemmatag = selection\n return tag", "def tagging (tagged_dataset, sentence) :\n tagged = open(tagged_dataset, \"rb\")\n tagged_ingredients = pickle.load(tagged)\n\n back = nltk.DefaultTagger('COMMENT')\n unigram_tagger = nltk.UnigramTagger(tagged_ingredients,backoff=back)\n bigram_tagger = nltk.BigramTagger(tagged_ingredients, backoff=unigram_tagger)\n tagged_ing = unigram_tagger.tag(sentence)\n \n return tagged_ing", "def __tagsToNgrams__(self):\n bigrams = defaultdict(int)\n trigrams = defaultdict(int)\n for tags in self.getTags():\n tags = list(tags)\n for i in range(2):\n tags.insert(0, BEGIN)\n for k in range(2, len(tags)):\n trigrams[tuple(tags[k-2:k+1])] += 1\n bigrams[tuple(tags[k-1:k+1])] += 1\n return bigrams, trigrams", "def __init__(self, tag):\n self._tag = tag\n SequentialBackoffTagger.__init__(self, None)", "def get_default_tag(self, tags):\n tags_counter = Counter()\n for tag in tags:\n tags_counter[tag] += 1\n\n if len(tags_counter) == 2 and list(tags_counter.values())[0] == list(tags_counter.values())[1]:\n return ut.find_positive_tag(tags_counter.keys())\n\n return tags_counter.most_common(1)[0][0]", "def viterbi_tags (untagged_sentences, h):\n transitions = h[0]\n emissions = h[1]\n tags = h[2]\n maxtags = []\n #print tags\n\n for untaggedsent in untagged_sentences:\n #Create empty probtable\n words = untaggedsent.split()\n r = len(tags)\n c = len(words)\n probtable = [None]*r\n for i in range(r):\n probtable[i] = [None]*c\n for j in range(c):\n probtable[i][j] = [None]*2\n\n #Initialize zeroth column of probtable\n prevtag = '<START>'\n word = words[0]\n for i in range(r):\n tag = tags[i]\n\n transition = transitions[prevtag][tag]\n if word in emissions[tag]:\n emission = emissions[tag][word]\n else:\n emission = .0001*emissions[tag]['<UNKNOWN>']\n\n probtable[i][0][0] = transition*emission\n \n #Fill in probtable\n for j in range(1, c):\n word = words[j]\n for i in range(r):\n tag = tags[i]\n maxprob = 0\n maxtag = None\n\n if word in emissions[tag]:\n emission = emissions[tag][word]\n else:\n emission = .0001*emissions[tag]['<UNKNOWN>']\n\n for k in range(r):\n prevtag = tags[k]\n transition = transitions[prevtag][tag]\n prob = probtable[k][j-1][0]*transition*emission\n \n if (prob > maxprob):\n maxprob = prob\n maxtag = k\n\n probtable[i][j][0] = maxprob\n 
probtable[i][j][1] = maxtag\n\n #Find most likely sequence of POS tags of this sentence\n sentmaxtags = maxsequence(probtable, tags)\n maxtags.extend(sentmaxtags)\n\n #Return most likely sequence of POS tags of all sentences\n return maxtags", "def ner_tag_advertise(self, advertise: Dict[str, Any]):\n tmp_ad = advertise.copy()\n full_str: str = sc.debug_print(\n self.splitting_marking(text_input=tmp_ad[\"clean_text\"],\n ner_map=self.non_measure_map,\n measure_map=self.measure_map), self.debug)\n\n terms_input: List[str] = sc.debug_print(\n [self.reg_rules[\"ngram_clear_rgx\"].sub(\"\", word[0])\n for word in self.reg_rules[\"ngram_rgx\"].findall(full_str)], self.debug)\n\n model_input: [(str, (str, ...))] = sc.debug_print(self.get_tagged_sequence(terms_input), self.debug)\n\n # Build conflict dictionary\n clean_inputs: [(str, str)] = []\n conflict_words = dict()\n for word, ne in model_input:\n if len(ne) > 1:\n if word in conflict_words.keys():\n for tag in ne:\n conflict_words[word].add(tag)\n else:\n conflict_words[word] = {*ne}\n clean_inputs.append((word, random.choice(ne)))\n else:\n clean_inputs.append((word, ne[0]))\n\n tmp_ad[\"NER\"] = clean_inputs\n return tmp_ad", "def __init__(self, use_stanford=False, NER_model=None, NER_tagger=None,\n POS_model=None, POS_tagger=None):\n self.NER_model = NER_model\n self.NER_tagger = NER_tagger\n self.POS_model = POS_model\n self.POS_tagger = POS_tagger\n self.use_stanford = use_stanford\n\n if use_stanford:\n if NER_model is None or NER_tagger is None or POS_model is None \\\n or POS_tagger is None:\n sys.exit('tagging initialization: Stanford models and taggers'\n ' have to be provided!')\n else:\n self.post = StanfordPOSTagger(self.POS_model, self.POS_tagger).tag\n self.nert = StanfordNERTagger(self.NER_model, self.NER_tagger).tag\n else:\n self.post = nltk.pos_tag\n self.nert = nltk.ne_chunk", "def create_bleurt_preprocessing_ops(tokenizer, max_seq_length):\n\n def _py_encode(references, candidates):\n input_ids, input_mask, segment_ids = encoding.encode_batch(\n references, candidates, tokenizer, max_seq_length)\n return input_ids, input_mask, segment_ids\n\n def bleurt_preprocessing_ops(references, candidates):\n \"\"\"Builds a computation graph for BLEURT tokenization and encoding.\"\"\"\n return tf.numpy_function(\n func=_py_encode,\n inp=[references, candidates],\n Tout=(tf.int64, tf.int64, tf.int64))\n\n return bleurt_preprocessing_ops", "def __init__(self, tagger):\n self.tagger = tagger\n self.classifier = Perceptron()", "def _default_tcrsampler_olga_human_beta(default_background = None, default_background_if_missing=None):\n from tcrsampler.sampler import TCRsampler\n if default_background is None:\n default_background = 'olga_human_beta_t.sampler.tsv'\n \n if default_background_if_missing is None:\n default_background_if_missing ='olga_sampler.zip'\n \n print(default_background)\n\n try: \n t = TCRsampler(default_background=default_background)\n except OSError:\n t = TCRsampler()\n t.download_background_file(default_background_if_missing)\n t = TCRsampler(default_background=default_background)\n return t", "def _default_tcrsampler_human_beta(default_background = None, default_background_if_missing=None):\n from tcrsampler.sampler import TCRsampler\n if default_background is None:\n default_background = 'britanova_human_beta_t_cb.tsv.sampler.tsv'\n \n if default_background_if_missing is None:\n default_background_if_missing ='britanova_human_beta_t_cb.tsv.sampler.tsv.zip'\n \n \n print(default_background)\n\n try: \n t = 
TCRsampler(default_background=default_background)\n except OSError:\n t = TCRsampler()\n t.download_background_file(default_background_if_missing)\n t = TCRsampler(default_background=default_background)\n return t", "def taggerWord(self,word):\n if(\"tagger\" in self._classes):\n return self._tagger.taggerWord(word)", "def __init__(self,\n learning_rate=0.001,\n beta1=0.9,\n use_locking=False,\n name=\"GGT\",\n window=10,\n eps=1e-4,\n svd_eps=1e-6,\n sigma_eps=1e-2):\n super(GGTOptimizer, self).__init__(use_locking, name)\n self._set_hyper(\"lr\", learning_rate)\n self._set_hyper(\"beta1\", beta1)\n self._set_hyper(\"window\", window)\n self._set_hyper(\"eps\", eps)\n self._set_hyper(\"svd_eps\", svd_eps)\n self._set_hyper(\"sigma_eps\", sigma_eps)\n\n self.index_dict = {}\n self.shape_dict = {}", "def estimate_nb_tagger(counters,smoothing):\n nb_weights = estimate_nb([counters[tag] for tag in counters.keys()], counters.keys(), smoothing)\n tag_count = defaultdict(float)\n total_count = 0.\n for tag in counters.keys():\n tag_count[tag] = sum(counters[tag].values())\n total_count += sum(counters[tag].values())\n for tag in counters.keys():\n nb_weights[(tag, OFFSET)] = np.log(tag_count[tag] / total_count)\n return nb_weights", "def tag_word(self, w): \n if self.unknown(w):\n return self.default_tag\n else:\n return max(self.word_tags[w], key=self.word_tags[w].get)", "def naiveBayes(train_set, train_labels, dev_set, smoothing_parameter, pos_prior):\r\n # TODO: Write your code here\r\n # return predicted labels of development set\r\n retval = []\r\n smoothing_parameter = 0.0055\r\n # Generate a unigram BOW for both positive and negative reviews, choose the top 2500 words\r\n pos_bow, neg_bow = generate_unigram_BOW(train_set, train_labels)\r\n sorted_pos = sorted(pos_bow.items(), key=lambda x: x[1], reverse = True)\r\n sorted_neg = sorted(neg_bow.items(), key=lambda x: x[1], reverse = True)\r\n pos_words = sorted_pos[:].copy()\r\n neg_words = sorted_neg[:].copy()\r\n\r\n pos_bi_bow, neg_bi_bow = generate_bigram_BOW(train_set, train_labels)\r\n sorted_bi_pos = sorted(pos_bi_bow.items(), key=lambda x: x[1], reverse = True)\r\n sorted_bi_neg = sorted(neg_bi_bow.items(), key=lambda x: x[1], reverse = True)\r\n bi_pos_words = sorted_bi_pos[:].copy()\r\n bi_neg_words = sorted_bi_neg[:].copy()\r\n\r\n # Calculate the log probabilities each word given type\r\n pos_count = sum(pair[1] for pair in pos_words)\r\n neg_count = sum(pair[1] for pair in neg_words)\r\n bi_pos_count = sum(pair[1] for pair in bi_pos_words)\r\n bi_neg_count = sum(pair[1] for pair in bi_neg_words)\r\n\r\n log_probability_pos = {} #(word)->P(word|positive)\r\n log_probability_neg = {} #(word)->P(word|negative)\r\n log_prob_bi_pos = {}\r\n log_prob_bi_neg = {}\r\n\r\n for pair in pos_words:\r\n pos_prob = np.log((pair[1]+smoothing_parameter)/(pos_count+smoothing_parameter*(len(pos_words) + 1)))\r\n log_probability_pos[pair[0]] = pos_prob\r\n\r\n for pair in neg_words:\r\n neg_prob = np.log((pair[1]+smoothing_parameter)/(neg_count+smoothing_parameter*(len(neg_words) + 1)))\r\n log_probability_neg[pair[0]] = neg_prob\r\n\r\n for pair in bi_pos_words:\r\n bi_pos_prob = np.log((pair[1]+smoothing_parameter)/(bi_pos_count+smoothing_parameter*(len(bi_pos_words) + 1)))\r\n log_prob_bi_pos[pair[0]] = bi_pos_prob\r\n\r\n for pair in bi_neg_words:\r\n bi_neg_prob = np.log((pair[1]+smoothing_parameter)/(bi_neg_count+smoothing_parameter*(len(bi_neg_words) + 1)))\r\n log_prob_bi_neg[pair[0]] = bi_neg_prob\r\n # Finished training\r\n\r\n # For 
each of the new reviews from development data\r\n for review in dev_set:\r\n uni_pos = np.log(pos_prior)\r\n uni_neg = np.log(1 - pos_prior)\r\n for word in review:\r\n if word in log_probability_pos:\r\n uni_pos += log_probability_pos[word]\r\n elif word not in log_probability_pos:\r\n uni_pos += np.log(smoothing_parameter/(pos_count+smoothing_parameter*(len(pos_words) + 1)))\r\n\r\n if word in log_probability_neg:\r\n uni_neg += log_probability_neg[word]\r\n elif word not in log_probability_neg:\r\n uni_neg += np.log(smoothing_parameter/(neg_count+smoothing_parameter*(len(neg_words) + 1)))\r\n\r\n bi_pos = np.log(pos_prior)\r\n bi_neg = np.log(1 - pos_prior)\r\n for i in range(len(review)-1):\r\n currTuple = (review[i], review[i+1])\r\n if currTuple in log_prob_bi_pos:\r\n bi_pos += log_prob_bi_pos[currTuple]\r\n elif currTuple not in log_prob_bi_pos:\r\n bi_pos += np.log(smoothing_parameter/(bi_pos_count+smoothing_parameter*(len(bi_pos_words) + 1)))\r\n\r\n if currTuple in log_prob_bi_neg:\r\n bi_neg += log_prob_bi_neg[currTuple]\r\n elif currTuple not in log_prob_bi_neg:\r\n bi_neg += np.log(smoothing_parameter/(bi_neg_count+smoothing_parameter*(len(bi_neg_words) + 1)))\r\n\r\n MAP_pos = (1-0.4)*uni_pos + 0.4*bi_pos\r\n MAP_neg = (1-0.4)*uni_neg + 0.4*bi_neg\r\n\r\n if MAP_pos >= MAP_neg:\r\n retval.append(1)\r\n else:\r\n retval.append(0)\r\n\r\n return retval", "def _bert_encoder(self, tokens, attn_masks):\n output = self.bert(tokens, attn_masks)\n embedding = output['hidden_states'][-2]\n tag_prob = self.Softmax(self.hidden2tag(embedding))\n return tag_prob", "def tag(self) -> 'Tag':\n # project/lineage must exist so let's fetch it outside of try-except\n project = self.project.key\n lineage = self.lineage.key\n try:\n generation = self.key\n except self.Listing.Empty: # generation doesn't exist\n LOGGER.debug('No previous generations found - using a null tag')\n return NOTAG\n return TAGS(self.registry, project, lineage, generation)" ]
[ "0.6910925", "0.6843258", "0.63026106", "0.6082636", "0.5692276", "0.5538755", "0.5303569", "0.5225821", "0.51950496", "0.5185843", "0.5142859", "0.50902545", "0.50739115", "0.50673866", "0.5044054", "0.50221866", "0.49873263", "0.49494132", "0.48789895", "0.48588735", "0.48349178", "0.48216647", "0.4811698", "0.47981557", "0.47974417", "0.47762802", "0.47624767", "0.47619978", "0.47590855", "0.4758494" ]
0.8340529
0
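As a side note on the backoff-tagger record above, here is a minimal, self-contained NLTK sketch of the same chaining idea; the helper name, the treebank training corpus, and the usage line are illustrative assumptions rather than part of the dataset.

from nltk.corpus import treebank
from nltk.tag import DefaultTagger, UnigramTagger, BigramTagger, TrigramTagger

def build_backoff_chain(train_sents=None):
    # Each n-gram tagger backs off to the previous one; DefaultTagger is the
    # last resort and labels any unseen token as 'NN'.
    if train_sents is None:
        train_sents = treebank.tagged_sents()  # requires nltk.download('treebank')
    default = DefaultTagger('NN')
    unigram = UnigramTagger(train_sents, backoff=default)
    bigram = BigramTagger(train_sents, backoff=unigram)
    return TrigramTagger(train_sents, backoff=bigram)

# Example: build_backoff_chain().tag(['The', 'cat', 'sat'])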
should accept a dict or callable as first argument
def test_takes_dict_or_callable(self): scope1 = Scope({ 'where': 'foo' }) self.assertEqual(scope1.finder_options, { 'where': 'foo' }) call = lambda(cls): cls.where('foo') scope2 = Scope(call) self.assertEqual(scope2.callable, call)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def callableize(f_or_d):\n return f_or_d.get if isinstance(f_or_d,dict) else f_or_d", "def fn(*args, **kwargs):\n pass", "def callFuncBasedOnDict(func, argdict, **kwargs):\n if argdict is None:\n argdict = {}\n seldict = selectArgsFromDict(func, argdict)\n if kwargs is not None:\n seldict.update(kwargs)\n return func(**seldict)", "def fun_par_dict(fun: Callable, *args):\n if len(args) > 0:\n return fun(*args[:-1], **args[-1])\n else:\n return fun()", "def test_from_callable(self):\n def func(a: int = 0):\n return a\n fsig = FSignature.from_callable(func)\n assert len(fsig.parameters) == 1\n assert fsig.parameters['a'] == FParameter(\n kind=inspect.Parameter.POSITIONAL_OR_KEYWORD,\n name='a',\n interface_name='a',\n default=0,\n type=int,\n )", "def __call__(fun_name):", "def __call__(value):", "def __call__(self, *args, **kwargs) -> Dict[str, Any]:\n pass", "def inner_test(param: dict):\n pass", "def getCallable():", "def virtual(func: \"callable\"):\n return func", "def test_delegates_callable(self):\n foo = self.Test.foo\n\n self.assertEqual(foo(), 123)\n\n foo(1, 2, 3, foo='bar')\n self.assertEqual(((1, 2, 3), dict(foo='bar')), self.last_call)", "def test_init_TypeError_when_weight_func_argument_is_not_Callable():\n weight_func = '---'\n err_msg = re.escape(\n (f\"Argument `weight_func` must be a Callable or a dict of \"\n f\"Callables. Got {type(weight_func)}.\")\n )\n with pytest.raises(TypeError, match = err_msg):\n ForecasterAutoregMultiSeriesCustom(\n regressor = LinearRegression(),\n fun_predictors = create_predictors,\n window_size = 5,\n weight_func = weight_func\n )", "def test_kw_validation_with_trait_type_instances(self):\n\n @function(x=Int(10), y=Int(20), _returns_=Int(30))\n def add(**kw):\n return kw['x'] + kw['y']\n\n self.assertEqual(add(x=8, y=2), 10)\n self.failUnlessRaises(TraitError, add, x=2, y='xxx')\n\n return", "def eval(*args, **kwargs)->Any:\n pass", "def test_direct_invocation_works():\n assert (_add)(*[1, 2], **{\"3\": 3, \"4\": 4}) == 10", "def any_(*args, **kwargs):\n ...", "def __init__(self, func):\n self.dictionary = {}\n self.func = func", "def parse_argdict(extras):\n return [(key, value() if callable(value) else value) for key, value in extras.items()]", "def test_star_args_with_dict():\n arg_dict = {'visited_color': 'orange',\n 'link_color': 'yellow',\n 'back_color': 'red',\n 'fore_color': 'blue'}\n assert arguments.fun_star_params(**arg_dict) == ('orange', 'yellow',\n 'red', 'blue')", "def test_fn_call_with_dict():\n l = [1, 2, 3, 4, 5]\n ds = [defaultdict(int), defaultdict(int), defaultdict(int)]\n for d in ds:\n for fn in [s7.div, s7.mul, s7.add, \"abcd\", 1234]:\n try:\n f = s7.count_fn_called_with_dict(dict_=d, fn=fn)\n for i in range(0, random.randint(2, 10)):\n f(*l)\n assert fn in d.keys() and d[fn] == (i + 1)\n except Exception as e:\n assert e.__class__.__name__ == TypeError.__name__", "def flexdictargs(func: Callable[[dict], RT]) -> Callable[[Iterable, Any], RT]:\n\n @wraps(func)\n def f(self, *args, **kwargs):\n if args and isinstance(args[0], MutableMapping):\n d = args[0]\n elif kwargs:\n d = kwargs\n else:\n raise TypeError(\"invalid input arguments\")\n return func(self, normalize(d))\n\n return f", "def test_kw_validation_with_trait_type_classes(self):\n\n @function(x=Int, y=Int, _returns_=Int)\n def add(**kw):\n return kw['x'] + kw['y']\n\n self.assertEqual(add(x=8, y=2), 10)\n self.failUnlessRaises(TraitError, add, x=2, y='xxx')\n\n return", "def __init__(self, fn: callable):\n self.fn = fn", "def json_in(fn):\n @wraps(fn)\n 
def new(arg):\n # convert the args in JSON to a python object\n arg = json.loads(arg)\n return fn(arg)\n return new", "def __call__(self, data, **kwargs):", "def test_apply_works():\n assert apply(_add, [1, 2], {\"3\": 3, \"4\": 4}) == 10", "def __call__(object):", "def __init__(self, function: Optional[Callable] = None,\n kwargs: Optional[Dict] = None):\n self.function: Callable = function\n\n if kwargs is None:\n kwargs = dict()\n self.kwargs: Dict[str, Any] = kwargs", "def wrap_gate(fn):\n return lambda parms: fn(**parms) if len(parms) > 0 else fn" ]
[ "0.6986333", "0.6356339", "0.6322947", "0.6199034", "0.59889823", "0.5970065", "0.59491587", "0.5934999", "0.5909235", "0.57952887", "0.57868856", "0.5782365", "0.57396305", "0.5720871", "0.571165", "0.57064", "0.5683914", "0.56789106", "0.5671449", "0.56660324", "0.5663809", "0.562556", "0.55994475", "0.5589871", "0.55889463", "0.5562111", "0.55618155", "0.55498594", "0.55281544", "0.55223185" ]
0.66485786
1
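For readers skimming the two Scope test records above and below, a hypothetical sketch of the constructor behaviour they exercise; the real Scope class is not shown in this dump, so the class body, attribute names, and exception type here are assumptions.

class Scope(object):
    def __init__(self, options_or_callable=None):
        # A dict becomes finder options, a callable is stored for later
        # delegation, and anything else is rejected outright.
        self.finder_options = {}
        self.callable = None
        if options_or_callable is None:
            return
        if isinstance(options_or_callable, dict):
            self.finder_options = options_or_callable
        elif callable(options_or_callable):
            self.callable = options_or_callable
        else:
            raise Exception('Scope expects a dict or a callable')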
should raise exception on bad arguments
def test_errors_on_bad_argument(self): self.assertRaises(Exception, Scope, 'foo') self.assertRaises(Exception, Scope, 1) self.assertRaises(Exception, Scope, []) self.assertRaises(Exception, Scope, tuple())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test04(self):\n self.assertRaises(TypeError, robustApply, oneArgument, \"this\", blah=\"that\")", "def test_badargs(self):\n self.assertRaises(TypeError, isint, [])\n self.assertRaises(TypeError, isint, {})\n self.assertRaises(TypeError, isint, None)\n return", "def test_argchecks(self):\n self.assertRaises(ValueError, general.DataSet, numpy.random.rand(100, 2), numpy.random.rand(50, 1), None, None, None, None)", "def test_filter_args_error_msg():\r\n nose.tools.assert_raises(ValueError, filter_args, f, [])", "def test_invalidValues(self):\n argV = \"--fooint egg\".split()\n self.assertRaises(usage.UsageError, self.usage.parseOptions, argV)", "def raise_fail(*args, **kwargs):\n raise Exception(\"oops\")", "def test_args_bad_value(testapp, args, error):\n\n with pytest.raises(ValueError) as excinfo:\n next(archive.process(testapp, [], **args))\n assert str(excinfo.value) == error", "def test_non_existant_required_arg(self):\n with self.assertRaises(ValueError):\n _func = required_parameters('arg1', 'wibble', 'wobble')\n _func(undecorated_func)\n\n with self.assertRaises(ValueError):\n _func = mutually_exclusive_parameters(\n 'arg1',\n 'wibble',\n 'wobble'\n )\n _func(undecorated_func)", "def test_with_unknown_args(self):\n with self.assertRaises(ratd.cliargs.CliArgError):\n CliArgs('foomonkey')", "def test_enforcement_boundary_create_command_when_invalid_arguments_provided(\n err_msg, args, err_type, mock_client\n):\n with pytest.raises(err_type) as err:\n enforcement_boundary_create_command(mock_client, args)\n assert str(err.value) == err_msg", "def test_args_bad_value(testapp, args, error):\n\n with pytest.raises(ValueError) as excinfo:\n next(sitemap.process(testapp, [], **args))\n assert str(excinfo.value) == error", "def test_wrong_argument_for_encoding(self):\n with self.assertRaises(exceptions.WrongArgumentTypeError):\n positional.encode(4.5, 10)", "def test_area_methodwithargthrowerror(self):\n s3 = Square(3, 1, 3)\n with self.assertRaises(TypeError) as e:\n s3.area(9)\n self.assertEqual(str(e.exception),\n \"area() takes 1 positional argument but 2 were given\")", "def test_missing_generic_args(self):\n import System\n #TODO specify clearly which exception is appropriate here\n self.assertRaises(Exception, System.Collections.Generic.List)", "def test_04_one_args(self):\n with self.assertRaises(TypeError) as x:\n r = Rectangle(7)\n self.assertEqual(\"__init__() missing 1 required positional argument:\\\n 'height'\", str(x.exception))", "def test_nonCallable(self):\n us = WrongTypedOptions()\n argV = \"--barwrong egg\".split()\n self.assertRaises(TypeError, us.parseOptions, argV)", "def test_badNumberOfArgumentsToBuildNews(self):\n newsBuilder = NewsBuilder()\n self.assertRaises(SystemExit, newsBuilder.main, [])\n self.assertRaises(SystemExit, newsBuilder.main, [\"hello\", \"world\"])", "def test_invalid_path(self):\n self.assertRaises(argparse.ArgumentTypeError, generic.check_path, 'foo')", "def test_invalid_arguments(self):\n # More than two arguments should report an error.\n exit_code, output = run_cli('a', 'b', 'c')\n assert exit_code != 0\n assert \"Error\" in output\n # Invalid `ionice' values should report an error.\n exit_code, output = run_cli('--ionice=foo')\n assert exit_code != 0\n assert \"Error\" in output", "def bad_args(args):\n PARSER.print_help()\n exit(0)", "def test_argument_errors(self):\n method = self.Test.default_scope\n self.assertRaises(errors.ArgumentError,\n method,\n { 'where': 'foo' },\n where='bar')\n\n self.assertRaises(errors.ArgumentError, 
method, \"POOP\")", "def test_weirdCallable(self):\n us = WeirdCallableOptions()\n argV = \"--foowrong blah\".split()\n # ValueError is swallowed as UsageError\n e = self.assertRaises(usage.UsageError, us.parseOptions, argV)\n self.assertEquals(str(e), \"Parameter type enforcement failed: Yay\")\n\n us = WeirdCallableOptions()\n argV = \"--barwrong blah\".split()\n # RuntimeError is not swallowed\n self.assertRaises(RuntimeError, us.parseOptions, argV)", "def handle_invalid_arguments(e):\n errors = e.message\n return generic_errors(errors, code=400)", "def test_wrong_args(self, bad_context):\n with pytest.raises(TypeError):\n Connection(bad_context)", "def test_traffic_analysis_command_for_invalid_arguments(\n args, err_msg, err_type, mock_client\n):\n from IllumioCore import traffic_analysis_command\n\n with pytest.raises(err_type) as err:\n traffic_analysis_command(mock_client, args)\n assert str(err.value) == err_msg", "def test_with_empty_args(self):\n with self.assertRaises(TypeError):\n CliArgs()", "def test_arg_validation_all_seven_posn(self):\n assert_raises_message(\n TypeError,\n \"drivername must be a string\",\n url.URL,\n b\"somedriver\",\n \"user\",\n \"secret\",\n \"10.20.30.40\",\n 1234,\n \"DB\",\n {\"key\": \"value\"},\n )", "def invalid_args(event):\n\n s.sendReply(\n event,\n f'Please provide the proper arguments. Use \"@{s.BOT_NAME} help\" for help.',\n )", "def report_unexpected_exception(self, *args, **kwargs):\n pass", "def test_virtual_service_create_command_when_invalid_arguments_provided(\n err_msg, args, err_type, mock_client\n):\n with pytest.raises(err_type) as err:\n virtual_service_create_command(mock_client, args)\n assert str(err.value) == err_msg" ]
[ "0.76359093", "0.74616575", "0.7447246", "0.74302024", "0.7386324", "0.7267616", "0.7242593", "0.7234812", "0.7199537", "0.7185044", "0.7169754", "0.7130541", "0.711537", "0.71062267", "0.71003824", "0.70912004", "0.70899975", "0.70628417", "0.7052777", "0.7041206", "0.70096964", "0.6983652", "0.69798166", "0.6978614", "0.6972688", "0.6967568", "0.6965008", "0.69556373", "0.6949642", "0.69451624" ]
0.74869186
1
should set `model` to owner when instance is None
def test_sets_model_to_owner(self): self.assertEqual(self.Test.foo.model, self.Test)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_model(self, request, obj, form, change):\n try:\n owner = form.instance.owner\n except models.Application.owner.RelatedObjectDoesNotExist:\n form.instance.owner = request.user\n\n super().save_model(request, obj, form, change)", "def pre_save(self, obj):\n obj.owner = self.request.user", "def get_owner_object(self):\n return None", "def get_owner_object(self):\n return None", "def get_owner_object(self):\n return None", "def get_owner_object(self):\n return self", "def owner(self, owner):\n self._owner = owner", "def owner(self, owner):\n self._owner = owner", "def owner(self, owner):\n self._owner = owner", "def owner(self, owner):\n self._owner = owner", "def create_model(self):\n self.model = None\n pass", "def test_sets_model(self):\n scope = Scope()\n self.assertEqual(scope.model, None)", "def get_owner_object(self):\n return False", "def owner(self, owner):\n\n self._owner = owner", "def owner(self, owner):\n\n self._owner = owner", "def owner(self, owner):\n\n self._owner = owner", "def owner(self, owner):\n\n self._owner = owner", "def __get__(self, instance, owner):\n return self", "def set_model(self):\n self.model = self.get_model()", "def __get__(self, instance, owner):\r\n self.resource_meta = instance\r\n return self", "def __init__(self):\n self.model = None", "def __init__(self):\n self.model = None", "def save_model(self, request, obj, form, change):\n if not change:\n obj.creator = request.user\n obj.save()", "def _attach_to_model(self, model):\n self._model = model", "def model(self, model):\n \n self._model = model", "def test_save_model(self, client):\n type = Type1().save()\n assert type.owner is None", "def owner(self):\n return self.__owner", "def get_model(self):\n\t\treturn self.object.__class__", "def set_model(self, model=None):\n self.model = model", "def model(self, model):\n\n self._model = model" ]
[ "0.7634998", "0.67302233", "0.66623086", "0.66623086", "0.66623086", "0.64856637", "0.64180213", "0.64180213", "0.64180213", "0.64180213", "0.63496315", "0.6301929", "0.62846565", "0.62515026", "0.62515026", "0.62515026", "0.62515026", "0.6190546", "0.61800027", "0.6162175", "0.60536045", "0.60536045", "0.59939796", "0.59813756", "0.5975539", "0.5972142", "0.59658414", "0.59609425", "0.5953085", "0.5938079" ]
0.7304472
1
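A hypothetical sketch of the descriptor behaviour checked by the "set `model` to owner" test above; the class and attribute names are assumptions, and only the descriptor protocol itself (instance is None on class-level access) is standard Python.

class ScopeDescriptor(object):
    def __init__(self):
        self.model = None

    def __get__(self, instance, owner):
        # Accessing the attribute on the class itself passes instance=None,
        # so the owning class is what gets recorded as `model`.
        self.model = owner if instance is None else type(instance)
        return self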
should delegate to `callable` when present
def test_delegates_callable(self): foo = self.Test.foo self.assertEqual(foo(), 123) foo(1, 2, 3, foo='bar') self.assertEqual(((1, 2, 3), dict(foo='bar')), self.last_call)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __call__(self, *args, **kw):\n return self.callable(*args, **kw)", "def getCallable():", "def before_call(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> None:", "def callback(self, fun: Callable[[], None] | None) -> None:", "def func(cls):\n return cls.get_wrapper()(cls.callable)", "def virtual(func: \"callable\"):\n return func", "def is_callable(o):\n return callable(o)", "def _handle_callable(value: Any, annotation_args: Tuple[List[Any], Any]) -> bool:\n\n if not isinstance(value, Callable):\n return False\n\n # Here, we wish to compare a given callable with the annotation provided.\n # The only way to verify this information is through the type hints of the function.\n # Note that the `Callable` syntax does not indicate optional or keyword arguments,\n # so those are ignored if present.\n param_annotations, return_annotation = annotation_args\n signature = inspect.signature(function)\n indicated_return_annotation = signature.return_annotation\n\n # have to write functions to convert between `typing` and builtin\n if indicated_return_annotation != return_annotation:\n return False\n \n print(\"callable functionality WIP\")\n pass", "def _run_callable_with_postamble(postamble, callable_, *args, **kwargs):\n def fn():\n try:\n return callable_(*args, **kwargs)\n finally:\n postamble()\n return fn", "def apply(cls, func):\n raise NotImplementedError", "def is_callable(o):\n return isinstance(o, collections.Callable)", "def is_callable(obj):\n return callable(obj)", "def __call__(self, fn=None, *args, **kwargs):\n if callable(fn):\n self.fn = fn\n\n return self", "def should_execute_sync(target_callable):\n\n if (\n target_callable not in NO_WRAP_LIST\n ): # exclude the ones we know we don't want to wrap\n module = inspect.getmodule(\n target_callable\n ) # easy mode - does it have a module inspect can tell us about\n if module is None:\n if isinstance(target_callable, functools.partial):\n # partials - get the module from the function that's been wrapped\n module = inspect.getmodule(target_callable.func)\n elif \"method-wrapper\" in str(type(target_callable)):\n # bound to an instance of an object (in __self__)\n obj = target_callable.__self__\n if isinstance(obj, types.GeneratorType):\n # generators need some additional inspection to see if they're IDA code\n module = os.path.basename(obj.gi_code.co_filename).split(\".\")[0]\n else: # expect the object to have __module__\n module = inspect.getmodule(target_callable)\n elif \"wrapper_descriptor\" in str(type(target_callable)):\n # slot wrappers for things like base object implementation of __str__ - has a class in __objclass__\n module = inspect.getmodule(target_callable.__objclass__)\n\n if module is None:\n if hasattr(target_callable, \"__module__\"):\n if isinstance(\n target_callable.__module__, bridge.STRING_TYPES\n ): # if python2: unicode as well as str\n if isinstance(target_callable, type):\n # this is a type created for a remote bridge (e.g., inheriting from a local type, or in a remotify-ed module)\n # by default, we'll assume we should execute_sync. 
The only exception is if the __init__ is bridged - we need to let\n # that go back through the bridge normally, so we don't block if it in turn calls an IDA related function\n if (\n \"__init__\" in target_callable.__dict__\n and bridge._is_bridged_object(\n target_callable.__dict__[\"__init__\"]\n )\n ):\n # has a bridged init, no execute_sync - if the bridged init calls an ida function, we can execute_sync that when it comes in\n return False\n\n return True\n else: # not a type - probably a function in a remotified module - assume we need to execute_sync\n return True\n\n # don't know what this is, complain\n raise Exception(\n \"Unknown module for : \"\n + str(target_callable)\n + \" \"\n + str(type(target_callable))\n + \" \"\n + str(target_callable.__dict__)\n )\n\n if inspect.ismodule(module):\n module = module.__name__\n\n return is_ida_module(module)\n\n return False", "def callback(self, function: Optional[Callable[[int], None]]) -> None:", "def _make_callable(func):\n try:\n return func.evaluator()\n except AttributeError:\n return func", "def __call__(self, f):\n raise NotImplementedError()", "def wrapper(*args, **kwargs):\r\n return lambda: func(*args, **kwargs)", "def fn(*args, **kwargs):\n pass", "def __call__(self, f):\r\n return self.apply(f, None)", "def lift(cls, func):\n raise NotImplementedError", "def func(*args, **kwargs):\n return call(*args, **kwargs) # pylint: disable = E1102", "def is_callable(func: Any) -> bool:\n # noinspection PyTypeChecker\n return isinstance(func, (types.FunctionType, types.BuiltinFunctionType,\n types.MethodType, functools.partial))", "def before_call(\n self, cb: CircuitBreaker, func: Callable[..., T], *args: Any, **kwargs: Any\n ) -> None:", "def call(fn, arg):\n return fn(arg)", "def apply(self, func, *args, **kwargs):\n pass", "def maybe_call(obj, *args, **kwargs):\n if callable(obj):\n return obj(*args, **kwargs)\n return obj", "def __call__(fun_name):", "def __call__(self, *args, **kwargs):\n return self.call(*args, **kwargs)", "def dummy_callback(obj):\n pass" ]
[ "0.6989672", "0.6630756", "0.6587269", "0.6549553", "0.65392023", "0.6454149", "0.6429809", "0.63942474", "0.6285364", "0.62485355", "0.6232313", "0.6214799", "0.6207219", "0.62026733", "0.6182194", "0.6179252", "0.6174716", "0.61691743", "0.6093075", "0.60522413", "0.60278744", "0.6023863", "0.60226834", "0.6013741", "0.59986705", "0.59858567", "0.5982618", "0.59605956", "0.59423816", "0.59261864" ]
0.6868197
1
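A hypothetical sketch of the delegation the "delegate to `callable`" test above describes; the names are assumptions, and the point is simply that __call__ forwards all arguments to the stored callable when one is present.

class CallableScope(object):
    def __init__(self, func=None):
        self.callable = func

    def __call__(self, *args, **kwargs):
        # Forward positional and keyword arguments untouched.
        if self.callable is not None:
            return self.callable(*args, **kwargs)
        raise TypeError('no callable to delegate to')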
This function performs a grid search over a set of different learning rates and a number of hidden layer neurons.
def grid_search(verbose): # Load Ising data. Ising_Data = prepare_Ising_DNN() # Perform grid search over learning rate and number of hidden neurons. N_neurons=np.logspace(0,3,4).astype("int") # Check number of neurons over multiple decades. learning_rates=np.logspace(-6,-1,6) # Pre-allocate variables to store accuracy and loss data. train_loss=np.zeros((len(N_neurons),len(learning_rates)),dtype=np.float64) train_accuracy=np.zeros_like(train_loss) test_loss=np.zeros_like(train_loss) test_accuracy=np.zeros_like(train_loss) critical_loss=np.zeros_like(train_loss) critical_accuracy=np.zeros_like(train_loss) # Grid search. for i, neurons in enumerate(N_neurons): for j, lr in enumerate(learning_rates): print("training DNN with %4d neurons and SGD lr=%0.6f." %(neurons,lr) ) train_loss[i,j],train_accuracy[i,j],\ test_loss[i,j],test_accuracy[i,j],\ critical_loss[i,j],critical_accuracy[i,j] = evaluate_model(neurons,lr,Ising_Data,verbose) plot_data(learning_rates,N_neurons,train_accuracy, "training") plot_data(learning_rates,N_neurons,test_accuracy, "testing") plot_data(learning_rates,N_neurons,critical_accuracy, "critical")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def grid_search(train_fun, decode_fun, eval_fun, train_set, dev_set, FLAGS):\n FLAGS.create_fresh_params = True\n\n hyperparameters = FLAGS.tuning.split(',')\n num_hps = len(hyperparameters)\n hp_range = hyperparam_range\n\n print(\"======== Grid Search ========\")\n print(\"%d hyperparameter(s): \" % num_hps)\n for i in xrange(num_hps):\n print(\"{}: {}\".format(\n hyperparameters[i], hp_range[hyperparameters[i]]))\n print()\n\n param_grid = [v for v in hp_range[hyperparameters[0]]]\n for i in xrange(1, num_hps):\n param_grid = itertools.product(param_grid, hp_range[hyperparameters[i]])\n\n best_hp_set = [-1] * num_hps\n best_seed = -1\n best_metrics_value = 0\n\n for row in param_grid:\n row = nest.flatten(row)\n for i in xrange(num_hps):\n setattr(FLAGS, hyperparameters[i], row[i])\n if hyperparameters[i] == 'universal_keep':\n setattr(FLAGS, 'sc_input_keep', row[i])\n setattr(FLAGS, 'sc_output_keep', row[i])\n setattr(FLAGS, 'tg_input_keep', row[i])\n setattr(FLAGS, 'tg_output_keep', row[i])\n setattr(FLAGS, 'attention_input_keep', row[i])\n setattr(FLAGS, 'attention_output_keep', row[i])\n\n print(\"Trying parameter set: \")\n for i in xrange(num_hps):\n print(\"* {}: {}\".format(hyperparameters[i], row[i]))\n\n # Try different random seed if tuning initialization\n num_trials = 5 if FLAGS.initialization else 1\n\n if FLAGS.dataset.startswith('bash'):\n metrics = [\"top1_temp_ms\", \"top1_cms\", \"top3_temp_ms\", \"top3_cms\"]\n metrics_weights = [0.4, 0.4, 0.1, 0.1]\n else:\n metrics = [\"top1_temp_ms\"]\n metrics_weights = [1]\n metrics_signature = '+'.join(\n ['{}x{}'.format(m, mw) for m, mw in zip(metrics, metrics_weights)])\n\n for t in xrange(num_trials):\n seed = random.getrandbits(32)\n tf.set_random_seed(seed)\n metrics_value = single_round_model_eval(train_fun, decode_fun,\n eval_fun, train_set, dev_set, metrics, metrics_weights)\n print(\"Parameter set: \")\n for i in xrange(num_hps):\n print(\"* {}: {}\".format(hyperparameters[i], row[i]))\n print(\"random seed: {}\".format(seed))\n print(\"{} = {}\".format(metrics_signature, metrics_value))\n print(\"Best parameter set so far: \")\n for i in xrange(num_hps):\n print(\"* {}: {}\".format(hyperparameters[i], best_hp_set[i]))\n print(\"Best random seed so far: {}\".format(best_seed))\n print(\"Best evaluation metrics so far = {}\".format(best_metrics_value))\n if metrics_value > best_metrics_value:\n best_hp_set = row\n best_seed = seed\n best_metrics_value = metrics_value\n print(\"☺ New best parameter setting found\")\n\n print()\n print(\"*****************************\")\n print(\"Best parameter set: \")\n for i in xrange(num_hps):\n print(\"* {}: {}\".format(hyperparameters[i], best_hp_set[i]))\n print(\"Best seed = {}\".format(best_seed))\n print(\"Best {} = {}\".format(metrics, best_metrics_value))\n print(\"*****************************\")", "def grid_search(self):\n\t\tchoice_apply_BN = [False] if self.debug else [False] # True, False\n\t\tchoice_apply_RD = [False] if self.debug else [False] # True, False\n\n\t\tchoice_layers = [3] if self.debug else [3] # 1, 2, 3, 4\n\t\tchoice_hd_hn_af = ['S'] if self.debug else ['R'] # 'R6' | 'RK' | 'S' activation function w.r.t. 
head hidden layers\n\t\tchoice_tl_af = ['S'] if self.debug else ['R'] # activation function for the last layer, sigmoid is suggested due to zero-prediction\n\t\tchoice_hd_hn_tl_af = None\n\n\t\tchoice_apply_tl_af = [True] # True, False\n\n\t\tif choice_hd_hn_tl_af is not None:\n\t\t\tfor BN, RD, num_layers, af, apply_tl_af in product(choice_apply_BN, choice_apply_RD, choice_layers,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t choice_hd_hn_tl_af, choice_apply_tl_af):\n\t\t\t\tffnns_para_dict = dict(FBN=False, BN=BN, RD=RD, num_layers=num_layers, HD_AF=af, HN_AF=af, TL_AF=af,\n\t\t\t\t\t\t\t\t\t apply_tl_af=apply_tl_af)\n\t\t\t\tsf_para_dict = dict()\n\t\t\t\tsf_para_dict['id'] = 'ffnns'\n\t\t\t\tsf_para_dict['ffnns'] = ffnns_para_dict\n\n\t\t\t\tself.sf_para_dict = sf_para_dict\n\t\t\t\tyield sf_para_dict\n\t\telse:\n\t\t\tfor BN, RD, num_layers, hd_hn_af, tl_af, apply_tl_af in product(choice_apply_BN, choice_apply_RD,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tchoice_layers, choice_hd_hn_af,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tchoice_tl_af, choice_apply_tl_af):\n\t\t\t\tffnns_para_dict = dict(FBN=False, BN=BN, RD=RD, num_layers=num_layers, HD_AF=hd_hn_af, HN_AF=hd_hn_af,\n\t\t\t\t\t\t\t\t\t TL_AF=tl_af, apply_tl_af=apply_tl_af)\n\t\t\t\tsf_para_dict = dict()\n\t\t\t\tsf_para_dict['id'] = 'ffnns'\n\t\t\t\tsf_para_dict['ffnns'] = ffnns_para_dict\n\n\t\t\t\tself.sf_para_dict = sf_para_dict\n\t\t\t\tyield sf_para_dict", "def grid_search(self):\n\t\t''' common settings without grid-search '''\n\t\tvali_k, cutoffs = 5, [1, 3, 5, 10, 20, 50]\n\n\t\tdo_log = False if self.debug else True\n\t\tcommon_eval_dict = dict(debug=self.debug, grid_search=True, dir_output=self.dir_output,\n\t\t\t\t\t\t vali_k=vali_k, cutoffs=cutoffs, do_log=do_log, log_step=2, do_summary=False, loss_guided=False)\n\n\t\t''' some settings for grid-search '''\n\t\tchoice_validation = [False] if self.debug else [True] # True, False\n\t\tchoice_epoch = [20] if self.debug else [100]\n\t\tchoice_mask_label = [False] if self.debug else [False]\n\t\tchoice_mask_ratios = [0.2] if self.debug else [0.2, 0.4, 0.6, 0.8] # 0.5, 1.0\n\t\tchoice_mask_type = ['rand_mask_rele'] if self.debug else ['rand_mask_rele']\n\n\t\tfor do_validation, num_epochs, mask_label in product(choice_validation, choice_epoch, choice_mask_label):\n\t\t\tif mask_label:\n\t\t\t\tfor mask_ratio, mask_type in product(choice_mask_ratios, choice_mask_type):\n\t\t\t\t\tself.eval_dict = dict(do_validation=do_validation, epochs=num_epochs, mask_label=mask_label,\n\t\t\t\t\t mask_ratio=mask_ratio, mask_type=mask_type)\n\t\t\t\t\tself.eval_dict.update(common_eval_dict)\n\t\t\t\t\tyield self.eval_dict\n\t\t\telse:\n\t\t\t\tself.eval_dict = dict(do_validation=do_validation, epochs=num_epochs, mask_label=mask_label)\n\t\t\t\tself.eval_dict.update(common_eval_dict)\n\t\t\t\tyield self.eval_dict", "def run_grid_search():\n\n best_score = 0\n best_learning_rate = 0\n best_discount_rate = 0\n best_initial_q_hat = 0\n trial_results = []\n number_of_trials = 30\n # TODO These ought to be done with numpy.arange but I don't have that package installed at the moment\n for learning_rate_raw in range(5, 50, 5):\n for discount_rate_raw in range(5, 20, 5):\n for initial_q_hat in range(0, 10, 1):\n learning_rate = learning_rate_raw * 0.01\n discount_rate = discount_rate_raw * 0.05\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent, learning_rate, discount_rate, initial_q_hat) # create agent\n e.set_primary_agent(a, 
enforce_deadline=True) # specify agent to track\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n\n # Now simulate it\n sim = Simulator(e, update_delay=0, display=False) # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=number_of_trials) # run for a specified number of trials\n # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line\n score = a.get_score()\n if score > best_score:\n best_score = score\n best_learning_rate = learning_rate\n best_discount_rate = discount_rate\n best_initial_q_hat = initial_q_hat\n trial_results.append((learning_rate, discount_rate, initial_q_hat, score, a.get_proportion_of_states_visited(), len(a.get_failed_trials())/ float(number_of_trials)))\n print \"Gridsearch finished, best learning rate: %.2f, best discount rate: %.2f, best initial q hat %i\" % (best_learning_rate, best_discount_rate, best_initial_q_hat)\n\n with open('gridsearch_results.csv', 'wb') as csvfile:\n spamwriter = csv.writer(csvfile, delimiter=';', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n spamwriter.writerow(('learning rate', 'discount factor', 'initial q-hat value', 'score', 'states visited', 'failed trials'))\n for result in trial_results:\n spamwriter.writerow(result)", "def gridSearch(xTrain, yTrain, xTest, yTest, model, modelParameters, hyperParameters, \n nFolds = 1, reTrain = True, plotGraphs = False):\n leastLoss = None\n bestModel = None\n bestHyperParams = None\n \n \"\"\"Generate the parameter grid\"\"\"\n parameterGrid = []\n gridKeys = []\n \n parameterGrid = list(product(*hyperParameters.values()))\n hyperParameterKeys = hyperParameters.keys()\n \n \"\"\"For plottong graphs\"\"\"\n if plotGraphs:\n plt.close()\n plotHeight = 10\n plotWidth = 20\n index = 0\n fig, axs = plt.subplots(len(parameterGrid), 2, figsize=(plotWidth, plotHeight * len(parameterGrid)))\n fig = plt.figure()\n fig.set_figheight(15)\n fig.set_figwidth(15)\n ax = fig.add_subplot(111, projection='3d')\n \n\n \"\"\"Grid search for cartesian product of hyperParameters\"\"\" \n for parameterMesh in parameterGrid:\n hyperParameterMesh = {}\n for k,v in zip(hyperParameterKeys, parameterMesh):\n hyperParameterMesh[k] = v\n \n \"\"\"Combine model Parameters\"\"\"\n updatedParam = modelParameters.copy()\n updatedParam.update(hyperParameterMesh)\n \n \"\"\"Perform grid search with cross validation\"\"\"\n if nFolds > 1:\n modelParams, trainLossList, testLossList, analysisMetricList = kFoldAnalysis(model = model,\n xTrain = xTrain,\n yTrain = yTrain,\n nFolds = nFolds,\n modelParameters = updatedParam) \n \n \n \"\"\"For storing best model\"\"\"\n avg = np.average(analysisMetricList)\n if leastLoss == None or avg < leastLoss:\n leastLoss = avg\n bestModel = modelParams\n bestHyperParams = hyperParameterMesh\n \n \"\"\"For plotting\"\"\"\n if plotGraphs:\n foldIndex = 1\n\n ax.scatter(hyperParameterMesh['alpha'], hyperParameterMesh['regularizationParameter'], \n avg, marker = 'o', label = str(hyperParameterMesh))\n \n\n for train, test in zip(trainLossList, testLossList):\n axs[index][0].plot(train, label = \"Fold-\" + str(foldIndex))\n axs[index][1].plot(test, label = \"Fold-\" + str(foldIndex))\n foldIndex = foldIndex + 1\n \n axs[index][0].legend()\n axs[index][0].grid()\n \n axs[index][1].legend()\n axs[index][1].grid()\n \n axs[index][0].set_title(\"Train set for \" + str(hyperParameterMesh))\n 
axs[index][1].set_title(\"Validation set for \" + str(hyperParameterMesh))\n \n index = index + 1\n \n \n \"\"\"Perform only grid search and no cross validation. Test set will be used for validation\"\"\" \n else:\n trainedModel, trainLoss, testLoss = model(xTrain, yTrain, xTest, yTest, **updatedParam)\n \n \"\"\"For storing best model\"\"\"\n if leastLoss == None or testLoss[-1] < leastLoss:\n leastLoss = testLoss[-1]\n bestModel = trainedModel\n bestHyperParams = hyperParameterMesh\n \n \"\"\"For plotting graphs\"\"\"\n if plotGraphs:\n axs[index][0].plot(trainLoss, label = \"Training set Loss for \" + str(hyperParameterMesh))\n axs[index][0].legend()\n axs[index][0].grid()\n axs[index][1].plot(testLoss, label = \"Test set Loss for \" + str(hyperParameterMesh))\n axs[index][1].legend()\n axs[index][1].grid()\n index = index + 1\n \n if plotGraphs:\n ax.legend()\n ax.set_xlabel('alpha')\n ax.set_ylabel('regularizationParameter')\n ax.set_zlabel('RMSE')\n\n plt.show()\n plt.close()\n \n if reTrain:\n \n \"\"\"Combine model Parameters\"\"\"\n updatedParam = modelParameters.copy()\n updatedParam.update(bestHyperParams)\n\n bestModel, trainLoss, testLoss = model(xTrain, yTrain, xTest, yTest, **updatedParam)\n print trainLoss[-1]\n print testLoss[-1]\n \n if plotGraphs:\n plt.close()\n plotHeight = 10\n plotWidth = 20\n fig, axs = plt.subplots(1, 2, figsize = (plotWidth, plotHeight)) \n \n plt.suptitle(\"Best model\")\n\n axs[0].plot(trainLoss, label = \"Training set Loss for \" + str(bestHyperParams))\n axs[0].legend()\n axs[0].grid()\n axs[1].plot(testLoss, label = \"Test set Loss for \" + str(bestHyperParams))\n axs[1].legend()\n axs[1].grid()\n \n plt.show()\n \n \n \n return bestModel, bestHyperParams", "def eval_gridsearch(N=500, v=10):\n print \"Evaluating gridsearch\"\n from sklearn.linear_model import LogisticRegressionCV, LogisticRegression\n from sklearn.grid_search import GridSearchCV\n X, X_noisy, signal_to_noise = simulate_signoise(N, v)\n\n X.columns = [str(xx) + \"_obs\" for xx in X.columns]\n X_noisy.columns = [str(xx).replace('_obs', \"_pred\") for xx in X.columns]\n\n df = pd.concat([X, X_noisy], axis=1)\n df[ml_vars.SAMPLE_KEY] = map(str, range(df.shape[0]))\n df[ml_vars.PROJECT_KEY] = \"Test\"\n\n Cs = [.01, .1, 1, 10, 100]\n LRCV_args = {\"model\": GridSearchCV,\n \"model_args\": [LogisticRegression(class_weight=\"auto\"),\n {\"C\": Cs}],\n \"model_kwargs\": {\"n_jobs\": 4, \"verbose\": 2}\n }\n LR_args = {\"model\": LogisticRegressionCV, \"model_kwargs\": {\"Cs\": Cs}}\n\n model1 = bm.WeightedNearestNeighbor(**LRCV_args)\n model2 = bm.WeightedNearestNeighbor(**LR_args)\n\n eval_sel1 = bm.EvalSelect(df, model1)\n eval_sel2 = bm.EvalSelect(df, model2)\n\n print \"fitting...\"\n eval_sel1.fit(X.columns, X_noisy.columns)\n eval_sel2.fit(X.columns, X_noisy.columns)\n\n print \"predicting...\"\n accuracy1 = eval_sel1.predict()\n accuracy2 = eval_sel2.predict()\n assert (accuracy1 - accuracy2).max() < .01, \"Grid search yielded discordant results\"", "def grid_search(self, params):\n train_X, train_y, dev_X, dev_y = self.extract_train_dev_data()\n clf = self.classifiers[0]\n pred_y = clf.grid_search(params, train_X, train_y, dev_X)\n logger.info(classification_report(dev_y, pred_y))", "def grid_search(self, model_name, params):\n model = self.model_dict[model_name]\n regr = ms.GridSearchCV(\n estimator=model, param_grid=params, cv=3, scoring='neg_mean_squared_error', n_jobs=1, verbose=1)\n regr.fit(self.data.loc[self.train_index, self.selected_features_],\n 
self.data.loc[self.train_index, self.target_name])\n self.best_params_ = regr.best_params_\n self.train_score_ = regr.best_score_", "def search_hyperparams(vocab_size, alpha_range, mbs_range, hid_units_range,\n max_grad_range, train_seqs, val_seqs, num_hp_samples=10):\n models_info = []\n for i in range(num_hp_samples):\n print('Model %s out of %s' % ((i+1), num_hp_samples))\n # learning rate\n r = np.random.rand() * (alpha_range[1] - alpha_range[0]) \\\n + alpha_range[0]\n alpha = 10 ** r\n \n # mini-batch size\n s = np.random.randint(mbs_range[0], mbs_range[1])\n mbs = 2 ** s\n \n # number of hidden units\n num_hidden_units = np.random.randint(hid_units_range[0], \n hid_units_range[1]) \n \n # max_grad\n mg = np.random.randint(max_grad_range[0], max_grad_range[1])\n \n rnn_model = rnn.RNN(num_hidden_units, vocab_size, vocab_size)\n rnn_model.train_mini_batch(train_seqs, train_seqs, epochs=NUM_EPOCHS, \n mini_batch_size=mbs, learning_rate=alpha, num_iter_msg=None, \n max_grad=mg, shift_X=True)\n train_cost = get_cost(rnn_model, train_seqs, vocab_size)\n val_cost = get_cost(rnn_model, val_seqs, vocab_size)\n \n # add to list of models tried in format:\n # (training accuracy, validation accuracy, alpha, mini-batch size,\n # number of hidden units, max_grad)\n model_info = (train_cost, val_cost, alpha, mbs, num_hidden_units,\n mg)\n models_info.append(model_info)\n\n # print out the networks based on which had the best validation cost\n models_info.sort(reverse=False, key=lambda model: model[1])\n return models_info", "def GridSearchTraining(X_train, y_train, param_grid,k): \n # create model\n Kmodel = KerasRegressor(build_fn=create_model, verbose=2)\n # start grid search \n grid = GridSearchCV(estimator=Kmodel, param_grid=param_grid, n_jobs=1,scoring='neg_mean_absolute_error', cv=k)\n grid_result = grid.fit(X_train, y_train)\n # summarize results\n print('='*50)\n print(\"Best: %f using %s\" % (grid_result.best_score_, grid_result.best_params_))\n means = grid_result.cv_results_['mean_test_score']\n stds = grid_result.cv_results_['std_test_score']\n params = grid_result.cv_results_['params']\n for mean, stdev, param in zip(means, stds, params):\n print(\"%f (%f) with: %r\" % (mean, stdev, param))", "def score_grid():\r\n\t\r\n\tp = 'results\\\\mnist_filter'\r\n\t(tr_x, tr_y), (te_x, te_y) = load_mnist()\r\n\t\r\n\t# Get the SPs\r\n\tsps = [load(os.path.join(p, sp)) for sp in os.listdir(p) if sp[2] == '0']\r\n\tsp2 = load(os.path.join(p, 'sp1-0.pkl'))\r\n\t\r\n\tnwindows = 26 ** 2\r\n\tnfeat = 100 * nwindows\r\n\t\r\n\t# w = [sp2.p[sp2.syn_map == j] for j in xrange(nfeat)]\r\n\t# ms = max(wi.shape[0] for wi in w)\r\n\t# with open(os.path.join(p, 'data.pkl'), 'wb') as f:\r\n\t\t# cPickle.dump((w, ms), f, cPickle.HIGHEST_PROTOCOL)\r\n\twith open(os.path.join(p, 'data.pkl'), 'rb') as f:\r\n\t\tw, ms = cPickle.load(f)\r\n\t\r\n\t# Get training data\r\n\ttr_x2 = np.zeros((tr_x.shape[0], nfeat))\r\n\tfor i, x in enumerate(tr_x):\r\n\t\tnx = extract_patches_2d(x.reshape(28, 28), (3, 3)).reshape(\r\n\t\t\tnwindows, 9)\r\n\t\tx = np.array(np.zeros(nfeat), dtype='bool')\r\n\t\tfor j, (xi, sp) in enumerate(izip(nx, sps)):\r\n\t\t\tsp.step(xi)\r\n\t\t\tx[j*100:(j*100)+100] = sp.y[:, 0]\r\n\t\t\r\n\t\ty = sp2.p * x[sp2.syn_map]\r\n\t\tw = np.zeros((nfeat, ms))\r\n\t\tfor j in xrange(nfeat):\r\n\t\t\ta = y[sp2.syn_map == j]\r\n\t\t\tw[j][:a.shape[0]] = a\r\n\t\ttr_x2[i] = np.mean(w, 1)\r\n\t\r\n\t# Get testing data\r\n\tte_x2 = np.zeros((te_x.shape[0], nfeat))\r\n\tfor i, x in enumerate(te_x):\r\n\t\tnx = 
extract_patches_2d(x.reshape(28, 28), (3, 3)).reshape(\r\n\t\t\tnwindows, 9)\r\n\t\tx = np.array(np.zeros(nfeat), dtype='bool')\r\n\t\tfor j, (xi, sp) in enumerate(izip(nx, sps)):\r\n\t\t\tsp.step(xi)\r\n\t\t\tx[j*100:(j*100)+100] = sp.y[:, 0]\r\n\t\t\r\n\t\ty = sp2.p * x[sp2.syn_map]\r\n\t\tw = np.zeros((nfeat, ms))\r\n\t\tfor j in xrange(nfeat):\r\n\t\t\ta = y[sp2.syn_map == j]\r\n\t\t\tw[j][:a.shape[0]] = a\r\n\t\tte_x2[i] = np.mean(w, 1)\r\n\t\r\n\t# Classify\r\n\tclf = LinearSVC(random_state=123456789)\r\n\tclf.fit(tr_x2, tr_y)\r\n\tprint 'SVM Accuracy : {0:2.2f} %'.format(clf.score(te_x2, te_y) * 100)", "def fit_grid():\r\n\t\r\n\tp = 'results\\\\mnist_filter'\r\n\t# try:\r\n\t\t# os.makedirs(p)\r\n\t# except OSError:\r\n\t\t# pass\r\n\tnp.random.seed(123456789)\r\n\t# kargs = {\r\n\t\t# 'ninputs': 9,\r\n\t\t# 'ncolumns': 100,\r\n\t\t# 'nsynapses': 5,\r\n\t\t# 'random_permanence': True,\r\n\t\t# 'pinc':0.03, 'pdec':0.05,\r\n\t\t# 'seg_th': 3,\r\n\t\t# 'nactive': 10,\r\n\t\t# 'duty_cycle': 100,\r\n\t\t# 'max_boost': 10,\r\n\t\t# 'global_inhibition': True,\r\n\t\t# 'trim': 1e-4\r\n\t# }\r\n\tkargs2 = {\r\n\t\t'ninputs': 100 * (26 ** 2),\r\n\t\t'ncolumns': 2048,\r\n\t\t'nsynapses': 1000,\r\n\t\t'random_permanence': True,\r\n\t\t'pinc':0.03, 'pdec':0.05,\r\n\t\t'seg_th': 5,\r\n\t\t'nactive': 20,\r\n\t\t'duty_cycle': 100,\r\n\t\t'max_boost': 10,\r\n\t\t'global_inhibition': True,\r\n\t\t'trim': 1e-4\r\n\t}\r\n\t\r\n\t# Get the data\r\n\t(tr_x, tr_y), (te_x, te_y) = get_data()\r\n\tnwindows = 26 ** 2\r\n\t\r\n\t# # Make the SPs\r\n\t# sps = [SPRegion(**kargs) for _ in xrange(nwindows)]\r\n\t\r\n\t# # Train the SPs\r\n\t# nepochs = 10\r\n\t# t = time.time()\r\n\t# for i in xrange(nepochs):\r\n\t\t# print i\r\n\t\t# for j, x in enumerate(tr_x):\r\n\t\t\t# print '\\t{0}'.format(j)\r\n\t\t\t# nx = extract_patches_2d(x.reshape(28, 28), (3, 3)).reshape(\r\n\t\t\t\t# nwindows, 9)\r\n\t\t\t# for xi, sp in izip(nx, sps):\r\n\t\t\t\t# sp.step(xi)\r\n\t# t1 = time.time() - t\r\n\t# print t1\r\n\t\r\n\t# # Save this batch of SPs\r\n\t# for i, sp in enumerate(sps):\r\n\t\t# sp.learn = False\r\n\t\t# sp.save(os.path.join(p, 'sp0-{0}.pkl'.format(i)))\r\n\t\r\n\t# Make the top level SP\r\n\tsp2 = SPRegion(**kargs2)\r\n\t\r\n\t# Get the SPs\r\n\tsps = [load(os.path.join(p, sp)) for sp in os.listdir(p) if sp[2] == '0']\r\n\t\r\n\t# Train the top SP\r\n\tnepochs = 10\r\n\tt = time.time()\r\n\tfor i in xrange(nepochs):\r\n\t\tprint i\r\n\t\tfor j, x in enumerate(tr_x):\r\n\t\t\tprint '\\t{0}'.format(j)\r\n\t\t\tnx = extract_patches_2d(x.reshape(28, 28), (3, 3)).reshape(\r\n\t\t\t\tnwindows, 9)\r\n\t\t\toutput = np.array(np.zeros(100 * nwindows), dtype='bool')\r\n\t\t\tfor k, (xi, sp) in enumerate(izip(nx, sps)):\r\n\t\t\t\tsp.step(xi)\r\n\t\t\t\toutput[k*100:(k*100)+100] = sp.y[:, 0]\r\n\t\t\tsp2.step(output)\r\n\tt2 = time.time() - t\r\n\tprint t2\r\n\t\r\n\t# Save the top SP\r\n\tsp2.learn = False\r\n\tsp2.save(os.path.join(p, 'sp1-0.pkl'))", "def parameter_tuning(D, param_grid):\n grid = ParameterGrid(param_grid)\n\n for params in grid:\n model_file = 'Theshpairs1_Ind_5' + '_emb_' + str(params['embedding_size']) + '_nr_' + str(\n params['negative_ratio']) + \\\n '_batch_' + str(params['batch_size']) + '_epochs_' \\\n + str(params['nb_epochs']) + '_classification_' + str(params['classification'])\n\n print(model_file)\n\n # Train Model\n Prio = NNEmbeddings(D, embedding_size=params['embedding_size'], negative_ratio=params['negative_ratio'],\n nb_epochs=params['nb_epochs'], batch_size=params['batch_size'],\n 
classification=params['classification'], save=True,\n model_file='Models/' + model_file + '.h5')\n\n # New Predicitons\n df_metrics = Prio.predict(pickle_file=None)\n plot_single(df_metrics)\n plot_metric(df_metrics, name='Plot_Metrics/' + model_file + '.png')", "def search(x_data, y_data, n = 5):\r\n alpha = np.arange(0.01, 8, 0.01)\r\n param_grid = {'alpha' : alpha} \r\n clf = MultinomialNB() \r\n grid_search = GridSearchCV(clf, param_grid, cv=n)\r\n grid_search.fit(x_data, y_data)\r\n return grid_search.best_params_", "def do_gridsearch():\n df = read_df()\n X = df['review']\n y = df['sentiment']\n X_train, X_holdout, y_train, y_holdout = train_test_split(X, y, test_size=0.3, shuffle=True, stratify=y, random_state=222 )\n tfidf = TfidfVectorizer(stop_words='english', max_df=0.8)\n\n stem_pipeline = make_pipeline(TextNormalizer(), tfidf, LogisticRegression()) # LogisticRegression(C=100) or MultinomialNB()\n param_grid = {'logisticregression__C': [10, 50, 100],\n 'tfidfvectorizer__ngram_range': [(1, 1), (1, 2), (1, 3), (1, 4)],\n 'tfidfvectorizer__min_df': [2, 3, 4, 5]}\n cv = StratifiedShuffleSplit(n_splits=3, test_size=0.2)\n grid = GridSearchCV(stem_pipeline, param_grid, cv=cv, n_jobs=-1)\n grid.fit(X_train, y_train)\n print(grid.best_estimator_)\n print(grid.best_score_)\n print(grid.best_params_)", "def FindGrid(self, p_float=..., p_float=..., p_float=..., *args, **kwargs):\n ...", "def perform_grid_search_with_cv(self, train_set):\n if train_set:\n print(\"Running grid search to find optimal hyper parameters\")\n self.LOG_HANDLE.info(\"Running grid search to find optimal hyper parameters\")\n\n param_grid = {'k': [30, 40, 50], 'min_k': [1, 3, 5],\n 'sim_options':\n {\n 'name': ['cosine', 'pearson', 'msd'],\n 'user_based': [False]\n }\n }\n gs = GridSearchCV(KNNWithMeans, param_grid, measures=model_params.all_models_training_error_measures, cv=model_params.cross_validation_folds)\n gs.fit(train_set)\n\n # best RMSE score\n print(\"Best RMSE after CV: \")\n print(gs.best_score['rmse'])\n self.LOG_HANDLE.info(gs.best_score['rmse'])\n\n # combination of parameters that gave the best RMSE score\n print(\"Best parameters after CV: \")\n print(gs.best_params['rmse'])\n self.LOG_HANDLE.info(gs.best_params['rmse'])", "def train_candidates(nb_layers = 4, nb_neurons = 300, p_drop = 0.15, nb_exp = 50):\n\n print(\"Calculation will be performed on {}\".format(device))\n \n # custom data loader, automatically sent to device\n ds = imelt.data_loader()\n\n for i in range(nb_exp):\n\n print(\"Training model {}\".format(i))\n print(\"...\\n\")\n name = \"./model/candidates/l\"+str(nb_layers)+\"_n\"+str(nb_neurons)+\"_p\"+str(p_drop)+\"_m\"+str(i)+\".pth\"\n\n # declaring model\n neuralmodel = imelt.model(ds.x_visco_train.shape[1],nb_neurons,nb_layers,ds.nb_channels_raman,\n activation_function = torch.nn.GELU(), p_drop=p_drop\n )\n\n # criterion for match\n criterion = torch.nn.MSELoss(reduction='mean')\n criterion.to(device) # sending criterion on device\n\n # we initialize the output bias and send the neural net on device\n neuralmodel.output_bias_init()\n neuralmodel = neuralmodel.float()\n neuralmodel.to(device)\n\n optimizer = torch.optim.Adam(neuralmodel.parameters(), lr = 0.0003, weight_decay=0.00) # optimizer\n neuralmodel, record_train_loss, record_valid_loss = imelt.training(neuralmodel, ds, criterion, optimizer, \n save_switch=True, save_name=name, nb_folds=1, train_patience=400, min_delta=0.05, verbose=True)", "def parameter_grid_search(X, y, model, metric, transform_grid, param_grid,\n 
test_split_size=0.2, verbose=False, logger=None):\n print_status_message('Beginning parameter grid search...', verbose, logger)\n t0 = time.time()\n params_list = list(ParameterGrid(param_grid))\n X_train, X_eval, y_train, y_eval = train_test_split(X, y, test_size=test_split_size)\n\n for transforms in transform_grid:\n print_status_message('Transforms = {0}'.format(str(transforms)), verbose, logger)\n print_status_message('', verbose, logger)\n print_status_message('', verbose, logger)\n transforms = fit_transforms(X_train, y_train, transforms)\n X_train = apply_transforms(X_train, transforms)\n X_eval = apply_transforms(X_eval, transforms)\n\n for params in params_list:\n tsub0 = time.time()\n for param, value in params.iteritems():\n print(param + \" = \" + str(value))\n setattr(model, param, value)\n\n print_status_message('Fitting model...', verbose, logger)\n model.fit(X_train, y_train)\n\n train_score = predict_score(X_train, y_train, model, metric)\n print_status_message('Training score = {0}'.format(str(train_score)), verbose, logger)\n\n eval_score = predict_score(X_eval, y_eval, model, metric)\n print_status_message('Evaluation score = {0}'.format(str(eval_score)), verbose, logger)\n\n tsub1 = time.time()\n print_status_message('Model trained in {0:3f} s.'.format(tsub0 - tsub1), verbose, logger)\n print_status_message('', verbose, logger)\n print_status_message('', verbose, logger)\n\n t1 = time.time()\n print_status_message('Grid search complete in {0:3f} s.'.format(t0 - t1), verbose, logger)", "def grid_search_learning_curve(base_model, train, test, param_grid,\n user_index=None, patk=5, epochs=range(2, 40, 2)):\n curves = []\n keys, values = zip(*param_grid.items())\n for v in itertools.product(*values):\n params = dict(zip(keys, v))\n this_model = copy.deepcopy(base_model)\n print_line = []\n for k, v in params.items():\n setattr(this_model, k, v)\n print_line.append((k, v))\n\n print(' | '.join('{}: {}'.format(k, v) for (k, v) in print_line))\n _, train_patk, train_mse, test_patk, test_mse = learning_curve(this_model, train, test,\n epochs, k=patk, user_index=user_index)\n curves.append({'params': params,\n 'patk': {'train': train_patk, 'test': test_patk},\n 'mse': {'train': train_mse, 'test': test_mse}})\n return curves", "def _run_fn_gridsearch(clfname, datasets, targets, fnsearch):\n for dataset, target in zip(datasets, targets):\n\n logging.info(f\"Initializing {clfname} search for {dataset}...\")\n data = load_dataset_df(dataset)\n\n train, test = split_test_train(data, target)\n train, test = scale_test_train(train, test)\n results, best, scorer = fnsearch(*train)\n\n models = load_best_model_json()\n if dataset not in models:\n models[dataset] = dict()\n\n predicted = best.predict(test.X)\n tested = recall_score(test.y, predicted)\n\n models[dataset][clfname] = {\n \"params\": best.get_params(),\n \"train\": results[0][1],\n \"test\": tested,\n \"scorer\": scorer,\n }\n\n update_best_model_json(models)\n\n print(f\"{'*'*20} Running {clfname} GridSearch for {dataset} {'*'*20}\")\n print(round(data[target].value_counts() / len(data[target]) * 100, 1), \"\\n\")\n\n print(f\"{'*'*10} Best fit {clfname} model {'*'*10}\")\n print(best, \"\\n\")\n\n print(f\"Score of best fit on test is: {round(tested, 5)}\\n\")\n\n print(f\"{'*'*10} Top N model params {'*'*10}\")\n for index in range(min(15, len(results))):\n print(f\"{index+1:02}. 
{results[index][0]} == {round(results[index][1], 5)}\")\n\n return None", "def grid_search(self, x_data, y_data, tuning_params, custom_kfold=None):\n for i, model in enumerate(self.list_of_models):\n grid = GridSearchCV(model, tuning_params[i], cv=custom_kfold, scoring='f1', n_jobs=-1, verbose=3)\n grid.fit(x_data, y_data)\n params = grid.best_params_\n trained_model = grid.best_estimator_\n self.trained_models.append(trained_model)\n p = re.compile(r\"(.*)\\(.*\")\n model_name = re.match(p, str(trained_model)).group(1)\n print \"for {} model, best parameters were: {}\".format(model_name, params)\n print \"its f1 score was: {} \\n\".format(grid.cv_results_['mean_test_score'][0])", "def grid(self):\n self.best_params_list = clust_grid(self.model, self.param_grid, self.X, self.y, self.model_mask_cols)", "def _do_grid_search_round(self) -> Dict[str, Dict[str, Any]]:\n\n cfg = self.cfg_\n\n # Get the data to use, vectorizing the sample feature dictionaries\n y_train = list(self._generate_samples(self.grid_search_ids_, 'y'))\n X_train = self._vectorize_and_sparsify_data(self.gs_vec_,\n self.grid_search_ids_)\n\n # Feature selection\n if cfg.feature_selection_percentile != 1.0:\n loginfo('Removing {0}% of the features during grid search round...'\n .format(100 - 100*cfg.feature_selection_percentile))\n X_train = \\\n (SelectPercentile(chi2,\n percentile=100*cfg.feature_selection_percentile)\n .fit_transform(X_train, y_train))\n\n # Make a `StratifiedKFold` object using the list of labels\n # NOTE: This will effectively redistribute the samples in the\n # various grid search folds, but it will maintain the\n # distribution of labels. Furthermore, due to the use of the\n # `RandomState` object, it should always happen in the exact\n # same way.\n prng = np.random.RandomState(12345)\n gs_cv_folds_ = StratifiedKFold(y=y_train,\n n_folds=self.data_.grid_search_folds,\n shuffle=True,\n random_state=prng)\n\n # Iterate over the learners/parameter grids, executing the grid search\n # cross-validation for each\n loginfo('Doing a grid search cross-validation round with {0} folds for'\n ' each learner and each corresponding parameter grid.'\n .format(self.data_.grid_search_folds))\n n_jobs_learners = ['Perceptron', 'SGDClassifier',\n 'PassiveAggressiveClassifier']\n learner_gs_cv_params_ = {}\n for learner, learner_name, param_grids in zip(self.learners_,\n self.learner_names_,\n cfg.param_grids):\n\n loginfo('Grid search cross-validation for {0}...'\n .format(learner_name))\n\n # If the learner is `MiniBatchKMeans`, set the `batch_size`\n # parameter to the number of training samples\n if learner_name == 'MiniBatchKMeans':\n for param_grid in param_grids:\n param_grid['batch_size'] = [len(y_train)]\n\n # If learner is of any of the learner types in\n # `n_jobs_learners`, add in the `n_jobs` parameter specified\n # in the config (but only do so if that `n_jobs` value is\n # greater than 1 since it won't matter because 1 is the\n # default, anyway)\n if cfg.n_jobs > 1:\n if learner_name in n_jobs_learners:\n for param_grid in param_grids:\n param_grid['n_jobs'] = [cfg.n_jobs]\n\n # Make `GridSearchCV` instance\n folds_diff = cfg.grid_search_folds - self.data_.grid_search_folds\n if (self.data_.grid_search_folds < 2\n or folds_diff/cfg.grid_search_folds > 0.25):\n msg = ('Either there weren\\'t enough folds after collecting '\n 'data (via `ExperimentalData`) to do the grid search '\n 'round or the number of folds had to be reduced to such'\n ' a degree that it would mean a +25\\% reduction in the '\n 'total 
number of folds used during the grid search '\n 'round.')\n logerr(msg)\n raise ValueError(msg)\n gs_cv = GridSearchCV(learner(),\n param_grids,\n cv=gs_cv_folds_,\n scoring=self._resolve_objective_function())\n\n # Do the grid search cross-validation\n gs_cv.fit(X_train, y_train)\n learner_gs_cv_params_[learner_name] = gs_cv.best_params_\n del gs_cv\n\n del X_train\n del y_train\n\n return learner_gs_cv_params_", "def gridSearch_XGB(gridnum=3):\n n_est_list = np.array([1, 5, 10, 50, 100, 500, 1000])\n max_dep_list = list(range(1,3))\n if gridnum==1:\n grid = {'n_estimators': n_est_list, 'max_depth': np.array(max_dep_list)}\n elif gridnum==2:\n grid = {'max_depth': np.array(max_dep_list+[50])}\n else:\n grid = {'n_estimators': np.array([1000,5000])}\n XGBCla = get_XGBmodel()\n GSxgbCla = GridSearchCV(\n XGBCla, \n grid, \n verbose=2, \n cv=StratifiedKFold(n_splits=5, shuffle=True)\n )\n print(GSxgbCla.best_params_)", "def grid_search_epsilon(environmnet, policy='ε–greedy', parameter='epsilon'):\n\tparameter_values = []\n\tavg_scores = []\n\tavg_steps = []\n\n\tcount = 1\n\tdecay_search = [0.5, 0.6, 0.7, 0.8, 0.9, 0.99, 0.99]\n\tfor param_num in decay_search:\n\n\t\tagent = Q_Agent(exploration_rate_decay=param_num, epsilon=1)\n\t\tall_iterations, all_rewards, step_count = agent.train(environmnet, print_results=True, iter_n=1000,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t policy=policy)\n\t\tavg_scores.append(np.mean(all_rewards))\n\t\tavg_steps.append(np.mean(step_count))\n\t\tparameter_values.append(param_num)\n\t\trewards_data = np.array([all_iterations, all_rewards])\n\t\tstep_data = np.array([all_iterations, step_count])\n\n\t\tnp.savetxt(\n\t\t\t'/Users/matthewgalloway/Documents/RF/q_learning/' + parameter + '_inv/' + parameter + '_rewards_' + str(\n\t\t\t\tparam_num) + '.csv', rewards_data.transpose(), delimiter=\",\")\n\t\tnp.savetxt(\n\t\t\t'/Users/matthewgalloway/Documents/RF/q_learning/' + parameter + '_inv/' + parameter + '_steps_' + str(\n\t\t\t\tparam_num) + '.csv', step_data.transpose(), delimiter=\",\")\n\t\tif count % 50 == 0:\n\t\t\tprint('iteration {} of 10'.format(count))\n\n\t\tcount += 1\n\tresults = {\n\t\t'param_values': parameter_values,\n\t\t'avg_scores': avg_scores,\n\t\t'avg_steps': avg_steps,\n\n\t}\n\tprint(results)\n\treturn pd.DataFrame(results)", "def get_network(data, parameters, num_layers, layers=None, save=None):\n d_act = get_d_act(parameters)\n\n if layers is None:\n layers = [[\"relu\", \"dropout\"], [\"relu\", \"regularizer\"], [\"relu\"], [\"sigmoid\"]]\n # If some layers haven't None or regularizer\n layers = [layer if len(layer) > 1 else layer + [None] for layer in layers]\n\n best_networks = []\n for layer in layers:\n acts = get_act_by_layer(layer, d_act)\n if not isinstance(acts, list):\n acts = [acts]\n for act in acts:\n param_grid = {'act': [act]}\n grid_search = GridSearchCV(ModelKeeper(), param_grid)\n grid_search.fit(data.train, data.train_labels, epochs=200)\n flatten = itertools.chain.from_iterable\n grid_search.score(list(ft.reduce(operator.iconcat, data.test, [])), list(ft.reduce(operator.iconcat, data.test_labels, [])))\n\n all_results = grid_search.cv_results_\n\n acts = [([act[\"act\"]], layer) for act in all_results[\"params\"]]\n results = all_results[\"mean_test_score\"]\n\n best_networks += sorted([(results[i], acts[i]) for i in range(len(results))])\n\n best_networks_by_iterations = []\n best_networks_by_iterations += best_networks\n submission = pd.DataFrame({'best_networks_by_iterations': best_networks_by_iterations})\n 
submission.to_csv('best_networks_by_iterations.csv', index=True)\n # Look over best_networks num_layers - 1 times\n for _ in range(num_layers - 1):\n k = len(best_networks)\n for i in range(k):\n network = best_networks[i]\n act_second = get_act_by_layer(network[1][1], d_act)\n\n param_grid = {'act': act_second}\n grid_search = GridSearchCV(ModelKeeper(network[1][0]), param_grid)\n grid_search.fit(data.train, data.train_labels, epochs=200)\n grid_search.score(list(ft.reduce(operator.iconcat, data.test, [])), list(ft.reduce(operator.iconcat, data.test_labels, [])))\n\n all_results = grid_search.cv_results_\n\n acts = [(network[1][0] + [act[\"act\"]], network[1][1]) for act in all_results[\"params\"]]\n results = all_results[\"mean_test_score\"]\n\n best_networks += sorted([(results[i], acts[i]) for i in range(len(results))], reverse=True)[:2]\n\n best_networks = sorted(best_networks, reverse=True)[:len(best_networks) // 3 + 1]\n best_networks_by_iterations += best_networks\n\n if save:\n # Save result by iteration\n best_networks_by_iterations += best_networks\n submission = pd.DataFrame({'best_networks_by_iterations': best_networks_by_iterations})\n submission.to_csv('best_networks_by_iterations.csv', index=True)\n\n return best_networks[:3], best_networks_by_iterations", "def compute(self, X, Y, n):\n inner_cv = KFold(5, shuffle=True, random_state=1673)\n\n print('-> grid searching and cross validation ...')\n for training, validation, j in self._k_fold_cross_validation(X, 5, n):\n\n x, y, valid_x, valid_y = X.loc[training, :], Y[training], X.loc[validation, :], Y[validation]\n x_features, valid_features = self.sat_features.loc[training, :], self.sat_features.loc[validation, :]\n\n if 'kNN' in self.model_list:\n parameters = {'n_neighbors': range(1, 18, 2)}\n model = KNeighborsRegressor(weights='distance')\n self.kNN = GridSearchCV(estimator=model, param_grid=parameters, cv=inner_cv, scoring=r2)\n\n res = self.kNN.fit(x, y).predict(valid_x)\n self.results['kNN'].append(list(res))\n self.scores['kNN'].append(R2(valid_y, res))\n\n if 'Kriging' in self.model_list:\n parameters = {\"kernel\": [RBF(l) for l in [[1, 1]]]}\n model = GaussianProcessRegressor(alpha=0.1, n_restarts_optimizer=0)\n self.Kriging = GridSearchCV(estimator=model, param_grid=parameters, cv=inner_cv, scoring=r2)\n\n res = self.Kriging.fit(x, y).predict(valid_x)\n self.results['Kriging'].append(list(res))\n self.scores['Kriging'].append(R2(valid_y, res))\n\n if 'RmSense' in self.model_list:\n parameters = {\"alpha\": [0.001, 0.01, 0.1, 1, 10, 100, 1000]}\n model = Ridge()\n self.RmSense = GridSearchCV(estimator=model, param_grid=parameters, cv=inner_cv, scoring=r2)\n #print('INFO: best alpha - ', self.RmSense.fit(x_features, y).best_params_)\n\n res = self.RmSense.fit(x_features, y).predict(valid_features)\n self.results['RmSense'].append(list(res))\n self.scores['RmSense'].append(R2(valid_y, res))\n\n if 'Ensamble' in self.model_list:\n res = (self.RmSense.predict(valid_features) + self.kNN.predict(valid_x)) / 2.\n self.results['Ensamble'].append(list(res))\n self.scores['Ensamble'].append(R2(valid_y, res))\n\n for m in self.model_list:\n print('score {}: {}'.format(m, np.mean(self.scores[m])))", "def grid_search(y, tx, w0, w1):\n losses = np.zeros((len(w0), len(w1)))\n # ***************************************************\n # INSERT YOUR CODE HERE\n # TODO: compute loss for each combination of w0 and w1.\n # ***************************************************\n \n for i in range(len(w0)):\n for j in range(len(w1)):\n w = 
np.array([w0[i], w1[j]])\n losses[i, j] = compute_cost(y, tx, w)\n \n return losses", "def parameter_search(model_class, momentum, matches, seasons_train, seasons_valid, seasons_test,\n eval_functions, size, n_jobs, seed, test_run):\n params_grid = get_parameter_grid(model_class.__name__, momentum=momentum, size=size, seed=seed)\n valid_index = matches['Season'].isin(seasons_valid).values\n test_index = matches['Season'].isin(seasons_test).values\n print(\"params_grid size: {}\".format(len(params_grid)))\n # For testing: sequential vs parallel\n if test_run:\n results = []\n for i, params in tqdm(params_grid.iterrows()):\n results.append(evaluate(model_class, params.to_dict(), matches, valid_index, test_index,\n seasons_train, seasons_valid, seasons_test, eval_functions))\n else:\n results = Parallel(n_jobs=n_jobs)(delayed(evaluate)(model_class, params.to_dict(), matches,\n valid_index, test_index,\n seasons_train, seasons_valid, seasons_test,\n eval_functions) for i, params in tqdm(params_grid.iterrows()))\n results = pd.DataFrame.from_records(results).sort_values('valid_logloss')\n return results" ]
[ "0.6940125", "0.6903616", "0.678047", "0.67030805", "0.6629677", "0.6530864", "0.6529506", "0.6354389", "0.6334039", "0.6317028", "0.6215231", "0.6210962", "0.61947966", "0.61233485", "0.6083378", "0.6068833", "0.6054756", "0.60233617", "0.60059977", "0.5999201", "0.59955436", "0.59868646", "0.5932703", "0.5926081", "0.59214145", "0.59155947", "0.5912796", "0.5881196", "0.5877707", "0.5871668" ]
0.7779989
0
Gets the next trash day for a given date
def next_regular_trash_day(date: str) -> str: parsed_date = parser.parse(date) day_of_week = parsed_date.weekday() if day_of_week < TRASH_DAY: delta = TRASH_DAY - day_of_week elif day_of_week == TRASH_DAY: delta = 0 else: delta = 7 - (day_of_week - TRASH_DAY) next_trash_date = parsed_date + datetime.timedelta(days=delta) return next_trash_date.strftime('%Y-%m-%d')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def next_day(date):\n return date + datetime.timedelta(days=1)", "def next_day(date):\n return date + datetime.timedelta(days=1)", "def _next_trading_day(self, day):\n next_day = self._trading_days.shift(-1)[day]\n return next_day if not pd.isnull(next_day) else None", "def get_next_day(self):\n pass", "def next_trash_day(date: str, holidays: list) -> dict:\n next_regular = next_regular_trash_day(date)\n weekdays = get_weekdays(next_regular)\n default_trash_day = {'type': 'default', 'schedule': calendar.day_name[TRASH_DAY]}\n if holiday.contains_holiday(weekdays):\n holiday_name = holiday.get_holiday(weekdays)\n find_holiday = list(filter(lambda holiday_delays: holiday_delays['name'] == holiday_name, holidays))\n if len(find_holiday) > 0:\n trash_day = {'type': 'holiday', 'holiday': holiday_name, 'schedule': find_holiday[0]['routeDelays']}\n else:\n trash_day = default_trash_day\n else:\n trash_day = default_trash_day\n\n return trash_day", "def get_next_weekday(date, weekday):\n return date + dt.timedelta(days=(weekday - date.weekday() + 7) % 7)", "def get_next_trading_day_schedule(reference_day: dt):\n reference_day = reference_day.date()\n schedule = get_trading_calendar(reference_day, reference_day)\n while schedule.empty:\n reference_day += timedelta(days=1)\n schedule = get_trading_calendar(reference_day, reference_day)\n return schedule", "def next_date(date):\n #For this function, I just created as many if else statements as I could to cover every situation I could think of.\n #Most of these if else statements are distinct edge cases where I add 1 in a different spot each time.\n if date[0] == 1 or date[0] == 3 or date[0] == 5 or date[0] == 7 or date[0] == 8 or date[0] == 10:\n if date[1] < 31:\n nextday = (date[0], date[1] + 1, date[2])\n return nextday\n elif date[1] == 31:\n nextday = (date[0] + 1, 1, date[2])\n return nextday\n elif date[0] == 12:\n if date[1] < 31:\n nextday = (date[0], date[1] + 1, date[2])\n return nextday\n elif date[1] == 31:\n nextday = (1, 1, date[2] + 1)\n return nextday\n elif date[0] == 4 or date[0] == 6 or date[0] == 9 or date[0] == 11:\n if date[1] < 30:\n nextday = (date[0], date[1] + 1, date[2])\n return nextday\n elif date[1] == 30:\n nextday = (date[0] + 1, 1, date[2])\n return nextday\n elif date[0] == 2:\n if date[2] % 4 == 0 or date[2] % 1000 == 0:\n if date[1] < 29:\n nextday = (date[0], date[1] + 1, date[2])\n return nextday\n elif date[1] == 29:\n nextday = (date[0] + 1, 1, date[2])\n return nextday\n elif date[1] < 28:\n nextday = (date[0], date[1] + 1, date[2])\n return nextday\n elif date[1] == 28:\n nextday = (date[0] + 1, 1, date[2])\n return nextday", "def next_weekday(date, weekday):\n delta = weekday - date.weekday()\n if delta < 0:\n delta += 7\n return date + timedelta(days=int(delta))", "def find_next_valid(data, date):\n correct_date = None\n while correct_date is None:\n try:\n _ = data.loc[date]\n correct_date = date\n except KeyError:\n date = add_time(date, day=1)\n return correct_date", "def next_day_of_week(current, day_of_week):\n\n while current.weekday() != day_of_week:\n current += timedelta(1)\n return current", "def _to_next_ceiling_busi_day(date):\n try:\n date = parse(date)\n except TypeError:\n date = date\n\n date = date + relativedelta(months=+1)\n date = DateUtils._to_ceiling_busi_day(date)\n\n return date", "def get_tomorrow(x: Optional[Date] = None) -> Date:\n return (x or get_today()) + TimeDelta(days=1)", "def next_day(isotext):\n as_arrow = arrow.get(isotext)\n return 
as_arrow.replace(days=+1).isoformat()", "def next_day(isotext):\n as_arrow = arrow.get(isotext)\n return as_arrow.replace(days=+1).isoformat()", "def DAY(date):\n return _make_datetime(date).day", "def get_next_monday(date):\n return date + datetime.timedelta(days=-date.weekday(), weeks=1)", "def next_sunday(day):\n if day.weekday() == 6: # sunday\n return day + timedelta(days=7)\n else:\n return day + timedelta(days=(6 - day.weekday()))", "def increment_day(date):\n year, month, day = (date.year, date.month, date.day)\n try:\n day += 1\n return datetime.date(year, month, day)\n except ValueError:\n try:\n month += 1\n day = 1\n return datetime.date(year, month, day)\n except ValueError:\n try:\n year += 1\n month = 1\n day = 1\n return datetime.date(year, month, day)\n except ValueError:\n raise", "def next_day(year, month, day):\n thisday = dt.datetime(year, month, day)\n nextday = thisday + dt.timedelta(days=1)\n y = nextday.year\n m = nextday.month\n d = nextday.day\n return y, m, d", "def test_first_date_static_1(self):\n input_ = (datetime.date(2006, 3, 1), datetime.date(2006, 3, 30))\n expected = (datetime.date(2006, 3, 1), datetime.date(2006, 3, 31))\n actual = self.expander._get_next_days(*input_)\n\n self.assertEqual(expected, actual)", "def _first_good_date(self, day):\n count = 0\n while True:\n try:\n self.data.loc[day - timedelta(count)]\n return day - timedelta(count)\n except KeyError:\n count += 1", "def test_first_date_static_2(self):\n input_ = (datetime.date(2006, 3, 1), datetime.date(2006, 3, 31))\n expected = (datetime.date(2006, 3, 1), datetime.date(2006, 4, 1))\n actual = self.expander._get_next_days(*input_)\n\n self.assertEqual(expected, actual)", "def get_next_closest_day(weekday):\n names = {\n 'monday': 0,\n 'tuesday': 1,\n 'wednesday': 2,\n 'thursday': 3,\n 'friday': 4,\n 'saturday': 5,\n 'sunday': 6\n }\n\n today = get_current_india_time().date()\n day_shift = (names[weekday] - today.weekday()) % 7\n next_day = datetime.datetime.combine(\n today + datetime.timedelta(days=day_shift), datetime.time.min)\n\n if next_day.weekday() == today.weekday():\n next_day = next_day + datetime.timedelta(days=7)\n return next_day", "def get_next_byday(self, daystring, startdate, fmt=None):\n\n # decimal number day of the week we're starting from. 
%w formats using Sunday as day 0.\n dow_start = int(datetime.datetime.strftime(startdate, '%w'))\n\n\n # decimal number day of week we're trying to get.\n dow_target = self.weekdays.index(daystring)\n\n # len - ((start + (len - target)) % len)\n days_ahead = 7 - ((dow_start + (7 - dow_target)) % 7)\n res = startdate + datetime.timedelta(days=days_ahead)\n return res", "def next_release_date(date):\n df = get_release_dates()\n df = df[df['ReleaseDate'] > date]\n return df['ReleaseDate'].iloc[0]", "def get_day(today: Day, weekday_number: int) -> Day:\n assert type(today) is Day\n assert type(weekday_number) is int\n\n today = today.to_date_time()\n date_list = list(rrule(DAILY, count=1, wkst=MO, byweekday=weekday_number, dtstart=today))\n if date_list:\n return Day(date_list[0])", "def get_date(self, ord):\n if 0 <= ord < self.days_count:\n return self.start + timedelta(days=ord)\n else:\n raise IndexError()", "def tomorrow(self):\n tomorrow = datetime.date.today() + datetime.timedelta(days=1)\n return [t for t in self.tasks if t.date == tomorrow]", "def tomorrow(self):\n if self.isLeapYear():\n fdays = 29\n else:\n fdays = 28\n\n DIM = [0, 31, fdays, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n\n currentDay = self.day\n maxDay = DIM[self.month]\n\n if currentDay == maxDay and self.month == 12:\n self.year += 1\n self.month = 1\n self.day = 1\n elif currentDay == maxDay:\n self.month += 1\n self.day = 1\n else:\n self.day += 1" ]
[ "0.76969635", "0.76969635", "0.70977765", "0.704082", "0.66516757", "0.6224729", "0.6222216", "0.61540645", "0.6137545", "0.6133715", "0.6121568", "0.59999067", "0.59967107", "0.59871405", "0.59871405", "0.5984374", "0.58846253", "0.58306783", "0.579761", "0.57365865", "0.56952983", "0.56930244", "0.5680795", "0.5623294", "0.561203", "0.5610597", "0.5567719", "0.5566013", "0.5557397", "0.5556569" ]
0.82727396
0
Gets the next trash day, taking holidays into consideration
def next_trash_day(date: str, holidays: list) -> dict: next_regular = next_regular_trash_day(date) weekdays = get_weekdays(next_regular) default_trash_day = {'type': 'default', 'schedule': calendar.day_name[TRASH_DAY]} if holiday.contains_holiday(weekdays): holiday_name = holiday.get_holiday(weekdays) find_holiday = list(filter(lambda holiday_delays: holiday_delays['name'] == holiday_name, holidays)) if len(find_holiday) > 0: trash_day = {'type': 'holiday', 'holiday': holiday_name, 'schedule': find_holiday[0]['routeDelays']} else: trash_day = default_trash_day else: trash_day = default_trash_day return trash_day
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_next_day(self):\n pass", "def next_regular_trash_day(date: str) -> str:\n parsed_date = parser.parse(date)\n day_of_week = parsed_date.weekday()\n\n if day_of_week < TRASH_DAY:\n delta = TRASH_DAY - day_of_week\n elif day_of_week == TRASH_DAY:\n delta = 0\n else:\n delta = 7 - (day_of_week - TRASH_DAY)\n\n next_trash_date = parsed_date + datetime.timedelta(days=delta)\n return next_trash_date.strftime('%Y-%m-%d')", "def _next_trading_day(self, day):\n next_day = self._trading_days.shift(-1)[day]\n return next_day if not pd.isnull(next_day) else None", "def next_deadline():\n\n today = date.today()\n\n days_since_starting_sunday = (today - date(2016, 9, 4)).days\n\n if days_since_starting_sunday % 14 < 7:\n return next_sunday(next_sunday(today))\n else:\n return next_sunday(today)", "def get_nearest_business_day(date: date_class, holidays: list) -> date_class:\n \n while date in holidays:\n num_of_days_after_holiday = 1\n week_day_placeholder = (date + timedelta(days=num_of_days_after_holiday)).weekday()\n while week_day_placeholder in {5,6}:\n num_of_days_after_holiday += 1\n week_day_placeholder = (date + timedelta(days=num_of_days_after_holiday)).weekday()\n \n num_of_days_before_holiday = 1\n week_day_placeholder = (date - timedelta(days=num_of_days_before_holiday)).weekday()\n while week_day_placeholder in {5,6}:\n num_of_days_before_holiday += 1\n week_day_placeholder = (date - timedelta(days=num_of_days_before_holiday)).weekday()\n\n # POST PROCESS\n # Note: If the number of days before and after are same, the default date will be next day.\n if num_of_days_before_holiday < num_of_days_after_holiday:\n date -= timedelta(days=num_of_days_before_holiday)\n else:\n date += timedelta(days=num_of_days_after_holiday)\n return date", "def next_day(date):\n return date + datetime.timedelta(days=1)", "def next_day(date):\n return date + datetime.timedelta(days=1)", "def next_date(date):\n #For this function, I just created as many if else statements as I could to cover every situation I could think of.\n #Most of these if else statements are distinct edge cases where I add 1 in a different spot each time.\n if date[0] == 1 or date[0] == 3 or date[0] == 5 or date[0] == 7 or date[0] == 8 or date[0] == 10:\n if date[1] < 31:\n nextday = (date[0], date[1] + 1, date[2])\n return nextday\n elif date[1] == 31:\n nextday = (date[0] + 1, 1, date[2])\n return nextday\n elif date[0] == 12:\n if date[1] < 31:\n nextday = (date[0], date[1] + 1, date[2])\n return nextday\n elif date[1] == 31:\n nextday = (1, 1, date[2] + 1)\n return nextday\n elif date[0] == 4 or date[0] == 6 or date[0] == 9 or date[0] == 11:\n if date[1] < 30:\n nextday = (date[0], date[1] + 1, date[2])\n return nextday\n elif date[1] == 30:\n nextday = (date[0] + 1, 1, date[2])\n return nextday\n elif date[0] == 2:\n if date[2] % 4 == 0 or date[2] % 1000 == 0:\n if date[1] < 29:\n nextday = (date[0], date[1] + 1, date[2])\n return nextday\n elif date[1] == 29:\n nextday = (date[0] + 1, 1, date[2])\n return nextday\n elif date[1] < 28:\n nextday = (date[0], date[1] + 1, date[2])\n return nextday\n elif date[1] == 28:\n nextday = (date[0] + 1, 1, date[2])\n return nextday", "def next_sunday(day):\n if day.weekday() == 6: # sunday\n return day + timedelta(days=7)\n else:\n return day + timedelta(days=(6 - day.weekday()))", "def _get_next_monday(self):\n today = datetime.date.today()\n weekday_int = today.weekday()\n if weekday_int == 0:\n return today\n next_mon = today + timedelta(7 - weekday_int)\n return next_mon", "def 
get_latest_trading_date(date, url, service_key):\n holidays = get_holidays(date.year, url, service_key)\n holidays.append(datetime.datetime(year=date.year, month=12, day=31))\n holidays = tuple(holidays)\n while date.weekday() in (5, 6) or date in holidays:\n # 0:MON, 1:TUE, 2:WED, 3:THU, 4:FRI, 5:SAT, 6:SUN\n date = date - datetime.timedelta(days=1)\n return date", "def get_next_trading_day_schedule(reference_day: dt):\n reference_day = reference_day.date()\n schedule = get_trading_calendar(reference_day, reference_day)\n while schedule.empty:\n reference_day += timedelta(days=1)\n schedule = get_trading_calendar(reference_day, reference_day)\n return schedule", "def get_next_closest_day(weekday):\n names = {\n 'monday': 0,\n 'tuesday': 1,\n 'wednesday': 2,\n 'thursday': 3,\n 'friday': 4,\n 'saturday': 5,\n 'sunday': 6\n }\n\n today = get_current_india_time().date()\n day_shift = (names[weekday] - today.weekday()) % 7\n next_day = datetime.datetime.combine(\n today + datetime.timedelta(days=day_shift), datetime.time.min)\n\n if next_day.weekday() == today.weekday():\n next_day = next_day + datetime.timedelta(days=7)\n return next_day", "def test_get_index_of_day(self):\n days = [\"01.07.2013\",\n \"05.07.2013\",\n \"09.07.2013\",\n \"14.07.2013\",\n \"19.07.2013\"]\n # Find the days\n self._test_find_day(days)\n # Search for a day that is not part of the list\n # 1. A day before the first entry\n self._test_giod(days, \"01.01.2013\", 0,\n -1, \"Find not existing day in list\")\n self._test_giod(days, \"01.01.2013\", 1,\n 0, \"Find a date before days withe next = 1\")\n self._test_giod(days, \"01.01.2013\", 1,\n 0, \"Find a date before days withe next = -1\")\n # 2. A day after the last entry\n self._test_giod(days, \"01.12.2013\", 0,\n -1, \"Find not existing day in list\")\n self._test_giod(days, \"01.12.2013\", 1,\n 4, \"Find a date after days with next = 1\")\n self._test_giod(days, \"01.12.2013\", -1,\n 4, \"Find a date after days with next = -1\")\n # 3. 
A day in the middle\n self._test_giod(days, \"06.07.2013\", 0,\n -1, \"Find not existing day in list\")\n self._test_giod(days, \"06.07.2013\", 1,\n 2, \"Find a date after days with next = 1\")\n self._test_giod(days, \"06.07.2013\", -1,\n 1, \"Find a date after days with next = -1\")", "def next_day_of_week(current, day_of_week):\n\n while current.weekday() != day_of_week:\n current += timedelta(1)\n return current", "def get_next_weekday(date, weekday):\n return date + dt.timedelta(days=(weekday - date.weekday() + 7) % 7)", "def n_business_days(self, n=-2):\n\n business_days = 0\n calendar_days = 0 \n if n != 0:\n step = int(n/abs(n))\n while business_days != abs(n):\n calendar_days = calendar_days + step\n if business_day(self.time_stamp + timedelta(calendar_days)):\n business_days = business_days + 1\n return self.time_stamp + timedelta(calendar_days)\n return date", "def next_weekday(weekday, d=datetime.datetime.now()):\n if weekday.lower() not in day_values:\n return None\n days_ahead = day_values[weekday.lower()] - d.weekday()\n if days_ahead <= 0: # Target day already happened this week\n days_ahead += 7\n return d + datetime.timedelta(days_ahead)", "def test_get_index_of_day_one_day_list(self):\n days = [\"15.07.2013\"]\n self._test_find_day(days)\n self._test_giod(days, \"16.07.2013\", 0,\n -1, \"Find not existing day in an One-Day-List\")\n self._test_giod(days, \"16.07.2013\", 1,\n 0, \"Find not existing day in an One-Day-List with next=1.\")\n self._test_giod(days, \"16.07.2013\", -1,\n 0, \"Find not existing day in an One-Day-List with next=-1.\")\n self._test_giod(days, \"10.07.2013\", 0,\n -1, \"Find not existing day in an One-Day-List\")\n self._test_giod(days, \"10.07.2013\", 1,\n 0, \"Find not existing day in an One-Day-List with next=1.\")\n self._test_giod(days, \"10.07.2013\", -1,\n 0, \"Find not existing day in an One-Day-List with next=-1.\")", "def getnextrunningdate(jsondata):\n\n returneddata = json.loads(jsondata)\n days = {}\n\n if returneddata[\"response_code\"]==200:\n trainData = returneddata[\"train\"]\n daysData = trainData[\"days\"]\n if daysData:\n for day in trainData[\"days\"]:\n days[day[\"day-code\"]]=day[\"runs\"]\n\n today = datetime.date.today()\n nextweekday = (today + datetime.timedelta(days=7))\n\n for i in range(len(days)):\n runningdate = (nextweekday + datetime.timedelta(days=i))\n if models.istrainrunningonjourneydate(days, runningdate):\n return runningdate\n\n return nextweekday", "def test_first_date_static_1(self):\n input_ = (datetime.date(2006, 3, 1), datetime.date(2006, 3, 30))\n expected = (datetime.date(2006, 3, 1), datetime.date(2006, 3, 31))\n actual = self.expander._get_next_days(*input_)\n\n self.assertEqual(expected, actual)", "def _FirstSunday(self, dtz): # pylint: disable-msg=C0103,R0201\n return dtz + datetime.timedelta(days=(6-dtz.weekday()))", "def test_first_date_static_2(self):\n input_ = (datetime.date(2006, 3, 1), datetime.date(2006, 3, 31))\n expected = (datetime.date(2006, 3, 1), datetime.date(2006, 4, 1))\n actual = self.expander._get_next_days(*input_)\n\n self.assertEqual(expected, actual)", "def next_day(isotext):\n as_arrow = arrow.get(isotext)\n return as_arrow.replace(days=+1).isoformat()", "def next_day(isotext):\n as_arrow = arrow.get(isotext)\n return as_arrow.replace(days=+1).isoformat()", "def lambda_handler(event, context) -> dict:\n logging.info('Starting function with context=%s and event=%s', context, event)\n date = event['date']\n\n holiday_schedule = trash_schedule_service.get_schedule()\n 
trash_day = trash.next_trash_day(date, holiday_schedule)\n logging.info('Completed function with response=%s', trash_day)\n return trash_day", "def next_weekday(date, weekday):\n delta = weekday - date.weekday()\n if delta < 0:\n delta += 7\n return date + timedelta(days=int(delta))", "def test_delta_31_days(self):\n input_ = (datetime.date(1996, 3, 30), datetime.date(1996, 4, 30))\n output = self.expander._get_next_days(*input_)\n expected = 31\n actual = (output[-1] - output[0]).days\n\n self.assertEqual(expected, actual)", "def get_fixed_holidays(self, year):\n # 2021 exception.\n # Because May 1st is both International Workers' day and Easter\n self.include_labour_day = (year != 2021)\n\n # Unshifted days are here:\n days = super().get_fixed_holidays(year)\n days_to_inspect = copy(days)\n for day_shifted in self.get_shifted_holidays(days_to_inspect):\n days.append(day_shifted)\n\n # 2021 exception.\n # Because May 1st is both International Workers' day and Easter\n if year == 2021:\n days.append((date(2021, 5, 4), self.labour_day_label))\n return days", "def _first_good_date(self, day):\n count = 0\n while True:\n try:\n self.data.loc[day - timedelta(count)]\n return day - timedelta(count)\n except KeyError:\n count += 1" ]
[ "0.70619345", "0.69836265", "0.69702566", "0.6622131", "0.64956003", "0.6239636", "0.6239636", "0.6213379", "0.61707443", "0.61419547", "0.61302257", "0.61245453", "0.6110192", "0.6090458", "0.6057315", "0.60220957", "0.6020746", "0.60141313", "0.5966333", "0.5921652", "0.5907723", "0.59058887", "0.5903364", "0.5881393", "0.5881393", "0.5833611", "0.5809572", "0.5803372", "0.57618505", "0.575208" ]
0.7244827
0
Get `Tokenizer` and `Model` for a model name.
def get_tokenizer_and_model(model_name: str): tokenizer = AutoTokenizer.from_pretrained(model_name) model = AutoModel.from_pretrained(model_name) model.output_hidden_states = True return tokenizer, model
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_model(model):\n all_models = cmd.get_object_list()\n\n if len(all_models) == 0:\n logging.parser_error('No models are opened.')\n return\n\n model = model.lower()\n\n if model and (model in all_models):\n return model\n\n if len(all_models) > 1:\n logging.parser_error(\"Please specify which model you want to use. {}\".format(all_models))\n return\n\n return all_models[0]", "def get_model_by_name(self, model_name):\n models = ModelDirectory.get_model_by_name(model_name, pipeline=self)\n return models", "def get_model(model_name):\n model = CNN().get_model(model_name=model_name)\n\n return model", "def get_model(name):\n\n try:\n from .model_defs import get_model_from_def\n model = get_model_from_def(name)\n logger.info(\"Model {n} loaded from model_defs module\".format(n=name))\n except NameError:\n try:\n model = get_model_from_yaml(name)\n logger.info(\"Model {n} loaded from yaml\".format(n=name))\n except KeyError:\n try:\n from .model_defs import parse_model_name\n model = parse_model_name(name)\n logger.info(\"Model {n} parsed from name\".format(n=name))\n except NameError:\n sys.exit(\"Unknown model {n}\".format(n=name))\n\n if not hasattr(model, 'name'):\n model.name = name\n\n return model", "def get_model(model_name):\n module_name = 'strain.models.strain_' + model_name.lower()\n model_module = importlib.import_module(module_name)\n obj = getattr(model_module, model_name)\n return obj", "def get_model_definition(request):\n modelname = request.matchdict['modelname']\n results = db_model_definition(request.db)[modelname]\n for result in results:\n return result.value\n raise NotFound(\"Unknown model %s\" % modelname)", "def get_model(self, name):\n bundle_name, model_name = name.split(\".\")\n bundle = self.bundles[bundle_name]\n model = bundle.models[name]\n return model", "def get_model(model=gin.REQUIRED):\n return model", "def get_model_reference(self, model_name):\n\n print_debug(\"Geting model :\" + model_name)\n model = ModelsFactory.get(model_name=model_name)\n return model", "def get_model_by_name(cls, name):\n model_name = inflection.camelize(name) # class name of the model to use\n model = cls.models[model_name]\n return model", "def find_model_using_name(model_name):\n model_filename = \"models.\" + model_name + \"_model\"\n modellib = importlib.import_module(model_filename)\n model = None\n target_model_name = model_name.replace('_', '') + 'model'\n for name, cls in modellib.__dict__.items():\n if name.lower() == target_model_name.lower() \\\n and issubclass(cls, BaseModel):\n model = cls\n\n if model is None:\n print(\"In %s.py, there should be a subclass of BaseModel with class name that matches %s in lowercase.\" % (model_filename, target_model_name))\n exit(0)\n\n return model", "def model_name(self):\n setting = self.get_setting_definition(self.key, **self.get_kwargs())\n\n return setting.get('model', None)", "def retrieve_model(self, model_name):\n\t\tmodel_detail = dbop.get_model(self, model_name)\n\t\t#since the 'owner' field of model_detail is only owner's username,\n\t\t#we have to change it to a User object\n\t\t#In this case, the owner of this model is the user itself\n\t\tmodel_detail['owner'] = self\n\t\tif model_detail['model_type'] == 'SPSS Predictive Model':\n\t\t\treturn model.SPSSModel(**model_detail)\n\t\telif model_detail['model_type'] == 'DashDB In-database Model':\n\t\t\treturn model.DashdbModel(**model_detail)", "def get_model(name, disable_logging=False):\n return PluginLoader._import(\"train.model\", name, disable_logging)", "def 
get_model_name(self) -> str:\n raise NotImplementedError", "def get_tokenizer_class(model_name):\n return OpenAIGPTTokenizer if model_name == 'openai-gpt' else GPT2Tokenizer", "def get_model(model: str) -> Any:\n try:\n model_function = eval(model)\n except (NameError, AttributeError) as err:\n sys.exit(f'{err}. Accepted models from {tf}, {sm}, {tfa}, {tfc}')\n return model_function", "def get_model(name):\n # Evil reflection\n model_name = name.lower()\n model_module = importlib.import_module('.'+model_name, cfg.model_pck)\n [(_, model_class)] = inspect.getmembers(\n model_module,\n lambda c: inspect.isclass(c) and sys.modules[c.__module__] == model_module)\n\n tf.logging.debug('Found class %s', model_class)\n return model_class", "def model(self) -> Optional[str]:\n return pulumi.get(self, \"model\")", "def model(self):\n return MODELS.get(self._model,self._model)", "def get_transformer(model_name):\n model_class, tokenizer_class, pretrained_weights = TRANSFORMER_MODELS[model_name]\n model = model_class.from_pretrained(pretrained_weights,\n output_hidden_states=True)\n tokenizer = tokenizer_class.from_pretrained(pretrained_weights)\n\n return model, tokenizer, TRANSFORMER_EMBEDDING_DIMS[model_name]", "def get_model(*, name: str) -> typing.Optional[typing.Type]:\n return getattr(open_alchemy.models, name, None)", "def model(self, model_num = 0):\n return self.struct[model_num]", "def GetModelName(filename, model):\n\n is_srn_model = translator.IsSrnModel(model)\n if(is_srn_model):\n model_name = filename + \"SrnModel\"\n else:\n model_name = filename + \"CellCycleModel\"\n\n return model_name", "def get_model(parameters):\n if MODEL == 6:\n return get_model_6(parameters)\n elif MODEL == 5:\n return get_model_5(parameters)\n elif MODEL == 4:\n return get_model_4(parameters)\n elif MODEL == 3:\n if CROSS_VALIDATION:\n return get_cv_model_3(parameters)\n else:\n return get_model_3(parameters)\n elif MODEL == 2:\n return get_model_2(parameters)\n else:\n return get_model_1(parameters)", "def model_class(self):\n model_name = self.model_name()\n\n if not model_name:\n return None\n\n try:\n (app, mdl) = model_name.strip().split('.')\n except ValueError:\n logger.error(f\"Invalid 'model' parameter for setting {self.key} : '{model_name}'\")\n return None\n\n app_models = apps.all_models.get(app, None)\n\n if app_models is None:\n logger.error(f\"Error retrieving model class '{model_name}' for setting '{self.key}' - no app named '{app}'\")\n return None\n\n model = app_models.get(mdl, None)\n\n if model is None:\n logger.error(f\"Error retrieving model class '{model_name}' for setting '{self.key}' - no model named '{mdl}'\")\n return None\n\n # Looks like we have found a model!\n return model", "def get_model(self):\n return self.model", "def get_model(self):\n return self.model", "def get_model(self):\n return self.model", "def get_model(self):\n return self.model" ]
[ "0.7199492", "0.7023352", "0.6884367", "0.68522125", "0.68059415", "0.679673", "0.678864", "0.67526037", "0.6655757", "0.6638868", "0.6599854", "0.655437", "0.6554283", "0.65510416", "0.6477091", "0.6466176", "0.64611083", "0.64491063", "0.6438798", "0.6429388", "0.64275306", "0.6405628", "0.63969105", "0.6359604", "0.6333432", "0.63145095", "0.63063157", "0.63063157", "0.63063157", "0.63063157" ]
0.7214922
0
Return a list of all the activation norms.
def get_activation_norms( parameters: List[Parameter], normalize: bool = True ) -> List[float]: with torch.no_grad(): all_norms = [] for param in parameters: if len(param.size()) != 2: continue norms = param.norm(dim=1, p=2) if normalize: norms /= sqrt(param.size(1)) all_norms.extend(norm.item() for norm in norms) return all_norms
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_weight_norms(self, sess, matrix_norm_fxn = lambda x: np.linalg.norm(x, ord = 1)):\n model_norms = []\n weights_list = self.get_weights_np(sess)\n for weights in weights_list:\n norm = matrix_norm_fxn(weights)\n model_norms.append(norm)\n return model_norms", "def get_norms(self):\n l1_sum = 0\n l2_sum = 0\n actives = 0\n for lbl in self.labels:\n for fid in self.w[lbl]:\n # apply and remaing L1 penalities at the end of training.\n alpha = self.s - self.lastW[lbl].get(fid,0)\n self.w[lbl][fid] = self.w[lbl].get(fid, 0) - alpha\n weight = self.w[lbl][fid]\n l1_sum += weight if weight > 0 else -weight\n l2_sum += weight * weight\n if weight != 0:\n actives += 1\n l2_sum = math.sqrt(l2_sum)\n return (l1_sum,l2_sum,actives)", "def ensemble_norm(self):\n return np.linalg.norm(self.ensemble_transition_matrix, ord=\"fro\")", "def get_kernel_norms(self):\n corresponding_simu = self._corresponding_simu()\n get_norm = np.vectorize(lambda kernel: kernel.get_norm())\n return get_norm(corresponding_simu.kernels)", "def get_weight_norms(parameters: List[Parameter]) -> np.ndarray:\r\n with torch.no_grad():\r\n norms = torch.cat([param.abs().flatten() for param in parameters])\r\n return norms.numpy()", "def get_kernel_norms(self):\n return self.adjacency", "def get_kernel_norms(self):\n return np.einsum('ijk->ij', self.amplitudes)", "def norm(self):", "def _generate_batch_norms(self, Node_Sizes):\n batchnorms = [None for _ in range(len(Node_Sizes)-1)]\n for i in range(len(Node_Sizes)-1):\n batchnorms[i] = nn.BatchNorm1d(Node_Sizes[i])\n\n return batchnorms", "def layer_norm_vars(units):\n scale = tf.get_variable(\n \"layer_norm_scale\", [units], initializer=tf.ones_initializer())\n bias = tf.get_variable(\n \"layer_norm_bias\", [units], initializer=tf.zeros_initializer())\n return scale, bias", "def FindBatchNormLayers(network):\n batch_norm_keys = []\n for layer in network.layer:\n if layer.type =='BatchNorm':\n batch_norm_keys.append(layer.name)\n \n return batch_norm_keys", "def norm(self):\n # TODO: implement\n return", "def _init_norm(self, weights):\n from tensorflow.python.ops.linalg_ops import norm\n with variable_scope.variable_scope('init_norm'):\n flat = array_ops.reshape(weights, [-1, self.layer_depth])\n return array_ops.reshape(norm(flat, axis=0), (self.layer_depth,))", "def _init_norm(self, weights):\n from tensorflow.python.ops.linalg_ops import norm\n with variable_scope.variable_scope('init_norm'):\n flat = array_ops.reshape(weights, [-1, self.layer_depth])\n return array_ops.reshape(norm(flat, axis=0), (self.layer_depth,))", "def norm(self):\n raise NotImplementedError", "def batch_norm(self, inputs):\n x = inputs\n x = self.bn(x)\n return x", "def batch_norm(x, train, init, act=None, name=None, eps=1e-5, decay=0.9):\n\n return tf.contrib.layers.batch_norm(x,\n decay=decay,\n epsilon=eps,\n scale=True,\n param_initializers=init,\n is_training=train,\n scope=name,\n activation_fn=act,\n updates_collections=None)", "def _layer_norm_vars(filters):\n scale = tf.get_variable(\n \"layer_norm_scale\", [filters], initializer=tf.ones_initializer())\n bias = tf.get_variable(\n \"layer_norm_bias\", [filters], initializer=tf.zeros_initializer())\n return scale, bias", "def calc_norm(self, corpus):\n logger.info(\"Performing %s normalization...\" % (self.norm))\n norms = []\n numnnz = 0\n docno = 0\n for bow in corpus:\n docno += 1\n numnnz += len(bow)\n norms.append(matutils.unitvec(bow, self.norm))\n self.num_docs = docno\n self.num_nnz = numnnz\n self.norms = norms", "def norm( 
self):\n return self._norm", "def remove_norms(self):\n dev = next(self.parameters()).device\n for name, module in self.named_modules():\n try:\n nn.utils.remove_spectral_norm(module, name='weight_hh_l0')\n print(\"Removed spectral norm from {}\".format(name))\n except:\n pass\n try:\n nn.utils.remove_spectral_norm(module, name='weight_hh_l0_reverse')\n print(\"Removed spectral norm from {}\".format(name))\n except:\n pass\n try:\n nn.utils.remove_weight_norm(module)\n print(\"Removed wnorm from {}\".format(name))\n except:\n pass\n self.to(device=dev)", "def get_tensor_list_norm(tensor_list: List[torch.Tensor]):\n\t# return torch.norm(torch.cat(tensor_list, dim=0))\n\treturn torch.norm(tensor_list)", "def get_components(self, norm=False):\n return self._var_names", "def getNorm(self, norm=lambda l: (sum(map(lambda x: x ** 2, l))) ** (1 / 2)):\n return norm(self.components)", "def get_norm_layer():\n norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)\n return norm_layer", "def weight_l2_norm():\n cumulated_l2_norm = tf.constant(0., dtype=tf.float32)\n for trainable_variable in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES):\n name = trainable_variable.name.split('/')[-1]\n if name.startswith('weights'):\n cumulated_l2_norm += tf.nn.l2_loss(trainable_variable)\n return cumulated_l2_norm", "def test_batch_norm_layers():\n layers = [[\"gru\", 20], [\"lstm\", 3], [\"linear\", 4], [\"linear\", 10]]\n rnn = RNN(layers_info=layers, hidden_activations=\"relu\", input_dim=5,\n output_activation=\"relu\", initialiser=\"xavier\", batch_norm=True)\n assert len(rnn.batch_norm_layers) == 3\n assert rnn.batch_norm_layers[0].num_features == 20\n assert rnn.batch_norm_layers[1].num_features == 3\n assert rnn.batch_norm_layers[2].num_features == 4", "def layer_norm_vars(filters):\n scale = tf.get_variable(\n \"layer_norm_scale\", [filters], initializer=tf.ones_initializer())\n bias = tf.get_variable(\n \"layer_norm_bias\", [filters], initializer=tf.zeros_initializer())\n return scale, bias", "def get_activation_names(model: onnx_pb.ModelProto) -> List[str]:\n activation_names = get_graph_intermediate_activations(model.graph)\n activation_names.extend([node.name for node in model.graph.output])\n return activation_names", "def __call__(self, features):\n norm = []\n for data in features:\n if all(x == 0 for x in data):\n norm.append(data)\n else:\n scale = sum(x*x for x in data) ** 0.5\n normalized_data = [x / scale for x in data]\n norm.append(normalized_data)\n \n return norm" ]
[ "0.72380096", "0.634995", "0.6256386", "0.6210975", "0.61363864", "0.6032762", "0.6022503", "0.5840832", "0.58042806", "0.570163", "0.5697164", "0.568862", "0.56479114", "0.56479114", "0.56430805", "0.5551576", "0.5540852", "0.5531631", "0.55178106", "0.5497287", "0.54775363", "0.54610723", "0.54588825", "0.5448181", "0.5445513", "0.54421204", "0.5440094", "0.54398483", "0.5411783", "0.54110795" ]
0.737135
0
Get an array of all weight magnitudes in the parameter list.
def get_weight_norms(parameters: List[Parameter]) -> np.ndarray: with torch.no_grad(): norms = torch.cat([param.abs().flatten() for param in parameters]) return norms.numpy()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getMagnitudes(self):\n return self._bmag, self._vmag, self._jmag, self._hmag, self._kmag", "def get_params_as_list(self):\n\n\t\tparams = [self.shape_slope, self.z_thick, self.thick, self.length]\n\t\treturn params", "def weights(self):\n return [x.numpy() for x in self.core.w]", "def GetWavelengths (self) :\n\t\treturn self.run(\"GetWavelengths\")", "def getweigths():\n ls = []\n for i_lay in range(1, len(layers)):\n ls.append(layers[i_lay][\"weigths\"])\n return ls", "def get_weight_list(self) -> List[float]:\n return self._weight_list", "def weights(self) -> List[float]:", "def magnitudes():\n magnitudes = db_conn.session.query(db_conn.earthquakes.magnitude.distinct()).all()\n\n # converts a list of list into a single list (flattens list)\n earthquake_list = [item for sublist in list(magnitudes) for item in sublist]\n\n # return a list of column names (sample names)\n float_earthquakes = [float(x) for x in earthquake_list]\n return jsonify(earthquake_list)", "def get_variables(self) -> typing.List:\n parts = (self.neural_net.encoder, self.neural_net.predictor, self.neural_net.dynamics)\n return [v for v_list in map(lambda n: n.weights, parts) for v in v_list]", "def get_list_powers(self):\r\n _debug('simq03b_api.get_list_powers')\r\n \r\n s = self.query('SOUR:LIST:POW?')\r\n if s == None: return None\r\n a = []\r\n n = 0\r\n for x in s.split(','):\r\n try:\r\n a.append(float(x.strip()))\r\n except:\r\n print('ERROR get_list_powers(): non-float in list ', n, x)\r\n n += 1\r\n return a", "def calculate_mags(self):\n res = numpy.fft.rfft(self.cur_input)\n self.mags = []\n for num in res[1:]:\n real = float(numpy.real(num))\n imag = float(numpy.imag(num))\n mag = math.sqrt((real**2)+(imag**2))\n self.mags.append(mag)", "def get_list_powers(self):\r\n s = self.query('LIST:POW?')\r\n if s == None: return None\r\n a = []\r\n n = 0\r\n for x in s.split(','):\r\n try:\r\n a.append(float(x.strip()))\r\n except:\r\n print('ERROR get_list_powers(): non-float in list ', n, x)\r\n n += 1\r\n return a", "def get_weights(self):\n return [self.W]", "def get_weights(self):\n return [self.W]", "def waveforms(self):\n return list(self._waveforms)", "def get_data(self):\n return [self.weight]", "def get_forward_parameter_list(self):\n parameterlist = []\n parameterlist.append(self.weights)\n if self.bias is not None:\n parameterlist.append(self.bias)\n return parameterlist", "def get_weight_norms(self, sess, matrix_norm_fxn = lambda x: np.linalg.norm(x, ord = 1)):\n model_norms = []\n weights_list = self.get_weights_np(sess)\n for weights in weights_list:\n norm = matrix_norm_fxn(weights)\n model_norms.append(norm)\n return model_norms", "def get_list_powers(self):\r\n s = self.query('SOUR1:LIST:POW?')\r\n if s == None: return None\r\n a = []\r\n n = 0\r\n for x in s.split(','):\r\n try:\r\n a.append(float(x.strip()))\r\n except:\r\n print('ERROR get_list_powers(): non-float in list ', n, x)\r\n n += 1\r\n return a", "def get_list_powers(self):\r\n s = self.query('SOUR1:LIST:POW?')\r\n if s == None: return None\r\n a = []\r\n n = 0\r\n for x in s.split(','):\r\n try:\r\n a.append(float(x.strip()))\r\n except:\r\n print('ERROR get_list_powers(): non-float in list ', n, x)\r\n n += 1\r\n return a", "def _magsamples(self):\n if self._derived_properties[\"magsamples\"] is None:\n if self.lbda is None:\n raise AttributeError(\"lbda not set.\")\n self.derive_magsamples()\n \n return self._derived_properties[\"magsamples\"]", "def get_wavelength_array(self):\n return self.Me.get_wavelength_array()", "def 
_get_magnitudes(self):\n\n self.logging.debug('Get magnitudes ' )\n\n self.mags = {}\n\n steps = ['dbopen netmag', 'dbsubset orid != NULL']\n\n fields = ['orid', 'magid', 'magnitude', 'magtype',\n 'auth', 'uncertainty', 'lddate']\n\n for v in extract_from_db(self.db, steps, fields):\n orid = v.pop('orid')\n self.logging.debug('new mag for orid:%s' % orid)\n\n try:\n v['strmag'] = '%0.1f %s' % ( float(v['magnitude']), v['magtype'] )\n except:\n v['strmag'] = '-'\n\n if not orid in self.mags:\n self.mags[ orid ] = {}\n\n self.mags[ orid ][ v['magid'] ] = v", "def get_params_array(self):\n return np.array(self.W), np.array(self.b)", "def magnitude(*args):\r\n return sqrt(dot(args, args))", "def encode_weights(self):\n weights = []\n for param in self.global_policy.parameters():\n shape = list(param.shape)\n param_list = torch.flatten(param.data).tolist()\n weights.append(Tensor(shape, param_list))\n\n return weights", "def get_weights(self):\n return []", "def __call__(self, features: List[List[float]]) -> List[List[float]]:\n normalized_features = []\n for feature in features:\n magnitude = np.linalg.norm(feature)\n if magnitude != 0:\n feat = [a/magnitude for a in feature]\n normalized_features.append(feat)\n else:\n normalized_features.append(feature)\n return normalized_features\n #raise NotImplementedError", "def GetWeights(self) -> numpy.ndarray:\n return numpy.concatenate(list(\n variable_ndarray.flatten() for variable_ndarray in\n self._layer.get_weights()))", "def get_weights(self):\n return [self.w, self.b]" ]
[ "0.6793161", "0.6157048", "0.61102766", "0.605747", "0.60502726", "0.60382026", "0.6035437", "0.5940467", "0.5813786", "0.5808551", "0.5779497", "0.5756466", "0.57503474", "0.57503474", "0.5745381", "0.5709613", "0.5707603", "0.570318", "0.570047", "0.570047", "0.5683576", "0.5669668", "0.56548864", "0.5632903", "0.56290025", "0.5593409", "0.5588731", "0.55664223", "0.5548816", "0.55399966" ]
0.65364444
1
Get a list of parameters tied to the embedding layer.
def get_embed_params(model) -> List: return [param for name, param in model.named_parameters() if "embed" in name]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parameters(self):\n params = []\n for layer in (self.conv1, self.conv2, self.conv3, self.conv4, self.dense1, self.dense2):\n params += list(layer.parameters)\n return params", "def get_params(self) -> torch.Tensor:\n params = []\n for pp in list(self.net.parameters()):\n params.append(pp.view(-1))\n return torch.cat(params)", "def _get_parameters(self) -> list:\n return self.parameters", "def getListOfParameters(self):\n return self.model.getListOfParameters()", "def get_params(self):\n return list(self.params.values())", "def getParameters(self):\n params = []\n for m in [self.ix, self.ih, self.fx, self.fh, self.ox, self.oh, self.ux, self.uh]:\n # we do not get param of output module\n l = list(m.parameters())\n params.extend(l)\n\n one_dim = [p.view(p.numel()) for p in params]\n params = F.torch.cat(one_dim)\n return params", "def getParameters(self):\n params = []\n for m in [self.ix, self.ih, self.fx, self.fh, self.ox, self.oh, self.ux, self.uh]:\n # we do not get param of output module\n l = list(m.parameters())\n params.extend(l)\n\n one_dim = [p.view(p.numel()) for p in params]\n params = F.torch.cat(one_dim)\n return params", "def inference_parameters(self):\n params = nn.ParameterList()\n if 'parameters' in dir(self.inference_model):\n params.extend(list(self.inference_model.parameters()))\n params.extend(list(self.latent.inference_parameters()))\n return params", "def parameters(self):\n return [p for _, a in vars(self).items() for p in self._params(a)]", "def parameters(self):\n return [o.parameters for o in self.obs]", "def parameters(self):\n return self.vars", "def get_parameters(self):\n return self.parameters", "def get_parameters(self):\n return self.parameters", "def get_parameters(self):\n return self.parameters", "def get_parameters(self):\n return self.parameters", "def get_parameters(self):\n return self.parameters", "def _get_model_params(self) -> T.List[np.ndarray]:\n layers = {\n layer.name: numpy_helper.to_array(layer)\n for layer in self.onnx_model.graph.initializer\n }\n\n param_names = [\n \"imageinput_Mean\",\n \"conv_1_W\",\n \"conv_1_B\",\n \"batchnorm_1_mean\",\n \"batchnorm_1_var\",\n \"batchnorm_1_scale\",\n \"batchnorm_1_B\",\n \"conv_2_W\",\n \"conv_2_B\",\n \"batchnorm_2_mean\",\n \"batchnorm_2_var\",\n \"batchnorm_2_scale\",\n \"batchnorm_2_B\",\n \"conv_3_W\",\n \"conv_3_B\",\n \"batchnorm_3_mean\",\n \"batchnorm_3_var\",\n \"batchnorm_3_scale\",\n \"batchnorm_3_B\",\n \"fc_1_W\",\n \"fc_1_B\",\n \"fc_2_W\",\n \"fc_2_B\",\n \"fc_3_W\",\n \"fc_3_B\",\n ]\n\n params = [layers[param] for param in param_names]\n return params", "def params(self):\n return [p for sublist in [o.params for o in self.obs] for p in sublist]", "def parameters(self):\n return self.model.parameters()", "def get_parameters(self):\n return self.context.params", "def parameters(self):\n return self.pars", "def getListOfParameters(self, *args):\n return _libsbml.KineticLaw_getListOfParameters(self, *args)", "def parameters_list(self):\n return [getattr(self.parameters, p) for p in self.parameters_names()]", "def get_params(self):\n return {\n 'dropout': self._dropout,\n 'layer_size': self._layer_size,\n 'num_layers': self._num_layers,\n 'embedding_layer_size': self._embedding_layer_size,\n 'controller_type': self._controller_type\n }", "def parameters(self):\n return self.trainer_parameters", "def parameters(self):\n return self.trainer_parameters", "def params(self):\n return self._pars", "def get_layer_params(self):\n return self.layer_params", "def parameters(self):\n 
return self._params", "def parameters(self):\n return [term.parameter for term in self.terms]" ]
[ "0.7535987", "0.7381716", "0.7132315", "0.704354", "0.7043289", "0.7009622", "0.7009622", "0.70040184", "0.6953776", "0.691929", "0.69050103", "0.68977284", "0.68977284", "0.68977284", "0.68977284", "0.68977284", "0.6875893", "0.68742156", "0.68739325", "0.6872487", "0.684921", "0.68472654", "0.68291426", "0.6818245", "0.6816942", "0.6816942", "0.6792137", "0.6777787", "0.6771076", "0.6770322" ]
0.78168494
0
Get dictionary of parameters partitioned by layer number.
def get_params_by_layer(model) -> Dict: layers = defaultdict(list) for name, param in model.named_parameters(): pieces = name.split(".") if pieces[0] == "encoder" and pieces[1] == "block": layer = int(pieces[2]) layers[layer].append(param) return layers
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parameters_dict(self):\n return dict(zip(self.parameters_names(), self.parameters_list))", "def get_all_params(layer):\n layers = get_all_layers(layer)\n params = sum([l.get_params() for l in layers], [])\n return utils.unique(params)", "def get_layer_params(self):\n return self.layer_params", "def get_parameters(self):\n params = {\"train_frac\": self.train_frac, \"split_alg\": self.split_alg,\n \"nw_name\": self._nw_name, \"split_id\": self.split_id}\n return params", "def get_params(self):\n return {\n \"nspecies\": self.nspecies,\n \"lmax\": self.lmax,\n \"nmax\": self.nmax,\n \"rcut\": self.rcut,\n \"sigma\": self.sigma,\n \"trans_width\": self.trans_width\n }", "def parameters_config(self) -> dict:\n if not self.params_optional and not self.params_required:\n return {}\n parameters = dict()\n for parameter, parameter_details in self.parameters.items():\n parameters[parameter_details.name] = parameter_details.parameter_config()\n return parameters", "def layerParamKeys(self):\n return self._layerParamKeys", "def get_layer_ids(\n self,\n ):\n name_to_id = {}\n for n, _ in self.named_parameters():\n name_to_id[n] = 0\n return name_to_id", "def get_params(self):\n return {'classifier': self.classifier,\n 'grid_param': self.grid_param,\n 'n_param_comb': self.n_param_comb,\n 'top_bagging': self.bagging,\n 'bagging_param': self.bagging_param,\n 'comb_seed': self.comb_seed}", "def parameters_dict(self):\n return", "def getNumberParams(self):\n params = pd.DataFrame(columns=('filters', 'depth', 'width', 'height', 'bias', 'total'))\n for layer, weights in self.net.params.iteritems():\n vec = []\n total = (np.product(weights[0].data.shape) + np.product(weights[1].data.shape))\n values = weights[0].data.shape\n if len(weights) < 4:\n values = np.lib.pad(values, (0,4-len(values)), 'constant', constant_values=-1)\n vec.extend(values)\n vec.extend(weights[1].data.shape)\n vec.append(total)\n params.loc[layer] = vec\n return params", "def get_parameters(self) -> Dict[str, ParameterInfo]:\n parameter_info_list = {}\n\n for associated_op in self.associated_ops:\n word_tensor = self._get_word_tensor(associated_op)\n position_tensor = self._get_position_tensor(associated_op)\n token_tensor = self._get_token_tensor(associated_op)\n\n for param_tensor in [word_tensor, position_tensor, token_tensor]:\n op_with_param = None\n for consumer in param_tensor.consumers():\n if not consumer.name.startswith('gradients/'):\n assert op_with_param is None\n op_with_param = consumer\n assert op_with_param is not None\n parameter_info_list[param_tensor.op.name] = ParameterInfo('weight', [op_with_param.name])\n\n return parameter_info_list", "def _LayerParams(ii):\n if isinstance(p.transformer_layer_params_tpl, list):\n factor = p.num_layers // len(p.transformer_layer_params_tpl)\n i = ii // factor\n p_ii = p.transformer_layer_params_tpl[i].Copy()\n else:\n p_ii = p.transformer_layer_params_tpl.Copy()\n p_ii.name = 'layer_%d' % ii\n p_ii.has_aux_atten = p.has_aux_atten\n p_ii.mask_self_atten = p.mask_self_atten\n p_ii.input_dim = p.mdl_dim or p_ii.input_dim\n p_ii.output_dim = p.mdl_dim or p_ii.output_dim\n p_ii.packed_input = p.packed_input\n if (not isinstance(p_ii.tr_atten_tpl.num_heads, list) and\n p.num_atten_heads is not None):\n p_ii.tr_atten_tpl.num_heads = p.num_atten_heads\n if p.dropout_prob is not None:\n p_ii.tr_atten_tpl.atten_dropout_prob = p.dropout_prob\n p_ii.tr_atten_tpl.residual_dropout_prob = p.dropout_prob\n p_ii.tr_fflayer_tpl.residual_dropout_prob = p.dropout_prob\n 
p_ii.tr_fflayer_tpl.relu_dropout_prob = p.dropout_prob\n if p.stochastic_depth_droppath_prob is not None:\n ratio = p.stochastic_depth_droppath_prob * ii / (p.num_layers - 1)\n p_ii.tr_atten_tpl.residual_droppath_prob = ratio\n p_ii.tr_fflayer_tpl.residual_droppath_prob = ratio\n if p.hidden_dim is not None:\n p_ii.tr_fflayer_tpl.hidden_dim = p.hidden_dim\n p_ii.tr_atten_tpl.add_unnormalized_input = p.add_unnormalized_input\n if ii in p.moe_layers:\n p_ii.tr_fflayer_tpl = _MoeLayerParams(p_ii.tr_fflayer_tpl)\n return p_ii", "def gather_params(self):\n for layer in self.layers:\n for name, value in layer.params.iteritems():\n self.params[name] = value", "def params(self):\n result = {}\n weight_sets = self._build_weight_definitions(self.n_dim)\n for name in weight_sets.groups:\n result[name] = getattr(self, name)\n for name in weight_sets.singles:\n result[name] = getattr(self, name)\n result['score_net_params'] = self.score_net_params\n result['value_net_params'] = self.value_net_params\n return result", "def get_param_grid():\n layer_width = [32, 64, 128, 256, 512]\n layers = [2, 3, 4, 5, 6]\n epochs = [10, 25, 50, 75, 100]\n batch_size = [32, 64, 96, 128, 160, 192, 224, 256]\n activation = ['softmax', 'softplus', 'softsign', 'relu', 'tanh', 'sigmoid', 'hard_sigmoid', 'linear']\n init_mode = ['uniform', 'lecun_uniform', 'normal', 'zero', 'glorot_normal', 'glorot_uniform', 'he_normal',\n 'he_uniform']\n dropout_rate = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5]\n optimizer = ['adam', 'sgd', 'adadelta', 'adagrad', 'adamax', 'ftrl', 'nadam', 'rmsprop']\n\n grid = {'layer_width': layer_width,\n 'layers': layers,\n 'epochs': epochs,\n 'batch_size': batch_size,\n 'activation': activation,\n 'init_mode': init_mode,\n 'dropout_rate': dropout_rate,\n 'optimizer': optimizer}\n\n return grid", "def get_data_dict(params, x):\n parameters = {}\n for i, p in enumerate(feature_map.ordered_parameters):\n parameters[p] = x[i]\n for i, p in enumerate(var_form.ordered_parameters):\n parameters[p] = params[i]\n return parameters", "def get_params(self):\n return {\n 'dropout': self._dropout,\n 'layer_size': self._layer_size,\n 'num_layers': self._num_layers,\n 'embedding_layer_size': self._embedding_layer_size,\n 'controller_type': self._controller_type\n }", "def get_params(self, deep=True):\n #params = dict(kernel=self.kernel, dim=self.dim)\n params = dict(dim=self.dim)\n if deep:\n deep_items = self.kernel.get_params().items()\n params.update((k, val) for k, val in deep_items)\n return params", "def initialize_parameters(layer_dim):\n #tf.set_random_seed(0)\n L= len(layer_dim)\n parameters={}\n for i in range(1,L):\n parameters[\"W\" +str(i)] = tf.get_variable(\"W\"+str(i), [layer_dim[i],layer_dim[i-1]], initializer = tf.contrib.layers.xavier_initializer(seed=1))\n parameters[\"b\" +str(i)] = tf.get_variable(\"b\" +str(i),[layer_dim[i],1],initializer= tf.zeros_initializer())\n assert(parameters['W' + str(i)].shape == (layer_dim[i], layer_dim[i-1]))\n assert(parameters['b' + str(i)].shape == (layer_dim[i], 1))\n return parameters", "def param_name_dict():\n\n layer = caffe_pb2.LayerParameter()\n # get all parameter names (typically underscore case) and corresponding\n # type names (typically camel case), which contain the layer names\n # (note that not all parameters correspond to layers, but we'll ignore that)\n param_names = [s for s in dir(layer) if s.endswith('_param')]\n param_type_names = [type(getattr(layer, s)).__name__ for s in param_names]\n # strip the final '_param' or 'Parameter'\n param_names = 
[s[:-len('_param')] for s in param_names]\n param_type_names = [s[:-len('Parameter')] for s in param_type_names]\n return dict(zip(param_type_names, param_names))", "def get_all_param_values(layer):\n params = get_all_params(layer)\n return [p.get_value() for p in params]", "def get_params(self):\n params = {}\n for step in self.steps:\n params[step[0]] = step[1].get_params()\n return params", "def paramDetails(cls):\n return {\n 'dim': (10, 20, 2, 20),\n 'nIter': (1, 10, 2, 5),\n 'lamb': (.1, 1., .1, .05),\n 'alph': (30, 50, 5, 40)\n }", "def extract_parameters(self) -> Dict[str, Set[str]]:\n regex = \"\\{([A-Za-z0-9_]+)\\}\"\n reserved_parameters = [\n \"output\",\n \"input\",\n \"output_vec\",\n \"input_vec\",\n \"df\",\n \"vec_open\",\n \"vec_close\",\n ]\n parameters = {}\n for scope in self.scopes:\n parameters[scope] = set(\n [\n x\n for x in re.findall(regex, self.call)\n if x not in reserved_parameters\n ]\n )\n return parameters", "def get_params(self, deep=True):\n #params = dict(kernel=self.kernel, dim=self.dim)\n params = dict(columns=self.columns)\n if deep:\n for i, kernel in enumerate(self.kernels):\n print(\"--->\", \"\\ti = \", i, \"\\tkernel = \", kernel)\n deep_items = kernel.get_params().items()\n #params.update((k, val) for k, val in deep_items)\n for k, val in deep_items:\n print(\"\\tkey = \", k, \"\\tvalue = \", val)\n params.update(('k{}__{}'.format(i, k), val) for k, val in deep_items)\n return params", "def parameters(self):\n return {\n 'base':self.base.parameters(),\n 'material':[m.parameters() for m in self.material],\n 'fraction':self.fraction,\n }", "def _get_params(conv_layer, bn_layer, relu_layer=None):\n if 'use_bias' in conv_layer['config']:\n if conv_layer['config']['use_bias']:\n raise ValueError(\n 'use_bias should not be set to True in a Conv layer when followed '\n 'by BatchNormalization. The bias in the Conv would be redundant '\n 'with the one in the BatchNormalization.')\n\n del conv_layer['config']['use_bias']\n\n if 'name' in bn_layer['config']:\n del bn_layer['config']['name']\n\n # TODO(pulkitb): remove key conflicts\n params = dict(\n list(conv_layer['config'].items()) + list(bn_layer['config'].items()))\n\n if relu_layer is not None:\n params['post_activation'] = keras.layers.deserialize(relu_layer)\n\n return params", "def _get_params(conv_layer, bn_layer, relu_layer=None):\n if 'use_bias' in conv_layer['config']:\n if conv_layer['config']['use_bias']:\n raise ValueError(\n 'use_bias should not be set to True in a Conv layer when followed '\n 'by BatchNormalization. The bias in the Conv would be redundant '\n 'with the one in the BatchNormalization.')\n\n del conv_layer['config']['use_bias']\n\n if 'name' in bn_layer['config']:\n del bn_layer['config']['name']\n\n # TODO(pulkitb): remove key conflicts\n params = dict(\n list(conv_layer['config'].items()) + list(bn_layer['config'].items()))\n\n if relu_layer is not None:\n params['post_activation'] = keras.layers.deserialize(relu_layer)\n\n return params", "def parameters(self):\n return {\"P\": self.P,\n \"T\": self.T}" ]
[ "0.6692574", "0.65567654", "0.64245135", "0.6398583", "0.6372712", "0.6344212", "0.62882966", "0.62654275", "0.62573886", "0.6213964", "0.6201111", "0.619871", "0.6188635", "0.61766535", "0.6173816", "0.6171546", "0.61674243", "0.6164119", "0.6154546", "0.6140772", "0.6101595", "0.6095853", "0.6088415", "0.6085223", "0.60690856", "0.6064468", "0.6060108", "0.6036446", "0.6036446", "0.6026064" ]
0.70265716
0
Look for new photos on the google drive
def main(): credentials = get_credentials() http = credentials.authorize(httplib2.Http()) service = discovery.build('drive', 'v3', http=http) i = 0 total = 0 nextPageToken=None while True: results = service.files().list( pageSize=30, fields="nextPageToken, files(id, name, mimeType, modifiedTime)", spaces='photos', pageToken=nextPageToken ).execute() items = results.get('files', []) nextPageToken = results.get("nextPageToken") if not items: print('No files found.') else: for item in items: if item['mimeType'].split('/')[0] != 'image': continue if vcoll.findBySrcId(item['id']) is not None: continue destination = 'image_tags/validation/' + item['name'] file_content = get_file_stream(service, item['id']) if file_content and image_handler.is_valid_image(file_content): file_handler.upload_file_stream(destination, file_content) vcoll.insertValidationImage(destination, item['id'], item['modifiedTime']) total += 1 print("Downloaded {0} photos".format(total)) i += 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def retrieve_pics(drive_service):\n pic_q = Queue()\n page_token = None\n while True:\n response = drive_service.files().list(q=\"mimeType='image/jpeg'\",\n spaces='drive',\n fields='nextPageToken, files(id, name)',\n pageToken=page_token).execute()\n\n for file in response.get('files', []):\n if file.get('id') not in classify_data:\n pic_q.put((file.get('id'), file.get('name')))\n\n page_token = response.get('nextPageToken', None)\n if page_token is None:\n break\n\n print ('Found %s pictures' % pic_q.qsize()) # prints no. of new pics found\n return pic_q", "def searchImage(text):\n creds = None\n # The file token.json stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.json'):\n creds = Credentials.from_authorized_user_file('token.json', SCOPES)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n 'credentials.json', SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token.json', 'w') as token:\n token.write(creds.to_json())\n\n service = build('drive', 'v3', credentials=creds)\n\n results_length = 0\n results_message = []\n nextPageToken = \"first\"\n flex_dict = {\n \"type\": \"carousel\",\n \"contents\": []\n }\n\n with open(\"episode_info.json\", 'r', encoding=\"utf-8\") as f:\n episode_info = json.load(f)\n\n # imageFolder: https://drive.google.com/drive/u/0/folders/1CH7i08P4NK0WkhASe4_qs92fsL2Mz0tM\n\n while nextPageToken != []:\n if nextPageToken == \"first\":\n results = service.files().list(q=\"'1CH7i08P4NK0WkhASe4_qs92fsL2Mz0tM' in parents and fullText contains '{}'\".format(text), pageSize=1000,\n fields=\"nextPageToken, files(id, name)\").execute()\n else:\n results = service.files().list(q=\"'1CH7i08P4NK0WkhASe4_qs92fsL2Mz0tM' in parents and fullText contains '{}'\".format(text), pageSize=1000, pageToken=nextPageToken,\n fields=\"nextPageToken, files(id, name)\").execute()\n items = results.get('files', [])\n nextPageToken = results.get('nextPageToken', [])\n print(len(items))\n for item in items:\n img_url = \"https://lh3.googleusercontent.com/d/{}=w1080\".format(\n item['id'])\n # print(u'{0} ({1})'.format(item['name'], img_url))\n\n positino_left_brackets = item['name'].find(\"【\")\n position_right_brackets = item['name'].find(\"】\")\n position_dot = item['name'].find(\".jpg\")\n img_episode = item['name'][positino_left_brackets +\n 1:position_right_brackets]\n img_title = item['name'][position_right_brackets+1:position_dot]\n if img_title.find(text) != -1:\n results_length += 1\n if len(flex_dict[\"contents\"]) < 12:\n try:\n esfio = episode_info[img_episode][\"Every Spongebob Frame In Order\"]\n chn_episode_title = episode_info[img_episode][\"中文集數名稱\"]\n eng_episode_title = episode_info[img_episode][\"英文集數名稱\"]\n except:\n pass\n\n new_bubble_flex_message = {}\n\n with open(\"bubble_flex_message.json\", \"r\", encoding='utf-8') as f:\n new_bubble_flex_message = json.load(f)\n\n new_bubble_flex_message[\"hero\"][\"url\"] = img_url\n new_bubble_flex_message[\"body\"][\"contents\"][0][\"contents\"][0][\"text\"] = img_title\n new_bubble_flex_message[\"body\"][\"contents\"][1][\"contents\"][0][\"contents\"][1][\"text\"] = img_episode\n 
new_bubble_flex_message[\"body\"][\"contents\"][1][\"contents\"][1][\"contents\"][1][\"text\"] = esfio\n new_bubble_flex_message[\"body\"][\"contents\"][1][\"contents\"][2][\"contents\"][1][\"text\"] = chn_episode_title\n new_bubble_flex_message[\"body\"][\"contents\"][1][\"contents\"][3][\"contents\"][1][\"text\"] = eng_episode_title\n new_bubble_flex_message[\"footer\"][\"contents\"][0][\"action\"][\"data\"] = \"傳\" + img_url\n new_bubble_flex_message[\"footer\"][\"contents\"][1][\"action\"][\"uri\"] = img_url\n\n flex_dict[\"contents\"].append(new_bubble_flex_message)\n if len(flex_dict[\"contents\"]) == 12:\n if len(results_message) < 5:\n results_message.append(FlexSendMessage(\n alt_text=\"搜尋結果\",\n contents=flex_dict)\n )\n if len(results_message) == 5:\n print(\"len(results_message)1: \", len(results_message))\n return {\"length\": results_length, \"top5_url_list\": results_message}\n flex_dict[\"contents\"] = []\n if nextPageToken == []:\n if len(flex_dict[\"contents\"]) != 0:\n if len(results_message) < 5:\n results_message.append(FlexSendMessage(\n alt_text=\"搜尋結果\",\n contents=flex_dict)\n )\n print(\"len(results_message)2: \", len(results_message))\n return {\"length\": results_length, \"top5_url_list\": results_message}\n\n print(\"len(results_message)3: \", len(results_message))\n return {\"length\": results_length, \"top5_url_list\": results_message}", "def main():\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n 'credentials.json', SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n service = build('drive', 'v3', credentials=creds)\n\n print()\n\n # look for a specific folder and get its id\n\n page_token = None\n folder_name = \"Teaching!\"\n folder_id = None\n while True:\n response = service.files().list(\n q=\"mimeType='application/vnd.google-apps.folder' and name = '\" + folder_name + \"'\",\n spaces='drive',\n fields='nextPageToken, files(id, name)',\n pageToken=page_token).execute()\n for file in response.get('files', []):\n # Process change\n folder_id = file.get('id')\n print('Found folder: %s (%s)' % (file.get('name'), file.get('id')))\n page_token = response.get('nextPageToken', None)\n if page_token is None:\n break\n print()\n print()\n\n # check if file with same name already exists\n file_to_upload_path = 'samplefiles/sky.jpg'\n name_of_uploaded_file = 'sky.jpg'\n response = service.files().list(\n q=\"trashed = false and name = '\" + name_of_uploaded_file + \"' and parents in '\" + folder_id + \"'\",\n spaces='drive',\n fields='nextPageToken, files(id, name)',\n pageToken=page_token).execute()\n files = response.get('files', [])\n if files:\n print(\"File with name {0} in {1} already exists!\".format(name_of_uploaded_file, folder_name))\n print('File info: %s (%s)' % (files[0].get('name'), files[0].get('id')))\n else:\n print(\"File with name {0} does not exist in {1}.\".format(name_of_uploaded_file, folder_name))\n\n # do the upload\n print()\n print(\"Uploading 
file with name {0} to folder {1}\".format(name_of_uploaded_file, folder_name))\n file_metadata = {'name': name_of_uploaded_file, 'parents': [folder_id]}\n manager = multiprocessing.Manager() # need these lines because we have a return value we care about\n return_dict = manager.dict()\n\n p = multiprocessing.Process(target=upload_file, args=(service, file_metadata, file_to_upload_path, return_dict))\n p.start()\n p.join(10)\n if p.is_alive():\n print(\"still running... let's kill it\")\n p.kill()\n p.join()\n file = return_dict['file']\n print()\n if file.get('id'):\n print('File uploaded successfully. File ID: %s' % file.get('id'))\n else:\n print('File failed to upload!')\n\n print()\n print()", "def main():\n service = discovery.build('drive', 'v3', http=get_http())\n\n pic_q = retrieve_pics(service)\n classify_pics(pic_q)", "def main():\n store = file.Storage('token.json')\n creds = store.get()\n if not creds or creds.invalid:\n flow = client.flow_from_clientsecrets('credentials.json', SCOPES)\n creds = tools.run_flow(flow, store)\n service = build('drive', 'v2', http=creds.authorize(Http()))\n files_list = print_files_in_folder(service,'1Xy6wJozhJwLcsKKfNYASxDxBBbEHZoNy')\n if len(files_list) > 0:\n download_files(service,files_list)\n else:\n print \"No files to download\"\n\n # Call the Drive v3 API\n # results = service.files().list(q=\"mimeType='application/vnd.google-apps.folder' and 'AviPics' in parents\",\n # pageSize=10, fields=\"nextPageToken, files(id, name)\").execute()\n # results = service.children.list(folderId='1Xy6wJozhJwLcsKKfNYASxDxBBbEHZoNy')\n # items = results.get('files', [])\n\n # if not items:\n # print('No files found.')\n # else:\n # print('Files:')\n # for item in items:\n # print('{0} ({1})'.format(item['name'], item['id']))", "def scan_images(self):\n rtn = 0\n mime_list = self.db.get_mime_list()\n (results,count) = datastore.find({})\n for f in results:\n dict = f.get_metadata().get_dictionary()\n if dict[\"mime_type\"] in mime_list:\n #record the id, file size, file date, in_ds\n self.db.create_picture_record(f.object_id, f.get_file_path())\n rtn += 1\n f.destroy()\n self.db.commit()\n _logger.debug('%s entries found in journal. 
Number of pictures %s'%(count,rtn,))\n return rtn", "def main():\n page_size = 1000\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('../vault/token.pickle'):\n with open('../vault/token.pickle', 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n '../vault/credentials.json', SCOPES)\n creds = flow.run_local_server()\n # Save the credentials for the next run\n with open('../vault/token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n service = build('drive', 'v3', credentials=creds)\n\n # Should be able to do all these:\n # - list all files containing a string in the file body (not just in the name)\n # - file count within a folder\n # Specifically for our use case, we might like to\n # - list all folders, recursively\n # - list all objects within a folder\n # so we could parallelize the bulk loading by folder (or folder group)\n\n # Call the Drive v3 API\n c = Counter()\n\n def inside(c=c):\n request = service.files().list(\n pageSize=page_size,\n # fields=\"nextPageToken, files(id, name)\",\n fields=\"nextPageToken, files(id, name, parents, mimeType)\",\n # q=\"mimeType='image/jpeg'\"\n # q=\"mimeType='application/vnd.google-apps.folder' and name contains 'Dummy Files'\"\n # doesn't work, should\n # q=\"'{id}' in parents\".format(id=DUMMY_FOLDER_ID)\n # full text seems to search ONLY name, not actual text\n # q=\"fullText contains 'cyan'\"\n # full text seems to search ONLY name, not actual text\n # q=\"fullText contains 'very'\"\n # works\n # q=\"'root' in parents\"\n # did not work\n # q=\"mimeType='application/vnd.google-apps.folder' and 'root' in parents and trashed=false\"\n # q = \"name='Dummy Folder' and mimeType='application/vnd.google-apps.folder'\"\n # did not work\n # q=\"'{myid}' in parents\".format(myid=MY_FOLDER_ID)\n # q=\"mimeType='application/vnd.google-apps.folder'\"\n # q=\"'0AHs_lHBwwE6AUk9PVA' in parents\"\n # q=\"'{f}' in parents and mimeType='application/vnd.google-apps.folder'\".format(f=MY_FOLDER_ID)\n q=\"not 'root' in parents\"\n )\n\n show = 20\n page = 0\n while request:\n response = request.execute()\n items = response.get('files', [])\n if not items:\n print('No files found.')\n return\n else:\n print('Files: {page}'.format(page=page))\n page += 1\n for item in items:\n d = (b64_to_long(item['id']) >> 2)\n m = d % MODULUS\n c[m] += 1\n print(u'{n} {t} ({i} = {d}) {m}'.format(n=item['name'],\n i=item['id'],\n d=d,\n m=m,\n t=item['mimeType']))\n show -= 1\n if show <= 0:\n return\n request = service.files().list_next(previous_request=request,\n previous_response=response)\n\n inside()\n print(c)", "def get_files_search(query, video=False):\n\n creds = authenticate()\n service = build('drive', 'v3', credentials=creds)\n\n folder_ids, folder_names = get_folder_ids(ROOT_PHOTO_FOLDER_ID)\n files = []\n found_files = []\n gd_query = \"'{}' in parents and trashed = false and (mimeType = 'image/jpeg' or mimeType = 'image/png' or mimeType = 'image/svg+xml')\"\n\n if video:\n gd_query = \"'{}' in parents and trashed = false and mimeType = 'video/mp4'\"\n\n for id in folder_ids:\n page_token = None\n response = service.files().list(q=gd_query.format(id),\n pageSize=10,\n 
spaces=\"drive\",\n fields='nextPageToken, files(id, name, webViewLink, description)',\n pageToken=page_token,\n\n ).execute()\n files.extend(response[\"files\"]) # simply add to the current list of files found in other folders\n\n for file in files:\n if (\n file[\"name\"].rfind(query) != -1 or \n \"description\" in file and file[\"description\"].rfind(query) != -1\n ):\n found_files.append(file)\n\n return found_files", "def go(start, query, path):\n BASE_URL = 'https://ajax.googleapis.com/ajax/services/search/images?'\\\n 'v=1.0&q=' + query + '&start=%d'\n\n BASE_PATH = os.path.join(path, query)\n\n if not os.path.exists(BASE_PATH):\n os.makedirs(BASE_PATH)\n\n# start = 0 # Google's start query string parameter for pagination.\n dest = start + 60\n while start < dest: # Google will only return a max of 56 results.\n r = requests.get(BASE_URL % start)\n print BASE_URL % start\n for image_info in json.loads(r.text)['responseData']['results']:\n url = image_info['unescapedUrl']\n try:\n image_r = requests.get(url)\n except ConnectionError, e:\n print 'could not download %s' % url\n continue\n\n # Remove file-system path characters from name.\n title = image_info['titleNoFormatting'].replace('/', '').replace('\\\\', '')\n\n file = open(os.path.join(BASE_PATH, '%s.jpg') % title, 'w')\n try:\n Image.open(StringIO(image_r.content)).save(file, 'JPEG')\n except IOError, e:\n # Throw away some gifs...blegh.\n print e\n print 'could not save %s' % url\n continue\n finally:\n file.close()\n\n print start\n start += 4 # 4 images per page.\n\n # Be nice to Google and they'll be nice back :)\n time.sleep(1.5)", "def search(file_path):\n search_url = 'http://www.google.hr/searchbyimage/upload'\n multipart = {'encoded_image': (file_path, open(file_path, 'rb')), 'image_content': ''}\n response = requests.post(search_url, files=multipart, allow_redirects=False)\n fetch_url = response.headers['Location']\n webbrowser.open(fetch_url)", "def main():\n credentials = get_credentials()\n http = credentials.authorize(httplib2.Http())\n service = discovery.build('drive', 'v3', http=http)\n\n fileId = '0B3wvsjTJuTRQMHhWY2JlLW9iSnM'\n new_file = r'./test/data/repos.zip'\n # request = service.files().get_media(fileId=fileId)\n # print(request.to_json())\n\n #f = service.revisions().list(fileId=fileId).execute()\n # f = service.revisions().get(fileId=fileId, revisionId='0B3wvsjTJuTRQS09JdnhvMkpBRTlSS2NoVXZiRlZETGMyTWdBPQ', fields='originalFilename, size').execute()\n f = service.files().get(fileId=fileId, fields='name, size').execute()\n print(f)\n # file_path = r'/Volumes/C/GisWork/drive_sgid/test_outputs/Trails_gdb.zip'\n # local_file_hash = hashlib.md5(open(file_path, 'rb').read()).hexdigest()\n # print(local_file_hash)\n # upFile = update_file(service, fileId, 'repos.zip', 'ohyeah2', 'application/zip', new_file)\n # print(upFile)\n\n #revs = retrieve_revisions(service, fileId)\n #print(len(revs))", "def check_for_recent_images(self):\n ds_list = []\n num_found = 0\n mime_list = ['image/jpg','image/png','image/jpeg','image/gif',]\n \n #build 650 doesn't seem to understand correctly the dictionary with a list right hand side\n info = xophotoactivity.sugar_version()\n if len(info)>0:\n (major,minor,micro,release) = info\n _logger.debug('sugar version major:%s minor:%s micro:%s release:%s'%info)\n else:\n _logger.debug('sugar version failure')\n minor = 70\n if minor > 80:\n (results,count) = datastore.find({'mime_type': ['image/jpeg','image/jpg', 'image/png','image/gif']})\n else:\n (results,count) = 
datastore.find({'mime_type': 'image/jpeg'})\n ds_list.extend(results)\n num_found += count \n (results,count) = datastore.find({'mime_type': 'image/jpg'})\n ds_list.extend(results)\n num_found += count\n (results,count) = datastore.find({'mime_type': 'image/png'})\n ds_list.extend(results)\n num_found += count\n (results,count) = datastore.find({'mime_type': 'image/gif'})\n ds_list.extend(results)\n num_found += count\n \n _logger.debug('Journal/datastore entries found:%s'%num_found)\n added = 0\n a_row_found = False\n cursor = self.db.connection().cursor()\n journal_list = []\n for ds in ds_list:\n #at least for now assume that the newest images are returned first\n if not a_row_found:\n journal_list.append(ds.object_id)\n dict = ds.get_metadata().get_dictionary()\n if dict[\"mime_type\"] in mime_list:\n cursor.execute('select * from groups where category = ? and jobject_id = ?',\\\n (display.journal_id,str(ds.object_id),))\n rows = cursor.fetchall()\n if len(rows) == 0:\n #may need to add date entered into ds (create date could be confusing)\n self.db.put_ds_into_picture(ds.object_id)\n self.db.add_image_to_album(display.journal_id,ds.object_id)\n added += 1\n else: #assume that pictures are returned in last in first out order\n #no longer true since we are getting each mime_type separately (build 650 kludge)\n #a_row_found = True\n pass\n ds.destroy()\n #now go through albums and remove references that are no longer in datastore\n #cursor.execute('select * from groups')\n _logger.debug('scan found %s. Added %s datastore object ids from datastore to picture'%(count,added,))\n return (num_found,added,)", "def get_photos_from_device(self):\n\n page = requests.get(self.device_img_url)\n\n soup = BeautifulSoup(page.text)\n\n img_names = [\n element['href'] for element in soup.findAll('a', attrs={'class': 'link'})\n ]\n\n if len(img_names) > 0:\n for name in img_names:\n\n opener = urllib.URLopener()\n\n opener.retrieve(self.device_img_url+name,\n self.local_img_dir+name)\n\n self.camera.command('delete_all')\n\n return True\n\n else:\n return False", "def google_image(query, num_results):\n\tif not API_KEY:\n\t\traise ConfigException(\"Require API_KEY for googleapi. 
Reload after setting.\")\n\td = { \"q\" : query.encode(\"utf-8\"), \"key\" : API_KEY, \"cx\" : CSE_ID, \"num\" : num_results, \"searchType\" : \"image\",\n\t\t\"fields\" : \"spelling/correctedQuery,items(title,link)\"}\n\t\t#TODO: consider displaying img stats like file size and resolution?\n\tf = urlopen(SEARCH_URL % (urlencode(d)))\n\tgdata = load(f)\n\tif f.getcode() == 200:\n\t\tresults = []\n\t\tspelling = gdata.get(\"spelling\")\n\t\tif spelling: spelling = spelling[\"correctedQuery\"]\n\t\tif \"items\" in gdata:\n\t\t\tfor item in gdata[\"items\"]:\n\t\t\t\tresults.append((item['title'], item['link']))\n\t\treturn (spelling, results)\n\telse:\n\t\traise RuntimeError(\"Error: %s\" % (gdata.replace(\"\\n\", \" \")))", "def ls(self):\n files = self.drive.files().list().execute().get(\"files\", [])\n for f in files:\n print(f[\"name\"], f[\"mimeType\"])", "def google_drive_authenticate(self):", "def photos():\n cwd = os.getcwd()\n db_path = os.path.join(cwd, CLI_PHOTOS_DB)\n return PhotosDB(db_path).photos(intrash=True)", "def test_get_photos_paging(self):\n pass", "def __main():\n\n\n#Makes HTTP request to Google Drive API 'copy' method\n def copyFile(token, target_name):\n print(\"Access Token: \" + token)\n url_destino = \"https://www.googleapis.com/drive/v2/files/0AilPd9i9ydNTdFc4a2lvYmZnNkNzSU1kdVFZb0syN1E/copy\" \\\n \"?key=(YOUR_API_KEY provided by Google API Console)\"\n values = \"{'title': '%s'}\" % target_name\n data = values.encode('utf-8')\n request = urllib.request.Request(url_destino, data, method='POST')\n request.add_header(\"Authorization\", \"Bearer \" + token)\n request.add_header(\"Content-Length\", len(data))\n request.add_header(\"Content-Type\", \"application/json\")\n print(request.header_items())\n f = urllib.request.urlopen(request)\n print(f.read())\n\n#Makes HTTP request to Google Drive API 'list' files method\n def listFiles(token):\n print(\"Access Token: \" + token)\n url_destino = \"https://www.googleapis.com/drive/v2/files?key=(YOUR_API_KEY provided by Google API Console)\"\n request = urllib.request.Request(url_destino)\n request.add_header(\"Authorization\", \"Bearer \" + token)\n f = urllib.request.urlopen(request)\n print(f.read())\n\n\n oauth2 = OAuth2()\n token = oauth2.getAccessToken()\n if token == None:\n print(\"Input the following URL into your browser and access it!\")\n print(oauth2.getUrlForAuthCode())\n code = input(\"Paste the code ---> \")\n token = oauth2.getAccessToken(code)\n\n\n try:\n listFiles(token)\n except urllib.error.HTTPError as e:\n if e.code == 401:\n token = oauth2.refreshAccessToken()\n listFiles(token)\n else:\n print(e.code)\n\n\n try:\n copyFile(token, 'HiperAgenda_Copiada_3')\n except urllib.error.HTTPError as e:\n if e.code == 401:\n token = oauth2.refreshAccessToken()\n copyFile(token, 'HiperAgenda_Copiada')\n else:\n print(e.code)", "def test_retrieval_of_user_photos(self):\t\n\t\tget_response = self.client.get(reverse('photos'))\n\n\t\tself.assertEqual(get_response.status_code, status.HTTP_200_OK)\n\t\tdata = [i.values() for i in get_response.data]\n\t\tself.assertIn(u'{}'.format(self.image_name), data[0])", "def get_vr_photos(self, count = 30, page = 1):\n uri = 'photos/vr'\n options = { 'per_page': count, 'page': page }\n return self.make_request(uri, options)", "def boxSearch(client):\n\tfiles = []\n\titems_iter = client.folder(folder_id=0).get_items(limit=100, offset=0)\n\tfor x in items_iter:\n\t\tfiles.append(x)\n\treturn files", "def add_imgs(self, path_imgs):\n try:\n for img in path_imgs:\n if img in 
self.listImages.previews:\n print('image {} is already in the list'.format(img))\n path_imgs.remove(img)\n self.listImages.previews = path_imgs\n if self.listImages.current_img == None:\n self.upload_img(self.listImages.previews[0])\n except:\n return", "def view_images(request):\n user_root = request.session['user_root']\n search_id = request.session['search_id']\n with open(os.path.join(user_root, search_id, 'info.json')) as f:\n info = json.load(f)\n object_id_list = info['object_id_list']\n image_type_list = info['image_type_list']\n search_pattern = info['search_pattern']\n image_dir = scan_images(user_root, search_id, image_type_list,relative_path=True)\n\n # Add flag for conditional representation.\n flag_scan = False\n flag_classifier=info['flag_classifier']\n if search_pattern == \"scan\":\n flag_scan = True\n bounding_box_dict = scan_bb_images(\n user_root, search_id, folder_name=\"scans\")\n else:\n bounding_box_dict = scan_bb_images(user_root, search_id)\n\n return render(request, 'gallery.html',\n {\"object_id_list\": object_id_list,\n \"image_dir\": image_dir,\n \"bounding_box\": bounding_box_dict,\n \"flag_scan\": flag_scan,\n \"flag_classifier\":flag_classifier,\n \"image_type_list\":image_type_list})", "def deleted_files(conn, scan0, scan1):\n return get_new_files(conn, scan1, scan0)", "def retrieve_images(search_term,count=200,thumbnail=True):\r\n \r\n if (thumbnail==True): # Wait times will be less if the user wants the thumbnails.\r\n iw = 2\r\n panel_wait = 2\r\n else: # or else wait for more time for proper images to be extracted.\r\n iw = 100\r\n panel_wait = 2000\r\n \r\n folder_name = search_term # The search term will also be the name of the folder.\r\n if not os.path.exists(folder_name): # Make a folder in that name if it doesn't exist.\r\n os.mkdir(folder_name)\r\n \r\n search_term = '+'.join(search_term.split())\r\n url = \"https://www.google.co.in/search?q=\"+search_term+\"&source=lnms&tbm=isch\" # Create the search query\r\n driver = webdriver.Chrome() # choosing Chrome browser. 
Make sure the Chrome driver is in the same directory.\r\n driver.maximize_window()\r\n driver.get(url)\r\n\r\n\r\n for _ in range(260): # Scroll to the bottom of the search, displying all the search images.\r\n driver.execute_script(\"window.scrollBy(0,10000)\")\r\n element = driver.find_element_by_xpath('//*[@id=\"islmp\"]/div/div/div/div/div[5]/input') # Click \"Show more results\"\r\n if element.is_displayed():\r\n element.click()\r\n\r\n divs = driver.find_elements_by_xpath(\"//*[@id='islrg']/div[1]/div/a[1]/div[1]/img\") # The xpath for all searched images.\r\n if count>len(divs): # If count is more than the number of images, download the max number of images.\r\n count = len(divs)\r\n\r\n wait = WebDriverWait(driver,10) # Set explicit wait.\r\n actions = ActionChains(driver) # Set action chain variable.\r\n \r\n # Move to each of the images and wait for sometime.\r\n actions.move_to_element(driver.find_element_by_xpath(r\"//*[@id='islrg']/div[1]/div[1]/a[1]/div[1]/img\")).perform()\r\n driver.implicitly_wait(iw)\r\n for i in range(1,count+1):\r\n try: # Find image elements.\r\n search_image = wait.until(EC.element_to_be_clickable((By.XPATH,r\"//*[@id='islrg']/div[1]/div[\"+str(i)+\"]/a[1]/div[1]/img\")))\r\n driver.implicitly_wait(iw)\r\n driver.execute_script(\"arguments[0].click();\",search_image) # Click on each of the images.\r\n except:\r\n continue\r\n \r\n \r\n try: # Extract images from the black image panel (div).\r\n panel = wait.until(EC.element_to_be_clickable((By.XPATH,r'//*[@id=\"Sva75c\"]/div/div/div[3]/div[2]/c-wiz/div[1]/div[1]/div/div[2]/a/img')))\r\n time.sleep(iw/50) # Wait for the panel to load.\r\n img = urllib.request.urlopen(urllib.request.Request(panel.get_attribute('src'))).read() # Extract the image.\r\n except:\r\n try: # or extract thumbnails if the above fails.\r\n search_image = driver.find_element_by_xpath(r\"//*[@id='islrg']/div[1]/div[\"+str(i)+\"]/a[1]/div[1]/img\")\r\n #print(i,search_image.get_attribute(\"src\"))\r\n if search_image.get_attribute(\"src\") != None:\r\n img = urllib.request.urlopen(urllib.request.Request(search_image.get_attribute('src'))).read()\r\n else:\r\n img = urllib.request.urlopen(urllib.request.Request(search_image.get_attribute('data-src'))).read()\r\n except:\r\n continue\r\n \r\n f = open(os.getcwd()+'\\\\'+folder_name+'\\\\'+folder_name+str(i)+'.jpg',\"wb\") # Save the image as jpeg.\r\n f.write(img)\r\n f.close()\r\n \r\n driver.close()", "def main():\n credentials = get_credentials()\n http = credentials.authorize(httplib2.Http())\n service = apiclient.discovery.build('drive', 'v3', http=http)\n\n sheet_id = raw_input(\"Input sheet id: \")\n sheet = sefaria_sheet_api(sheet_id)\n\n file_metadata = {\n 'name': sheet.get('title', '').strip(),\n 'mimeType': 'application/vnd.google-apps.document'\n }\n\n media = apiclient.http.MediaIoBaseUpload(\n StringIO.StringIO(create_html_string(sheet)),\n mimetype='text/html',\n resumable=True)\n\n new_file = service.files().create(body=file_metadata,\n media_body=media,\n fields='id,webViewLink').execute()\n\n print '{} (id: {})'.format(new_file['webViewLink'], new_file['id'])", "def test_get_photos(self):\n recipe = Recipes.objects.create(chef=self.user, draft=False, private=False)\n photo = Photos.objects.create(recipe=recipe, photo_order=1)\n\n url = '/0/chefs/%i/photos' % self.user.pk\n\n resp = self.client.get(url)\n self.assertPermissionDenied(resp)\n\n headers = self.login()\n resp = self.client.get(url, **headers)\n self.assertEqual(resp.status_code, 200)\n 
self.assertIn('photos', resp.data)\n self.assertEqual(1, len(resp.data['photos']))\n keys = ('edit_date', 'creation_date', 'id', u'temperature', 'url', 'recipe', 'cover',\n 'time', 'instructions', 'order', 'quantity')\n self.assertEqual(set(keys), set(resp.data['photos'][0].keys()))", "def __file_exists(self, file_name=None):\r\n page_token = None\r\n file_name = 'name=\\'' + os.path.basename(self.file_name) + '\\''\r\n self.drive_api = build('drive', 'v3', credentials=self.creds)\r\n\r\n try:\r\n while True:\r\n response = self.drive_api.files().list(q=file_name, includeTeamDriveItems=True, supportsTeamDrives=True, fields='nextPageToken, files(id, name)', pageToken=page_token).execute()\r\n for file in response.get('files', []):\r\n self.file_id = file.get('id')\r\n self.file_exists = True\r\n # Process change\r\n print('Found file: %s (%s)' % (file.get('name'), file.get('id')))\r\n page_token = response.get('nextPageToken', None)\r\n if page_token is None:\r\n break\r\n except Error as ge:\r\n logging.error('Google Drive Exception: ' + ge.content)\r\n print(ge)", "def get_latest_photos(self, count = 30, page = 1):\n uri = 'photos/latest'\n options = { 'per_page': count, 'page': page }\n return self.make_request(uri, options)" ]
[ "0.6759736", "0.63333344", "0.6305266", "0.619666", "0.6082265", "0.59544337", "0.58340144", "0.5815916", "0.57943195", "0.5779238", "0.5751527", "0.56643534", "0.5400818", "0.53769857", "0.53399146", "0.52896446", "0.5244849", "0.5224103", "0.5210007", "0.51904804", "0.5161478", "0.5146851", "0.51357174", "0.51352364", "0.5120207", "0.5097414", "0.5089831", "0.5088316", "0.50661784", "0.50617236" ]
0.6790877
0
Linearly interpolate two setpoints.
def interpolate_setpoints(base_setpoint, other_setpoint, parameter): time = parameter * base_setpoint.time + (1 - parameter) * other_setpoint.time position = parameter * base_setpoint.position + (1 - parameter) * other_setpoint.position velocity = parameter * base_setpoint.velocity + (1 - parameter) * other_setpoint.velocity return Setpoint(time, position, velocity)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def interpolate(x0, y0, x1, y1, x):\n y = (y0 * (x1 - x) + y1 * (x - x0)) / (x1 - x0)\n\n return y", "def linear_interpolation(self, pt1, pt2, unknown):\n\n #Write your code for linear interpolation here\n pt1,intensity1=pt1\n pt2,intensity2=pt2\n newPoint=unknown\n intensity_diff=pt2-pt1\n if(intensity_diff<=0):\n intensity_diff=1\n\n a1=pt2-newPoint\n b1=a1/intensity_diff\n x=intensity1*b1\n a2=newPoint - pt1\n b2=a2/intensity_diff\n y=intensity2*b2\n new_intensity=x+y\n\n return new_intensity", "def linear_interpolate(x, x0, y0, x1, y1):\n try:\n return (y0 * (x1 - x) + y1 * (x - x0)) / (x1 - x0)\n except ZeroDivisionError:\n return 0.0", "def _LinearInterpolate(x0, target, x1, y0, y1):\n if x0 == x1:\n return (y0 + y1) / 2\n return (y1 - y0) * (target - x0) / (x1 - x0) + y0", "def interpolate(self, xs):\n tck = splrep(self._xs, self._ys)\n new_ys = splev(xs, tck, der=0)\n return new_ys", "def test_linear_interpolation_range(self):\n\n for x in [[1.0, 2.0, 4.0], [-20, -19, 0], numpy.arange(200) + 1000]:\n for y in [[5.0, 9.0], [100, 200, 10000]]:\n\n # Define ny by nx array with corresponding values\n A = numpy.zeros((len(x), len(y)))\n\n # Define values for each x, y pair as a linear function\n for i in range(len(x)):\n for j in range(len(y)):\n A[i, j] = linear_function(x[i], y[j])\n\n # Test that linearly interpolated points are correct\n xis = numpy.linspace(x[0], x[-1], 100)\n etas = numpy.linspace(y[0], y[-1], 100)\n points = combine_coordinates(xis, etas)\n\n vals = interpolate2d(x, y, A, points, mode='linear')\n refs = linear_function(points[:, 0], points[:, 1])\n assert numpy.allclose(vals, refs, rtol=1e-12, atol=1e-12)", "def interpolate(self, s1, s2, num_points):\n edge = [tuple(s1)]\n length = self.distance_bw_states(s1, s2)\n d_l = length/num_points\n curr_len = d_l\n while True:\n #Do linear interpolation\n temp = [s1[i] + (s2[i] - s1[i])*(curr_len/length) for i in range(self.ndims)]\n #Update curr_len\n curr_len+= d_l\n if curr_len > length: break\n #Append temp to edge\n edge.append(tuple(temp))\n #Add final state to edge\n edge.append(tuple(s2))\n return edge", "def interpolate(i0, d0, i1, d1):\n if i0 == i1:\n return [d0]\n values = []\n a = (d1 - d0) / (i1 - i0)\n d = d0\n for i in range(i0,i1+1):\n values.append(d)\n d = d + a\n return values", "def test_linear_interpolation_basic(self):\n\n # Define pixel centers along each direction\n x = [1.0, 2.0, 4.0]\n y = [5.0, 9.0]\n\n # Define ny by nx array with corresponding values\n A = numpy.zeros((len(x), len(y)))\n\n # Define values for each x, y pair as a linear function\n for i in range(len(x)):\n for j in range(len(y)):\n A[i, j] = linear_function(x[i], y[j])\n\n # Test first that original points are reproduced correctly\n for i, xi in enumerate(x):\n for j, eta in enumerate(y):\n val = interpolate2d(x, y, A, [(xi, eta)], mode='linear')[0]\n ref = linear_function(xi, eta)\n assert numpy.allclose(val, ref, rtol=1e-12, atol=1e-12)\n\n # Then test that genuinly interpolated points are correct\n xis = numpy.linspace(x[0], x[-1], 10)\n etas = numpy.linspace(y[0], y[-1], 10)\n points = combine_coordinates(xis, etas)\n\n vals = interpolate2d(x, y, A, points, mode='linear')\n refs = linear_function(points[:, 0], points[:, 1])\n assert numpy.allclose(vals, refs, rtol=1e-12, atol=1e-12)", "def linear_interp(x,y,xi) :\n \n f = interp1d(x,y,kind='linear')\n yi = f(xi)\n \n return yi", "def _interpolate(self, kps1: List[List[kp]], kps2: List[List[kp]]) -> np.ndarray:\n interpolated_kps = []\n for i in range(len(kps1)):\n # If 
one of the two points is empty -> Not interpolate\n if len(kps1[i]) != 0 and len(kps2[i]) != 0:\n interpolated_coords = np.linspace(np.array(kps1[i]), np.array(kps2[i]), num=3).tolist()\n interpolated_kps.append(interpolated_coords[1])\n else:\n interpolated_kps.append([None, None, None])\n return np.array(interpolated_kps)", "def linear_interpolation(left, right, alpha):\n\n return left + alpha * (right - left)", "def interpolate(x1, x2, u, N):\n \n # finding the magnitude of each component\n a1 = np.matmul(x1, u)\n a2 = np.matmul(x2, u)\n\n ims = [np.matmul(u, t * a1 + (1 - t) * a2) \\\n for t in np.linspace(0, 1, N)]\n\n return np.stack(ims, 0)", "def linear_interpolate_value_change(t0, v0, t1, v1, dt):\n return (v1 - v0)/float(t1-t0) * dt", "def interpolate(Position_i1, Position_i2, inBetween):\r\n \r\n # Chain the timestamps together and calculate the delta t\r\n T1 = Position_i1[0][:5] + [int(Position_i1[0][5])] + [int((Position_i1[0][5]%1)*1000000)] \r\n T2 = Position_i2[0][:5] + [int(Position_i2[0][5])] + [int((Position_i2[0][5]%1)*1000000)]\r\n dt = dT(T1, T2)\r\n\r\n # Do the same with longitude and latitude.\r\n ds = [i[1]- i[0] for i in zip(Position_i1[1], Position_i2[1])]\r\n \r\n # Calculate the \"slope\":\r\n ds_dt = [i/dt for i in ds]\r\n \r\n # Make the timeStamp for (inBetween) more precise:\r\n precise_Time = inBetween[:5] + [int(inBetween[5])] + [int((inBetween[5]%1)*1000000)]\r\n \r\n # Calculate the time since the first measurement passed till \"inBetween\"\r\n DeltaT = dT(T1, precise_Time)\r\n\r\n # Calculate the precise position at \"inBetween\":\r\n Position = [DeltaT* i[0] + i[1] for i in zip(ds_dt, Position_i1[1])]\r\n\r\n # Then return the [timeStamp, [lon, lat]]\r\n return [inBetween, Position]", "def compute_lerp(a, b, x):\n\n return a + x * (b - a)", "def interpolate_linear(self, transect):\n\n u = np.copy(self.u_mps)\n v = np.copy(self.v_mps)\n\n valid = np.isnan(u) == False\n\n # Check for valid data\n if sum(valid) > 1 and sum(self.valid_data[0, :]) > 1:\n\n # Compute ens_time\n ens_time = np.nancumsum(transect.date_time.ens_duration_sec)\n\n # Apply linear interpolation\n self.u_processed_mps = np.interp(x=ens_time,\n xp=ens_time[self.valid_data[0, :]],\n fp=u[self.valid_data[0, :]],\n left=np.nan,\n right=np.nan)\n # Apply linear interpolation\n self.v_processed_mps = np.interp(x=ens_time,\n xp=ens_time[self.valid_data[0, :]],\n fp=v[self.valid_data[0, :]],\n left=np.nan,\n right=np.nan)", "def _lerp(a, b, t, out=None):\n diff_b_a = subtract(b, a)\n # asanyarray is a stop-gap until gh-13105\n lerp_interpolation = asanyarray(add(a, diff_b_a*t, out=out))\n subtract(b, diff_b_a * (1 - t), out=lerp_interpolation, where=t>=0.5)\n if lerp_interpolation.ndim == 0 and out is None:\n lerp_interpolation = lerp_interpolation[()] # unpack 0d arrays\n return lerp_interpolation", "def linear_interpolate_value_at_time(t0, v0, t1, v1, t):\n return v0 + linear_interpolate_value_change(t0, v0, t1, v1, t - t0)", "def lerp(a, b, t):\n return (1 - t) * a + t * b", "def interpolate(self, interp):\n x = np.linspace(0, 29, len(self.ya))\n f_ya = interpolate.interp1d(x, self.ya)\n f_yv = interpolate.interp1d(x, self.yv)\n f_pa = interpolate.interp1d(x, np.reshape(self.pa, [-1]))\n f_pv = interpolate.interp1d(x, np.reshape(self.pv, [-1]))\n\n x_interp = np.linspace(0, 29, len(self.ya)*interp)\n self.ya = list(f_ya(x_interp))\n self.yv = list(f_yv(x_interp))\n self.pa = list(f_pa(x_interp))\n self.pv = list(f_pv(x_interp))", "def interpolate(x_list, y_list, z_list):\n x1 = 
x_list[-2]\n x2 = x_list[-1]\n y1 = y_list[-2]\n y2 = y_list[-1]\n z1 = z_list[-2]\n z2 = z_list[-1]\n r = -y1/y2\n x_land = (x1+r*x2)/(r+1)\n z_land = (z1+r*z2)/(r+1)\n x_list[-1] = x_land\n y_list[-1] = 0.0\n z_list[-1] = z_land", "def interpol(self,x,y,x1):\n \n N = len(x)\n i = np.minimum(np.maximum(np.searchsorted(x,x1,side='right'),1),N-1)\n xl = x[i-1]\n xr = x[i]\n yl = y[i-1]\n yr = y[i]\n y1 = yl + (yr-yl)/(xr-xl) * (x1-xl)\n above = x1 > x[-1]\n below = x1 < x[0]\n y1 = np.where(above,y[-1] + (x1 - x[-1]) * (y[-1]-y[-2])/(x[-1]-x[-2]), y1)\n y1 = np.where(below,y[0],y1)\n \n return y1, i", "def lin_int(xs, ys):\n return scipy.interpolate.interp1d(xs, ys)", "def interpolate(a, b):\n x = 1\n i = 1\n f = b[0]\n while i < n:\n b = b*(x-a[i])\n i += 1\n f += (b[i] - f(a[i]))/a[i]) * b\n return f", "def interpolate(self, mobject1, mobject2, alpha, path_func=straight_path()):\n self.set_points(path_func(mobject1.points, mobject2.points, alpha))\n return self", "def _lerp(self, start_value, end_value):\n # @todo: can probably replace this with np.interp(self.step_lerp_pcts, [0, 1], [start_value, end_value])\n return (1.0-self.step_lerp_pcts)*start_value + self.step_lerp_pcts*end_value", "def interpolate(self, a, b, t):\n d1 = (t - a[0]).seconds\n d2 = (b[0] - t).seconds\n # The total time difference\n d = float(d1 + d2)\n point = []\n # Need to return a (time, lat, lon, elev) point\n point.append(t)\n # Linear interpolation of the latitude, longitude, and elevation\n point.append(float(a[1])*(d2/d) + float(b[1])*(d1/d))\n point.append(float(a[2])*(d2/d) + float(b[2])*(d1/d))\n point.append(float(a[3])*(d2/d) + float(b[3])*(d1/d))\n if self.verbose:\n sys.stderr.write('Interpolate:\\n')\n sys.stderr.write('\\t%s\\n' % repr(a))\n sys.stderr.write('\\t%s\\n' % repr(point))\n sys.stderr.write('\\t%s\\n' % repr(b))\n return point", "def lerp(self, other, bias):\n ox, oy = other\n b1 = 1.0 - bias\n return tuple.__new__(Vec2,\n (self[0] * b1 + ox * bias, self[1] * b1 + oy * bias))", "def interp_n2(t, x, y):\n\n return y[:, 0] + (t - x[0]) * (y[:, 1] - y[:, 0]) / (x[1] - x[0])" ]
[ "0.7063411", "0.70630324", "0.7000477", "0.6974235", "0.67129767", "0.670686", "0.66866535", "0.66817236", "0.6674512", "0.6645032", "0.6635487", "0.6578317", "0.65730774", "0.6544798", "0.64888346", "0.6453485", "0.6449922", "0.6442387", "0.6439969", "0.6434852", "0.6425436", "0.641993", "0.63817036", "0.6376306", "0.6375314", "0.63668126", "0.6351598", "0.6334913", "0.6298026", "0.62464" ]
0.71087044
0
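The interpolation snippets collected above all reduce to the same two-point formula, y = y0 + (y1 - y0) * (x - x0) / (x1 - x0). A minimal sketch of that formula in Python (the function name and the equal-x fallback are choices made here for illustration, not taken from any of the listed entries):

def lerp(x0, y0, x1, y1, x):
    """Linearly interpolate the value at x between points (x0, y0) and (x1, y1)."""
    if x1 == x0:
        # Degenerate segment: the two x positions coincide, so return the mean value.
        return (y0 + y1) / 2.0
    return y0 + (y1 - y0) * (x - x0) / (x1 - x0)

# Halfway between (0, 10) and (2, 20):
print(lerp(0.0, 10.0, 2.0, 20.0, 1.0))  # 15.0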
Calculate overlap between two sets. The sets are acquired from data1 and data2 respectively.
def calc_overlap(data1, data2, label1=None, label2=None, index='dice'): if label1 is not None: positions1 = np.where(data1 == label1) data1 = list(zip(*positions1)) if label2 is not None: positions2 = np.where(data2 == label2) data2 = list(zip(*positions2)) # calculate overlap overlap = _overlap(data1, data2, index) return overlap
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cal_overlaps(boxes1, boxes2):\n area1 = (boxes1[:, 0] - boxes1[:, 2]) * (boxes1[:, 1] - boxes1[:, 3]) # (Nsample, 1)\n area2 = (boxes2[:, 0] - boxes2[:, 2]) * (boxes2[:, 1] - boxes2[:, 3]) # (Msample, 1)\n\n overlaps = np.zeros((boxes1.shape[0], boxes2.shape[0])) # (Nsample, Msample)\n\n # calculate the intersection of boxes1(anchor) and boxes2(GT box)\n for i in range(boxes1.shape[0]):\n overlaps[i][:] = cal_iou(boxes1[i], area1[i], boxes2, area2)\n\n return overlaps", "def compute_overlap(self, skymap1, skymap2, single_skymap1, single_skymap2):\n from ligo.skymap.postprocess.crossmatch import crossmatch\n from astropy.coordinates import SkyCoord\n ra, dec = self.get_ra_dec_from_skymap(single_skymap1)\n coord = SkyCoord(ra, dec, unit=\"rad\")\n result = crossmatch(skymap2, coord)\n searched_prob_1 = np.min([result.searched_prob, 1.0])\n ra, dec = self.get_ra_dec_from_skymap(single_skymap2)\n coord = SkyCoord(ra, dec, unit=\"rad\")\n result = crossmatch(skymap1, coord)\n searched_prob_2 = np.min([result.searched_prob, 1.0])\n return np.max([1-searched_prob_1, 1-searched_prob_2])", "def get_synset_overlap(sentence_a, sentence_b):\n def synsets(word):\n sense_lemmas = []\n for pos in ('n'):#,'a'):\n for i in xrange(5):\n try:\n sense_lemmas += [lemma.name \n for lemma in wn.synset('{0}.{1}.0{2}'.format(word, pos, i)).lemmas]\n except WordNetError: \n pass\n return sense_lemmas\n\n a_set = set(lemma for word in sentence_a for lemma in synsets(word))\n b_set = set(lemma for word in sentence_b for lemma in synsets(word))\n score = len(a_set&b_set)/float(len(a_set|b_set))\n \n return score", "def calculate_overlap(self, r1, r2):\n\n # We know that reads that can be glued,\n # share at least half of their length.\n # Make sure one is not shorter than\n # the half of another.\n\n if len(r1) / 2 + len(r1) % 2 <= len(r2) \\\n and len(r2) / 2 + len(r2) % 2 <= len(r1):\n\n # prepare second halves for overlap pre-check\n\n tail1 = r1[len(r1) / 2:]\n tail2 = r2[len(r2) / 2:]\n \n # case 1: r1 contains r2 completely\n #\n # For example,\n #\n # ATCGCCGGAT\n # TCGCCGGA\n \n pos = r1.find(r2)\n if pos != -1:\n self.reads[r1].overlaps[r2] = pos + len(r2) - len(r1)\n \n # case 2: r2 contains r1 completely\n #\n # For example,\n #\n # TCGCCGGA\n # ATCGCCGGAT\n \n pos = r2.find(r1)\n if pos != -1:\n self.reads[r2].overlaps[r1] = pos + len(r1) - len(r2)\n \n # case 3: end of r1 overlaps with beginning of r2\n #\n # For example,\n #\n # ATCGCCGGAT\n # TCGCCGGATGC\n #\n # First check that at least half of r1 is in r2\n # If there is a match, calculate the expected length \n # of overlap and check if they indeed overlap.\n\n \n pos = r2.find(tail1)\n if pos != -1:\n overlap = pos + len(tail1)\n if r1[-overlap:] == r2[:overlap]:\n self.reads[r1].overlaps[r2] = len(r2) - overlap\n \n # case 4: end of r2 overlaps with beginning of r1\n #\n # For example,\n #\n # CGCCGGATCC\n # TCGCCGGAT\n #\n # First check that at least half of r2 is in r1\n # If there is a match, calculate the expected length \n # of overlap and check if they indeed overlap.\n \n pos = r1.find(tail2)\n if pos != -1: \n overlap = pos + len(tail2)\n if r2[-overlap:] == r1[:overlap]:\n self.reads[r2].overlaps[r1] = len(r1) - overlap", "def overlap(table1, table2):\n out = np.zeros(np.size(table1, axis=0), dtype='bool')\n for i in range(np.size(table1, axis=0)):\n s1_s2 = table1[i, 0] < table2[:, 0] \n s1_e2 = table1[i, 0] <= table2[:, 1]\n e1_s2 = table1[i, 1] < table2[:, 0]\n e1_e2 = table1[i, 1] < table2[:, 1]\n # no overlap occurs when all 
four parameters above either == 0 or 1\n sum_params = np.sum(np.array([s1_s2, s1_e2, e1_s2, e1_e2]), axis=0)\n olap = (sum_params == 1) | (sum_params == 2) | (sum_params == 3)\n out[i] = np.any(olap)\n return out", "def _overlap(c1, c2, index='dice'):\n set1 = set(c1)\n set2 = set(c2)\n intersection_num = float(len(set1 & set2))\n try:\n if index == 'dice':\n total_num = len(set1 | set2) + intersection_num\n overlap = 2.0 * intersection_num / total_num\n elif index == 'percent':\n overlap = 1.0 * intersection_num / len(set1)\n else:\n raise Exception(\"Only support 'dice' and 'percent' as overlap indices at present.\")\n except ZeroDivisionError as e:\n print(e)\n overlap = np.nan\n return overlap", "def compute_overlaps(boxes1, boxes2):\n # Areas of anchors and GT boxes\n area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])\n area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])\n\n # Compute overlaps to generate matrix [boxes1 count, boxes2 count]\n # Each cell contains the IoU value.\n overlaps = np.zeros((boxes1.shape[0], boxes2.shape[0]))\n for i in range(overlaps.shape[1]):\n box2 = boxes2[i]\n overlaps[:, i] = compute_iou(box2, boxes1, area2[i], area1)\n return overlaps", "def compute_overlaps(boxes1, boxes2):\n # Areas of anchors and GT boxes\n area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])\n area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])\n\n # Compute overlaps to generate matrix [boxes1 count, boxes2 count]\n # Each cell contains the IoU value.\n overlaps = np.zeros((boxes1.shape[0], boxes2.shape[0]))\n for i in range(overlaps.shape[1]):\n box2 = boxes2[i]\n overlaps[:, i] = compute_iou(box2, boxes1, area2[i], area1)\n return overlaps", "def compute_overlaps(boxes1, boxes2):\n # Areas of anchors and GT boxes\n area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])\n area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])\n\n # Compute overlaps to generate matrix [boxes1 count, boxes2 count]\n # Each cell contains the IoU value.\n overlaps = np.zeros((boxes1.shape[0], boxes2.shape[0]))\n for i in range(overlaps.shape[1]):\n box2 = boxes2[i]\n overlaps[:, i] = compute_iou(box2, boxes1, area2[i], area1)\n return overlaps", "def overlap_with(self, other):", "def overlaps(a, b, **kwargs):\n return lib.overlaps(a, b, **kwargs)", "def get_overlap(a, b):\n return max(0, min(a[1], b[1]) - max(a[0], b[0]))", "def test_compute_overlap(self):\n # box1 contained in box2\n box1 = ((1, 2), (1, 2), (1, 2))\n box2 = ((1, 3), (1, 3), (1, 3))\n mapping = {box1: [1, 2, 3, 4], box2: [1, 2, 3, 4, 5]}\n # box1 in box2, so complete overlap\n np.testing.assert_almost_equal(\n dc.dock.binding_pocket.compute_overlap(mapping, box1, box2), 1)\n # 4/5 atoms in box2 in box1, so 80 % overlap\n np.testing.assert_almost_equal(\n dc.dock.binding_pocket.compute_overlap(mapping, box2, box1), .8)", "def find_overlap(self, gdf1, gdf2):\n gdf1.crs = \"epsg:4326\" # todo: fix this ugliness\n gdf2.crs = \"epsg:4326\"\n return gpd.sjoin(gdf1, gdf2).drop(\"index_right\", axis=1)", "def overlaps(self, other):\n pass", "def overlap(t1, t2):\n t1 = dict(min=np.min(t1), max=np.max(t1))\n t2 = dict(min=np.min(t2), max=np.max(t2))\n for t in (t1, t2):\n t['dur'] = t['max'] - t['min']\n\n # Ensure t1 min < t2 min\n if t2['min'] < t1['min']:\n print('t2 starts earlier')\n t1, t2 = t2, t1\n \n # var names wrt t2\n min_inside = t2['min'] >= t1['min'] and t2['min'] <= t1['max']\n max_inside = t2['max'] <= t1['max']\n if 
min_inside and max_inside:\n # t2 completely contained by t1\n return (t2['min'], t2['max'])\n elif min_inside:\n # t2 partially contained by t1\n return (t2['min'], t1['max'])\n else:\n # no overlap\n return (None, None)", "def compute_overlaps_masks(masks1, masks2):\n \n # If either set of masks is empty return empty result\n if masks1.shape[-1] == 0 or masks2.shape[-1] == 0:\n return np.zeros((masks1.shape[-1], masks2.shape[-1]))\n # flatten masks and compute their areas\n masks1 = np.reshape(masks1 > .5, (-1, masks1.shape[-1])).astype(np.float32)\n masks2 = np.reshape(masks2 > .5, (-1, masks2.shape[-1])).astype(np.float32)\n \n #a, _= masks1.shape\n #_,b = masks2.shape\n #masks2 = cv2.resize(masks2, (b,a))\n #x = np.arange(3).reshape(1,3)\n #y = np.arange(3,6).reshape(1,3)\n\n #masks1 = y.reshape(3,1)\n #print(\"masks1:\", masks1.shape)\n #print(\"masks2:\", masks2.shape)\n #resize_mask()\n area1 = np.sum(masks1, axis=0)\n area2 = np.sum(masks2, axis=0)\n\n # intersections and union\n intersections = np.dot(masks1.T, masks2)\n union = area1[:, None] + area2[None, :] - intersections\n overlaps = intersections / union\n\n return overlaps", "def find_intersection(set_1, set_2):\n\n # PyTorch auto-broadcasts singleton dimensions\n lower_bounds = torch.max(set_1[:, :2].unsqueeze(1), set_2[:, :2].unsqueeze(0)) # (n1, n2, 2)\n upper_bounds = torch.min(set_1[:, 2:].unsqueeze(1), set_2[:, 2:].unsqueeze(0)) # (n1, n2, 2)\n intersection_dims = torch.clamp(upper_bounds - lower_bounds, min=0) # (n1, n2, 2)\n return intersection_dims[:, :, 0] * intersection_dims[:, :, 1] # (n1, n2)", "def find_intersection(set_1, set_2):\n\n # PyTorch auto-broadcasts singleton dimensions\n lower_bounds = torch.max(set_1[:, :2].unsqueeze(1), set_2[:, :2].unsqueeze(0)) # (n1, n2, 2)\n upper_bounds = torch.min(set_1[:, 2:].unsqueeze(1), set_2[:, 2:].unsqueeze(0)) # (n1, n2, 2)\n intersection_dims = torch.clamp(upper_bounds - lower_bounds, min=0) # (n1, n2, 2)\n return intersection_dims[:, :, 0] * intersection_dims[:, :, 1] # (n1, n2)", "def find_intersection(set_1, set_2):\n\n # PyTorch auto-broadcasts singleton dimensions\n lower_bounds = torch.max(set_1[:, :2].unsqueeze(1), set_2[:, :2].unsqueeze(0)) # (n1, n2, 2)\n upper_bounds = torch.min(set_1[:, 2:].unsqueeze(1), set_2[:, 2:].unsqueeze(0)) # (n1, n2, 2)\n intersection_dims = torch.clamp(upper_bounds - lower_bounds, min=0) # (n1, n2, 2)\n return intersection_dims[:, :, 0] * intersection_dims[:, :, 1] # (n1, n2)", "def find_intersection(set_1, set_2):\n\n # PyTorch auto-broadcasts singleton dimensions\n lower_bounds = torch.max(set_1[:, :2].unsqueeze(1), set_2[:, :2].unsqueeze(0)) # (n1, n2, 2)\n upper_bounds = torch.min(set_1[:, 2:].unsqueeze(1), set_2[:, 2:].unsqueeze(0)) # (n1, n2, 2)\n intersection_dims = torch.clamp(upper_bounds - lower_bounds, min=0) # (n1, n2, 2)\n return intersection_dims[:, :, 0] * intersection_dims[:, :, 1] # (n1, n2)", "def find_intersection(set_1, set_2):\n # PyTorch auto-broadcasts singleton dimensions\n lower_bounds = torch.max(set_1[:, :2].unsqueeze(1), set_2[:, :2].unsqueeze(0)) # (n1, n2, 2)\n upper_bounds = torch.min(set_1[:, 2:].unsqueeze(1), set_2[:, 2:].unsqueeze(0)) # (n1, n2, 2)\n intersection_dims = torch.clamp(upper_bounds - lower_bounds, min=0) # (n1, n2, 2)\n return intersection_dims[:, :, 0] * intersection_dims[:, :, 1] # (n1, n2)", "def overlap(start1, end1, start2, end2):\n return not (end1 < start2 or end2 < start1)", "def range_overlap(range1, range2):\n return range(max(range1[0], range2[0]), min(range1[1], 
range2[1]))", "def overlap(t1start, t1end, t2start, t2end):\n\n return (t1start <= t2start <= t1end) or (t2start <= t1start <= t2end)", "def overlap(start_idx1, end_idx1, start_idx2, end_idx2):\n head = min(end_idx1, end_idx2)\n tail = max(start_idx1, start_idx2)\n return head >= tail", "def poverlap(t1, t2, size1, size2):\n x0 = t1[0]\n y0 = t1[1]\n x1 = t1[0] + size1[0]\n y1 = t1[1] + size1[1]\n\n x2 = t2[0]\n y2 = t2[1]\n x3 = t2[0] + size2[0]\n y3 = t2[1] + size2[1]\n \n ol = max(0, min(x1, x3) - max(x0, x2)) * max(0, min(y1, y3) - max(y0, y2))\n\n return ol / float(2*(size2[0]*size2[1]) - ol)", "def overlap(self, a, b):\n return np.maximum(a, b)", "def overlaps(self, other):\n return _binary_op(arctern.ST_Overlaps, self, other).astype(bool, copy=False)", "def get_overlap(self, other):\n return self.intersection_over_union(other)" ]
[ "0.7035773", "0.68398887", "0.67753077", "0.6746841", "0.6734138", "0.67296", "0.6612416", "0.6612416", "0.6612416", "0.6592445", "0.6574849", "0.6571463", "0.6548481", "0.6531469", "0.6530407", "0.64822507", "0.64153165", "0.6390756", "0.6390756", "0.6390756", "0.6390756", "0.6369392", "0.6360544", "0.6341603", "0.63157654", "0.63129944", "0.6310854", "0.62800795", "0.62786376", "0.62783515" ]
0.7206123
0
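The calc_overlap entry above delegates its scoring to a helper that, for index='dice', computes the Dice coefficient 2*|A & B| / (|A| + |B|), and for index='percent' the share of the first set covered by the intersection. A small self-contained sketch of that scoring step (assumes plain Python iterables of labels or coordinates; the function name and the NaN fallback are illustrative, not the dataset's own _overlap helper):

def dice_overlap(c1, c2, index='dice'):
    """Score the overlap of two collections as Dice or as percent of the first set."""
    set1, set2 = set(c1), set(c2)
    inter = len(set1 & set2)
    if index == 'dice':
        denom = len(set1) + len(set2)  # |A| + |B| == |A union B| + |A intersect B|
        return 2.0 * inter / denom if denom else float('nan')
    if index == 'percent':
        return inter / len(set1) if set1 else float('nan')
    raise ValueError("index must be 'dice' or 'percent'")

# {1, 2, 3} vs {2, 3, 4}: intersection 2, sizes 3 + 3 -> Dice = 4 / 6 = 0.666...
print(dice_overlap([1, 2, 3], [2, 3, 4]))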
Calculate overlaps for leave-one-out cross validation. Each sample has its own region of interest (ROI). For each iteration, the overlap between the ROI of the left-out sample and the ROI derived from the remaining samples is calculated.
def loocv_overlap(X, prob, metric='dice'): assert X.ndim == 2, 'The input X must be a 2D array!' assert X.dtype == np.bool, "The input X's data type must be bool!" n_samp, _ = X.shape remain_idx_arr = np.ones((n_samp,), dtype=np.bool) overlaps = np.zeros((n_samp,)) for left_idx in range(n_samp): # get roi of the left sample roi_left = np.where(X[left_idx])[0] # get roi of the remaining samples remain_idx_arr[left_idx] = False prob_map = np.mean(X[remain_idx_arr], 0) roi_remain = np.where(prob_map > prob)[0] remain_idx_arr[left_idx] = True # calculate overlap overlaps[left_idx] = calc_overlap(roi_left, roi_remain, index=metric) return overlaps
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cal_overlaps(boxes1, boxes2):\n area1 = (boxes1[:, 0] - boxes1[:, 2]) * (boxes1[:, 1] - boxes1[:, 3]) # (Nsample, 1)\n area2 = (boxes2[:, 0] - boxes2[:, 2]) * (boxes2[:, 1] - boxes2[:, 3]) # (Msample, 1)\n\n overlaps = np.zeros((boxes1.shape[0], boxes2.shape[0])) # (Nsample, Msample)\n\n # calculate the intersection of boxes1(anchor) and boxes2(GT box)\n for i in range(boxes1.shape[0]):\n overlaps[i][:] = cal_iou(boxes1[i], area1[i], boxes2, area2)\n\n return overlaps", "def overlapping(y_true, y_pred, inds_inside):\n overlaps = overlap(y_pred, y_true[:, :4])\n\n argmax_overlaps_inds = keras.backend.argmax(overlaps, axis=1)\n\n gt_argmax_overlaps_inds = keras.backend.argmax(overlaps, axis=0)\n\n indices = keras.backend.stack(\n [\n tensorflow.range(keras.backend.shape(inds_inside)[0]),\n keras.backend.cast(argmax_overlaps_inds, tensorflow.int32)\n ]\n )\n\n indices = keras.backend.transpose(indices)\n\n max_overlaps = tensorflow.gather_nd(overlaps, indices)\n\n return argmax_overlaps_inds, max_overlaps, gt_argmax_overlaps_inds", "def compute_overlaps(boxes1, boxes2):\n # Areas of anchors and GT boxes\n area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])\n area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])\n\n # Compute overlaps to generate matrix [boxes1 count, boxes2 count]\n # Each cell contains the IoU value.\n overlaps = np.zeros((boxes1.shape[0], boxes2.shape[0]))\n for i in range(overlaps.shape[1]):\n box2 = boxes2[i]\n overlaps[:, i] = compute_iou(box2, boxes1, area2[i], area1)\n return overlaps", "def compute_overlaps(boxes1, boxes2):\n # Areas of anchors and GT boxes\n area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])\n area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])\n\n # Compute overlaps to generate matrix [boxes1 count, boxes2 count]\n # Each cell contains the IoU value.\n overlaps = np.zeros((boxes1.shape[0], boxes2.shape[0]))\n for i in range(overlaps.shape[1]):\n box2 = boxes2[i]\n overlaps[:, i] = compute_iou(box2, boxes1, area2[i], area1)\n return overlaps", "def compute_overlaps(boxes1, boxes2):\n # Areas of anchors and GT boxes\n area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])\n area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])\n\n # Compute overlaps to generate matrix [boxes1 count, boxes2 count]\n # Each cell contains the IoU value.\n overlaps = np.zeros((boxes1.shape[0], boxes2.shape[0]))\n for i in range(overlaps.shape[1]):\n box2 = boxes2[i]\n overlaps[:, i] = compute_iou(box2, boxes1, area2[i], area1)\n return overlaps", "def overlap(table1, table2):\n out = np.zeros(np.size(table1, axis=0), dtype='bool')\n for i in range(np.size(table1, axis=0)):\n s1_s2 = table1[i, 0] < table2[:, 0] \n s1_e2 = table1[i, 0] <= table2[:, 1]\n e1_s2 = table1[i, 1] < table2[:, 0]\n e1_e2 = table1[i, 1] < table2[:, 1]\n # no overlap occurs when all four parameters above either == 0 or 1\n sum_params = np.sum(np.array([s1_s2, s1_e2, e1_s2, e1_e2]), axis=0)\n olap = (sum_params == 1) | (sum_params == 2) | (sum_params == 3)\n out[i] = np.any(olap)\n return out", "def check_overlaps(self, verbose = False):\n if hasattr(self.phot, \"data\") and hasattr(self, 'spec'):\n for i, spectrum in enumerate(self.spec):\n if verbose:print(i, spectrum)\n for j, filtername in enumerate(self.phot.data_filters):\n if verbose:print(j, filtername)\n\n if hasattr(self.phot.data_filters[filtername], \"_lower_edge\") and \\\n hasattr(self.phot.data_filters[filtername], 
\"_upper_edge\") and \\\n hasattr(self.spec[spectrum], \"data\"):\n blue_bool = self.phot.data_filters[filtername]._lower_edge > self.spec[spectrum].min_wavelength\n red_bool = self.phot.data_filters[filtername]._upper_edge < self.spec[spectrum].max_wavelength\n\n if blue_bool and red_bool:\n within = True\n else:\n within = False\n\n if verbose:print(within)\n if within:\n self.spec[spectrum]._add_to_overlapping_filters(filtername, verbose=verbose)\n else:\n warnings.warn(\"SNClass.check_overlaps - something went wrong... no data?\")\n pass", "def filter_overlapped_boxes(annotations, iou_thr=0.5):\n new_annotations = dict()\n for img_id, annos in annotations.items(): # loop through images\n annos = np.array(annos).astype(np.float32)\n unique_clses = np.unique(annos[:, 4])\n new_img_boxes = []\n for cls in unique_clses: # loop through classes\n idxes = np.where(annos[:, 4] == cls)[0]\n cls_annos = annos[idxes]\n x1, x2 = cls_annos[:, 0], cls_annos[:, 2]\n y1, y2 = cls_annos[:, 1], cls_annos[:, 3]\n\n areas = (x2 - x1) * (y2 - y1)\n order = np.arange(idxes.shape[0])\n new_cls_boxes = []\n while order.size > 0:\n i = order[0]\n xx1 = np.maximum(x1[i], x1[order[1:]])\n yy1 = np.maximum(y1[i], y1[order[1:]])\n xx2 = np.minimum(x2[i], x2[order[1:]])\n yy2 = np.minimum(y2[i], y2[order[1:]])\n\n w = np.maximum(0.0, xx2 - xx1)\n h = np.maximum(0.0, yy2 - yy1)\n inter = w * h\n ovr = inter / (areas[i] + areas[order[1:]] - inter)\n\n # merge overlap boxes\n inds = np.where(ovr > iou_thr)[0]\n overlap_boxes = np.vstack([cls_annos[i:i+1, :],\n cls_annos[order[inds + 1], :]])\n new_cls_boxes.append(np.mean(overlap_boxes, axis=0))\n\n # update order\n inds = np.where(ovr <= iou_thr)[0]\n order = order[inds + 1]\n new_img_boxes.extend(new_cls_boxes)\n new_annotations[img_id] = np.array(new_img_boxes, dtype=np.float32)\n\n return new_annotations", "def do_overlap(ds,iterno,algo=\"FordRollett\",ignore=1,unit_weights=False,top=None,bottom=None,\n exact_angles=None,drop_frames='',drop_tubes = '', use_gains = [],do_sum=False,\n do_interp = False, dumpfile=None):\n import time\n from Reduction import overlap,interpolate\n # Get sensible values\n if top is None: top = ds.shape[1]-1\n if bottom is None: bottom = 0\n\n # Vertically integrate\n # Dimensions are step,vertical,tube\n\n b = ds[:,bottom:top,:].intg(axis=1).get_reduced()\n\n # Determine pixels per tube interval\n\n tube_pos = ds.axes[-1]\n if tube_pos.ndim == 2: #very old data, just take one slice\n tube_pos = tube_pos[0]\n tubesep = abs(tube_pos[0]-tube_pos[-1])/(len(tube_pos)-1)\n tube_steps = ds.axes[0]\n bin_size = abs(tube_steps[0]-tube_steps[-1])/(len(tube_steps)-1)\n pixel_step = int(round(tubesep/bin_size))\n bin_size = tubesep/pixel_step\n print '%f tube separation, %d steps before overlap, ideal binsize %f' % (tubesep,pixel_step,bin_size)\n dropped_frames = parse_ignore_spec(drop_frames)\n dropped_tubes = parse_ignore_spec(drop_tubes)\n\n # Drop frames from the end as far as we can\n\n for empty_no in range(b.shape[0]-1,0,-1):\n print \"Trying %d\" % empty_no\n if empty_no not in dropped_frames:\n break\n dropped_frames.remove(empty_no)\n print \"All frames after %d empty so dropped\" % empty_no\n b = b[:empty_no+1]\n\n # Do we need to add dummy missing frames?\n\n extra_steps = b.shape[0]%pixel_step\n if extra_steps > 0:\n start_drop = b.shape[0]\n # gumpy has no resize\n new_b = zeros([((b.shape[0]/pixel_step)+1)*pixel_step,b.shape[1]])\n new_b[:b.shape[0]] = b\n b = new_b\n extra_dropped_frames = range(start_drop,b.shape[0])\n print 
\"Filled out array from %d to %d with dummy frames\" % (start_drop,b.shape[0])\n dropped_frames |= set(extra_dropped_frames)\n else:\n extra_dropped_frames = []\n \n # Zero out dropped frames\n\n print 'Dropped frames: ' + `dropped_frames`\n b_zeroed = copy(b)\n\n # Make a simple array to work out which sectors are missing frames\n\n frame_check = array.ones(b.shape[0])\n\n # Zero out all matching steps\n\n all_zeroed = copy(b)\n region_starts = [a*pixel_step for a in range(b.shape[0]/pixel_step)]\n for frame_no in dropped_frames:\n b_zeroed[frame_no] = 0\n b_zeroed.var[frame_no] = 0\n dropped_step = frame_no%pixel_step\n ref_drop_steps = [r+dropped_step for r in region_starts]\n for drop_step in ref_drop_steps:\n frame_check[drop_step] = 0\n all_zeroed[drop_step] = 0\n all_zeroed.var[drop_step] = 0\n\n # Now drop out whole detectors\n\n for tube_no in dropped_tubes:\n b_zeroed[:,tube_no] = 0\n b_zeroed.var[:,tube_no] = 0\n all_zeroed[:,tube_no] = 0\n all_zeroed.var[:,tube_no] = 0\n\n # Interpolation. If requested, we first interpolate the data onto a regular angular grid,\n # which is the assumption underlying the regain calculation. However, as the deviations\n # from regularity are usually minor, this step can usually be skipped\n \n if do_interp:\n if exact_angles != None:\n h_correction = read_horizontal_corrections(exact_angles)\n else:\n h_correction = None\n \n all_zeroed = interpolate.interpolate(all_zeroed,dropped_frames,tube_steps,tube_steps[0],\n bin_size,len(tube_pos),h_correction=h_correction)\n b_zeroed = interpolate.interpolate(b_zeroed,dropped_frames,tube_steps,tube_steps[0],\n bin_size,len(tube_pos),h_correction=h_correction)\n\n \n c = all_zeroed.reshape([b.shape[0]/pixel_step,pixel_step,b.shape[-1]])\n frame_check = frame_check.reshape([b.shape[0]/pixel_step,pixel_step])\n frame_sum = frame_check.intg(axis=1)\n print `b.shape` + \"->\" + `c.shape`\n print 'Relative no of frames: ' + `frame_sum`\n\n # Output the starting data for external use\n\n if dumpfile is not None:\n dump_tube_intensities(dumpfile,raw=b_zeroed)\n if len(use_gains)==0: #we have to calculate them\n if c.shape[0] == 1: #can't be done, there is no overlap\n return None,None,None,None,None\n if do_sum:\n # sum the individual unoverlapped sections. 
Reshape is required as the\n # intg function removes the dimension\n d = c.intg(axis=1).reshape([c.shape[0],1,c.shape[2]]) #array of [rangeno,stepno,tubeno]\n # normalise by the number of frames in each section\n else:\n d = c #no op\n # Note gumpy can't do transposes of more than two axes at once\n e = d.transpose((2,0)) #array of [tubeno,stepno,section]\n e = e.transpose((1,2)) #array of [tubeno,section,stepno]\n print \"Data shape: \" + repr(e.shape)\n print \"Check shape: \" + repr(frame_sum.shape)\n # create the mask: any values of zero are assumed to be incorrect and masked out\n pixel_mask = array.ones_like(e[ignore:])\n for one_tube in range(len(e[ignore:])):\n if not e[ignore+one_tube].any(): #all zero\n pixel_mask[one_tube] = 0 #mask it out\n gain,dd,interim_result,residual_map,chisquared,oldesds,first_ave,weights = \\\n iterate_data(e[ignore:],iter_no=iterno,unit_weights=unit_weights,pixel_mask=pixel_mask)\n else: #we have been provided with gains\n gain = use_gains\n chisquared=0.0\n # calculate errors based on full dataset\n # First get a full model\n reshape_ds = b_zeroed.reshape([b.shape[0]/pixel_step,pixel_step,b.shape[-1]])\n start_ds = reshape_ds.transpose((2,0))[ignore:] #array of [tubeno,stepno,section]\n start_ds = start_ds.transpose((1,2))\n start_var = start_ds.var\n\n # Our new pixel mask has to have all of the steps in\n\n pixel_mask = array.ones_like(start_ds)\n for one_tube in range(len(start_ds)):\n if not start_ds[one_tube].any(): #all zero\n pixel_mask[one_tube] = 0 #mask it out\n\n # Normalise gains so that average is 1.0\n\n gain = gain*len(gain)/gain.sum()\n model,wd,model_var,esds = overlap.apply_gain(start_ds,1.0/start_var,gain,\n calc_var=True,bad_steps=dropped_frames,pixel_mask=pixel_mask)\n\n # model and model_var have shape tubeno*pixel_step + no_steps (see shift_tube_add_new)\n\n print 'Have full model and errors at %f' % time.clock()\n\n # step size could be less than pixel_step if we have a short non-overlap scan\n\n real_step = pixel_step\n if len(tube_steps)< pixel_step:\n real_step = len(tube_steps)\n # and we have to prune the output data too\n holeless_model = zeros([real_step*start_ds.shape[0]])\n holeless_var = zeros_like(holeless_model)\n for tube_set in range(start_ds.shape[0]):\n holeless_model[tube_set*real_step:(tube_set+1)*real_step]=model[tube_set*pixel_step:(tube_set+1)*pixel_step] \n holeless_var[tube_set*real_step:(tube_set+1)*real_step]=model_var[tube_set*pixel_step:(tube_set+1)*pixel_step] \n model = holeless_model\n model_var = holeless_var\n cs = Dataset(model)\n cs.var = model_var\n\n # Now build up the important information\n\n cs.title = ds.title\n cs.copy_cif_metadata(ds)\n\n # construct the axes\n\n if exact_angles is None or do_interp:\n axis = arange(len(model))\n new_axis = axis*bin_size + ds.axes[0][0] + ignore*pixel_step*bin_size\n if not do_interp:\n axis_string = \"\"\"Following application of gain correction, two theta values were recalculated assuming a step size of %8.3f \n and a tube separation of %8.3f starting at %f.\"\"\" % (bin_size,tubesep,ds.axes[0][0]+ignore*pixel_step*bin_size)\n else:\n axis_string = \"\"\"Gain correction was performed after interpolating observed values onto a\n regular angular grid with a step size of %8.3f and a tube separation of %8.3f starting at %f.\"\"\" % (bin_size,tubesep,ds.axes[0][0]+ignore*pixel_step*bin_size)\n else:\n new_axis = calculate_average_angles(tube_steps,exact_angles,pixel_step,tubesep,\n extra_dummy=extra_dropped_frames)\n # Remove ignored tubes\n \n new_axis = 
new_axis[ignore*real_step:]\n \n axis_string = \\\n \"\"\"Following application of gain correction, two theta values were recalculated using a tube separation of \n%8.3f and the recorded positions of the lowest angle tube, and then adding an average of the \nangular corrections for the tubes contributing to each two theta position.\"\"\" % (tubesep)\n cs.set_axes([new_axis],anames=['Two theta'],aunits=['Degrees'])\n print 'New axis goes from %f to %f in %d steps' % (new_axis[0],new_axis[-1],len(new_axis))\n print 'Total %d points in output data' % len(cs)\n # prepare info for CIF file\n import math\n detno = map(lambda a:\"%d\" % a,range(len(gain)))\n gain_as_strings = map(lambda a:\"%.4f\" % a,gain)\n gain_esd = [\"%.4f\" % a for a in esds]\n cs.harvest_metadata(\"CIF\").AddCifItem((\n ((\"_[local]_detector_number\",\"_[local]_refined_gain\",\"_[local]_refined_gain_esd\"),),\n ((detno,gain_as_strings,gain_esd),))\n )\n if len(use_gains)==0:\n info_string = \"After vertical integration between pixels %d and %d,\" % (bottom,top) + \\\n \"\"\" individual tube gains were iteratively refined using the Ford/Rollett algorithm (Acta Cryst. (1968) B24,293). \n Final gains are stored in the _[local]_refined_gain loop.\"\"\" + axis_string\n else:\n info_string = \"After vertical integration between pixels %d and %d,\" % (bottom,top) + \\\n \" individual tube gains were corrected based on a previous iterative refinement using the Ford/Rollett algorithm. The gains used\" + \\\n \"are stored in the _[local]_refined_gain loop.\" + axis_string\n cs.add_metadata(\"_pd_proc_info_data_reduction\",info_string,append=True)\n return cs,gain,esds,chisquared,c.shape[0]", "def align(self, *, skip_corners=False, return_on_invalid_result=False, warpwarnings=False, **kwargs):\n #load the images for all HPFs and keep them in memory as long as\n #the AlignSample is active\n self.getDAPI()\n self.logger.info(\"starting alignment\")\n\n weighted_sum_mse = 0.\n sum_weights = 0.\n done = set()\n\n for i, overlap in enumerate(self.overlaps, start=1):\n if skip_corners and overlap.tag in [1,3,7,9] :\n continue\n self.logger.debug(f\"aligning overlap {overlap.n} ({i}/{len(self.overlaps)})\")\n result = None\n #check if the inverse overlap has already been aligned\n #(e.g. 
if the current overlap is between (1, 2), check the overlap between (2, 1))\n #if so, we don't have to align again\n if self.inverseoverlapsdictkey(overlap) in done:\n inverseoverlap = self.overlapsdict[self.inverseoverlapsdictkey(overlap)]\n if hasattr(inverseoverlap, \"result\"):\n result = overlap.getinversealignment(inverseoverlap)\n #do the alignment\n if result is None:\n result = overlap.align(gputhread=self.gputhread, gpufftdict=self.gpufftdict, **kwargs)\n done.add(self.overlapsdictkey(overlap))\n\n #contribution of the mean squared difference after alignment\n #to the weighted sum\n if result is not None and result.exit == 0: \n w = (overlap.cutimages[0].shape[0]*overlap.cutimages[0].shape[1])\n weighted_sum_mse+=w*result.mse[2]\n sum_weights+=w\n else :\n if result is None:\n reason = \"is None\"\n else:\n reason = f\"has exit status {result.exit}\"\n if return_on_invalid_result :\n if warpwarnings: self.logger.warningglobal(f'Overlap number {i} alignment result {reason}: returning 1e10!!')\n return 1e10\n else :\n if warpwarnings: self.logger.warningglobal(f'Overlap number {i} alignment result {reason}: adding 1e10 to sum_mse!!')\n w = (overlap.cutimages[0].shape[0]*overlap.cutimages[0].shape[1])\n weighted_sum_mse+=w*1e10\n sum_weights+=w\n\n self.logger.info(\"finished align loop for \"+self.SlideID)\n return weighted_sum_mse/sum_weights", "def check_recon_overlaps(self, verbose = False):\n if hasattr(self.phot, \"data\") and hasattr(self, 'recon_spec'):\n for i, spectrum in enumerate(self.recon_spec):\n if verbose:print(i, spectrum)\n for j, filtername in enumerate(self.phot.data_filters):\n if verbose:print(j, filtername)\n\n if hasattr(self.phot.data_filters[filtername], \"_lower_edge\") and \\\n hasattr(self.phot.data_filters[filtername], \"_upper_edge\") and \\\n hasattr(self.recon_spec[spectrum], \"data\"):\n blue_bool = self.phot.data_filters[filtername]._lower_edge > self.recon_spec[spectrum].min_wavelength\n red_bool = self.phot.data_filters[filtername]._upper_edge < self.recon_spec[spectrum].max_wavelength\n\n if blue_bool and red_bool:\n within = True\n else:\n within = False\n\n if verbose:print(within)\n if within:\n self.recon_spec[spectrum]._add_to_overlapping_filters(filtername)\n else:\n warnings.warn(\"SNClass.check_sim_overlaps - something went wrong... 
no data?\")\n pass", "def test_compute_overlap(self):\n # box1 contained in box2\n box1 = ((1, 2), (1, 2), (1, 2))\n box2 = ((1, 3), (1, 3), (1, 3))\n mapping = {box1: [1, 2, 3, 4], box2: [1, 2, 3, 4, 5]}\n # box1 in box2, so complete overlap\n np.testing.assert_almost_equal(\n dc.dock.binding_pocket.compute_overlap(mapping, box1, box2), 1)\n # 4/5 atoms in box2 in box1, so 80 % overlap\n np.testing.assert_almost_equal(\n dc.dock.binding_pocket.compute_overlap(mapping, box2, box1), .8)", "def bbox_overlaps(bboxes1, bboxes2, mode='iou'):\n\n from icv.data.core.bbox import BBox\n assert mode in ['iou', 'iof']\n\n bboxes1 = np.array([np.array(b.bbox) if isinstance(b,BBox) else b for b in bboxes1])\n bboxes2 = np.array([np.array(b.bbox) if isinstance(b,BBox) else b for b in bboxes2])\n\n bboxes1 = bboxes1.astype(np.float32)\n bboxes2 = bboxes2.astype(np.float32)\n rows = bboxes1.shape[0]\n cols = bboxes2.shape[0]\n ious = np.zeros((rows, cols), dtype=np.float32)\n if rows * cols == 0:\n return ious\n exchange = False\n if bboxes1.shape[0] > bboxes2.shape[0]:\n bboxes1, bboxes2 = bboxes2, bboxes1\n ious = np.zeros((cols, rows), dtype=np.float32)\n exchange = True\n area1 = (bboxes1[:, 2] - bboxes1[:, 0] + 1) * (\n bboxes1[:, 3] - bboxes1[:, 1] + 1)\n area2 = (bboxes2[:, 2] - bboxes2[:, 0] + 1) * (\n bboxes2[:, 3] - bboxes2[:, 1] + 1)\n for i in range(bboxes1.shape[0]):\n x_start = np.maximum(bboxes1[i, 0], bboxes2[:, 0])\n y_start = np.maximum(bboxes1[i, 1], bboxes2[:, 1])\n x_end = np.minimum(bboxes1[i, 2], bboxes2[:, 2])\n y_end = np.minimum(bboxes1[i, 3], bboxes2[:, 3])\n overlap = np.maximum(x_end - x_start + 1, 0) * np.maximum(\n y_end - y_start + 1, 0)\n if mode == 'iou':\n union = area1[i] + area2 - overlap\n else:\n union = area1[i] if not exchange else area2\n ious[i, :] = overlap / union\n if exchange:\n ious = ious.T\n return ious", "def calc_overlap(sample,ignore_zeros=False):\n overlap = sample.dot(sample.T)\n if ignore_zeros:\n countZeros = np.zeros((len(sample),len(sample),2))\n countZeros[:,:,0] = (sample==0).sum(1)[:,None]\n countZeros[:,:,1] = (sample==0).sum(1)[None,:]\n return overlap / (sample.shape[1]-countZeros.max(2))\n return overlap / sample.shape[1]", "def create_overlap_metric(anchor_boxes):\n y, x, h, w = np.transpose(anchor_boxes)\n ab_area = w * h\n y0 = y - h // 2\n x0 = x - w // 2\n y1 = y + h // 2\n x1 = x + w // 2\n\n def overlap(gt_boxes):\n overlaps = []\n for gt_box in gt_boxes:\n gt_y0, gt_x0, gt_y1, gt_x1 = gt_box\n int_y0 = np.maximum(gt_y0, y0)\n int_x0 = np.maximum(gt_x0, x0)\n int_y1 = np.minimum(gt_y1, y1)\n int_x1 = np.minimum(gt_x1, x1)\n int_area = np.maximum(0, int_x1 - int_x0) * np.maximum(0, int_y1 - int_y0)\n overlaps.append(int_area / ab_area)\n overlaps = np.transpose(overlaps)\n gt_indices = np.argmax(overlaps, axis=1)\n overlaps = np.squeeze(np.take_along_axis(overlaps, gt_indices[:, np.newaxis], axis=1))\n gt_boxes = np.take(gt_boxes, gt_indices, axis=0)\n return overlaps, gt_boxes\n return overlap", "def check_sim_overlaps(self, verbose = False):\n if hasattr(self.phot, \"data\") and hasattr(self, 'spec'):\n for i, spectrum in enumerate(self.sim_spec):\n if verbose:print(i, spectrum)\n for j, filtername in enumerate(self.phot.data_filters):\n if verbose:print(j, filtername)\n\n if hasattr(self.phot.data_filters[filtername], \"_lower_edge\") and \\\n hasattr(self.phot.data_filters[filtername], \"_upper_edge\") and \\\n hasattr(self.sim_spec[spectrum], \"data\"):\n blue_bool = self.phot.data_filters[filtername]._lower_edge > 
self.sim_spec[spectrum].min_wavelength\n red_bool = self.phot.data_filters[filtername]._upper_edge < self.sim_spec[spectrum].max_wavelength\n\n if blue_bool and red_bool:\n within = True\n else:\n within = False\n\n if verbose:print(within)\n if within:\n self.sim_spec[spectrum]._add_to_overlapping_filters(filtername, verbose=verbose)\n else:\n warnings.warn(\"SNClass.check_sim_overlaps - something went wrong... no data?\")\n pass", "def fix_non_sample_areas(overlap_metadata, direction=\"horizontal\"):\n g_nrow, g_ncol = overlap_metadata.shape[:2]\n metadata = np.copy(overlap_metadata)\n if direction == \"vertical\":\n for i in np.arange(g_nrow):\n i1 = i - 1\n i2 = i + 1\n for j in np.arange(g_ncol):\n (area, _) = overlap_metadata[i, j]\n j1 = j - 1\n j2 = j + 1\n if area == 0:\n area1 = 0\n if 0 <= j1 < g_ncol:\n (area1, side1) = overlap_metadata[i, j1]\n if area1 != 0:\n metadata[i, j] = np.asarray([area1, side1])\n continue\n if 0 <= j2 < g_ncol:\n (area1, side1) = overlap_metadata[i, j2]\n if area1 != 0:\n metadata[i, j] = np.asarray([area1, side1])\n continue\n if area1 == 0:\n if 0 <= i1 < g_nrow:\n (area1, side1) = overlap_metadata[i1, j]\n if area1 != 0:\n metadata[i, j] = np.asarray([area1, side1])\n continue\n if 0 <= i2 < g_nrow:\n (area1, side1) = overlap_metadata[i2, j]\n if area1 != 0:\n metadata[i, j] = np.asarray([area1, side1])\n continue\n # Run the same above routine but in reverse order.\n for i in np.arange(g_nrow - 1, -1, -1):\n i1 = i - 1\n i2 = i + 1\n for j in np.arange(g_ncol - 1, -1, -1):\n (area, _) = metadata[i, j]\n j1 = j - 1\n j2 = j + 1\n if area == 0:\n area1 = 0\n if 0 <= j1 < g_ncol:\n (area1, side1) = metadata[i, j1]\n if area1 != 0:\n metadata[i, j] = np.asarray([area1, side1])\n continue\n if 0 <= j2 < g_ncol:\n (area1, side1) = metadata[i, j2]\n if area1 != 0:\n metadata[i, j] = np.asarray([area1, side1])\n continue\n if area1 == 0:\n if 0 <= i1 < g_nrow:\n (area1, side1) = metadata[i1, j]\n if area1 != 0:\n metadata[i, j] = np.asarray([area1, side1])\n continue\n if 0 <= i2 < g_nrow:\n (area1, side1) = metadata[i2, j]\n if area1 != 0:\n metadata[i, j] = np.asarray([area1, side1])\n continue\n else:\n for j in np.arange(g_ncol):\n j1 = j - 1\n j2 = j + 1\n for i in np.arange(g_nrow):\n (area, _) = overlap_metadata[i, j]\n i1 = i - 1\n i2 = i + 1\n if area == 0:\n area1 = 0\n if 0 <= i1 < g_nrow:\n (area1, side1) = overlap_metadata[i1, j]\n if area1 != 0:\n metadata[i, j] = np.asarray([area1, side1])\n continue\n if 0 <= i2 < g_nrow:\n (area1, side1) = overlap_metadata[i2, j]\n if area1 != 0:\n metadata[i, j] = np.asarray([area1, side1])\n continue\n if area1 == 0:\n if 0 <= j1 < g_ncol:\n (area1, side1) = overlap_metadata[i, j1]\n if area1 != 0:\n metadata[i, j] = np.asarray([area1, side1])\n continue\n if 0 <= j2 < g_ncol:\n (area1, side1) = overlap_metadata[i, j2]\n if area1 != 0:\n metadata[i, j] = np.asarray([area1, side1])\n continue\n # Run the same above routine but in reverse order.\n for j in np.arange(g_ncol - 1, -1, -1):\n j1 = j - 1\n j2 = j + 1\n for i in np.arange(g_nrow - 1, -1, -1):\n (area, _) = metadata[i, j]\n i1 = i - 1\n i2 = i + 1\n if area == 0:\n area1 = 0\n if 0 <= i1 < g_nrow:\n (area1, side1) = metadata[i1, j]\n if area1 != 0:\n metadata[i, j] = np.asarray([area1, side1])\n continue\n if 0 <= i2 < g_nrow:\n (area1, side1) = metadata[i2, j]\n if area1 != 0:\n metadata[i, j] = np.asarray([area1, side1])\n continue\n if area1 == 0:\n if 0 <= j1 < g_ncol:\n (area1, side1) = metadata[i, j1]\n if area1 != 0:\n metadata[i, j] = 
np.asarray([area1, side1])\n continue\n if 0 <= j2 < g_ncol:\n (area1, side1) = metadata[i, j2]\n if area1 != 0:\n metadata[i, j] = np.asarray([area1, side1])\n continue\n return metadata", "def test_idx_overlap():\n # Base array\n arr = np.arange(10)\n\n # Test subset overlap\n idx = u.idx_overlap(arr, np.arange(5, 8))\n assert len(idx) == 3\n\n # Test complete overlap\n idx = u.idx_overlap(arr, np.arange(-5, 20))\n assert len(idx) == 8\n\n # Test partial right overlap\n idx = u.idx_overlap(arr, np.arange(5, 20))\n assert len(idx) == 4\n\n # Test partial left overlap\n idx = u.idx_overlap(arr, np.arange(-5, 5))\n assert len(idx) == 4\n\n # Test no overlap\n idx = u.idx_overlap(arr, np.arange(10, 20))\n assert len(idx) == 0", "def test_outside_plus_inside(self):\n for region, bounds in load_region_bounds_dict().items():\n lon_bounds, lat_bounds = bounds\n for key in ['data01', 'ds_shift_lon', 'ds_rev_both', 'ds_irr_both']:\n outside_data = climapy.xr_mask_bounds(data_dict[key],\n lon_bounds=lon_bounds, lat_bounds=lat_bounds,\n select_how='outside')['PRECL']\n inside_data = climapy.xr_mask_bounds(data_dict[key],\n lon_bounds=lon_bounds, lat_bounds=lat_bounds,\n select_how='inside')['PRECL']\n outside_plus_inside = (np.nan_to_num(outside_data.values) +\n np.nan_to_num(inside_data.values))\n diff_from_input = outside_plus_inside - data_dict[key]['PRECL'].values\n assert np.abs(diff_from_input).max() == 0", "def _sample_rois(all_rois, gt_boxes, fg_rois_per_image, rois_per_image, num_classes):\r\n # MP:\r\n # overlaps: (no_rois x no_gt_bbox) each row gives the overlap of the proposed region with the gt boxes. Overlap is measured as: (overlapping area)/(union area).\r\n # gt_assignment: determines which of the gt boxes has more overlap with the regions\r\n # max_overlaps: takes the maximum overlap of a region\r\n # labels: defines which which gt box corresponds best with the region and assigns its label to the region\r\n # fg_rois_per_image = 8\r\n # overlaps: (rois x gt_boxes)\r\n\r\n # MP: bbox_overlaps rewritten as c_bbox_overlaps\r\n #overlaps =c_bbox_overlaps(np.ascontiguousarray(all_rois[:, 1:5], dtype=np.float),\r\n # \t\t np.ascontiguousarray(gt_boxes[:, :4], dtype=np.float))\r\n overlaps = bbox_overlaps(np.ascontiguousarray(all_rois[:, 1:5], dtype=np.float),\r\n \t\t np.ascontiguousarray(gt_boxes[:, :4], dtype=np.float))\r\n # MP: which column index has maximum value\r\n gt_assignment = overlaps.argmax(axis=1)\r\n max_overlaps = overlaps.max(axis=1)\r\n labels = gt_boxes[gt_assignment, 4]\r\n\r\n\r\n # MP: Extract RoIs where overlap >= FG_THRESH\r\n fg_inds = np.where(max_overlaps >= cfg.TRAIN.FG_THRESH)[0]\r\n\r\n # Guard against the case when an image has fewer than fg_rois_per_image (i.e. 8)\r\n fg_rois_per_this_image = min(fg_rois_per_image, fg_inds.size)\r\n\r\n # Sample foreground regions without replacement\r\n if fg_inds.size > 0:\r\n fg_inds = npr.choice(fg_inds, size=int(fg_rois_per_this_image), replace=False)\r\n\r\n # MP: Extract RoIs where overlap in [BG_THRESH_LO, BG_THRESH_HI), i.e. 
[0.0, 0.5)\r\n bg_inds = np.where((max_overlaps < cfg.TRAIN.BG_THRESH_HI) &\r\n (max_overlaps >= cfg.TRAIN.BG_THRESH_LO))[0]\r\n\r\n # Compute number of background RoIs to take from this image (guarding\r\n # against there being fewer than desired)\r\n # MP: Take the no of bg_inds such that fg_inds.shape + bg_inds.shape = 32\r\n bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image\r\n bg_rois_per_this_image = min(bg_rois_per_this_image, bg_inds.size)\r\n if bg_inds.size > 0:\r\n bg_inds = npr.choice(bg_inds, size=int(bg_rois_per_this_image), replace=False)\r\n\r\n\r\n # MP: concatenate the fg_inds and bg_inds, such that keep_inds.shape = 32\r\n keep_inds = np.append(fg_inds, bg_inds)\r\n # MP: obtain the labels set the ones corresponding to bg_inds to zero\r\n labels = labels[keep_inds]\r\n labels[int(fg_rois_per_this_image):] = 0\r\n\r\n # MP: select the 32 rois (fg & bg) from the 2000+ rois with the keep_inds\r\n rois = all_rois[keep_inds]\r\n # MP: fg rois\r\n rois_pos = np.zeros((fg_inds.size, 5), dtype=np.float32) #because return rois_pos as top ---> allocate memory for it\r\n rois_pos[:, :] = all_rois[fg_inds]\r\n gt_assignment_pos = gt_assignment[fg_inds]\r\n\r\n # MP: compute diff to approximate bbox to ground truth\r\n bbox_target_data = _compute_targets(\r\n rois[:, 1:5], gt_boxes[gt_assignment[keep_inds], :4], labels)\r\n\r\n # MP: set the diff values in a matrix where each row corresponds to a foreground bbox\r\n # and the values are stored starting at the index of the label.\r\n # Therefore number of columns: 4*(no labels)\r\n # The bg bboxes are also included in rows, but have all values equal to zero.\r\n bbox_targets, bbox_inside_weights = \\\r\n _get_bbox_regression_labels(bbox_target_data, num_classes)\r\n\r\n '''\r\n # MP: printing and saving files\r\n print \"overlaps with size {}: {}\".format(overlaps.shape, overlaps)\r\n print \"gt_assignment with size {}: {}\".format(gt_assignment.shape, gt_assignment)\r\n print \"max_overlaps with size{}: {}\".format(max_overlaps.shape, max_overlaps)\r\n print \"labels with size{}: {}\".format(labels.shape, labels)\r\n print \"bg_inds with size{}: {}\".format(bg_inds.shape, bg_inds)\r\n print \"bg_rois_per_this_image: {}\".format(bg_rois_per_this_image)\r\n print \"bg_inds with shape {}: {}\".format(bg_inds.shape, bg_inds)\r\n print \"fg_inds with size {}: {}\".format(fg_inds.shape, fg_inds)\r\n print \"labels with shape {}: {}\".format(labels.shape,labels)\r\n print \"rois wiht shape {}: {}\".format(rois.shape, rois)\r\n print \"rois_pos wiht shape {}: {}\".format(rois_pos.shape, rois_pos)\r\n print \"labels with shape {}: {}\".format(labels.shape,labels)\r\n print \"rois_pos wiht shape {}: {}\".format(rois_pos.shape, rois_pos)\r\n print \"gt_assignment_pos wiht shape {}: {}\".format(gt_assignment_pos.shape, gt_assignment_pos)\r\n print \"bbox_target_data wiht shape {}: {}\".format(bbox_target_data.shape, bbox_target_data)\r\n print \"diff: {}\".format(rois_pos[:,:] + bbox_target_data[0:fg_inds.size,:])\r\n print \"bbox_targets with size {}: {}\".format(bbox_targets.shape, bbox_targets)\r\n print \"bbox_inside_weights with size {}: {}\".format(bbox_inside_weights.shape, bbox_inside_weights)\r\n\r\n np.savetxt('bbox_targets.txt', bbox_targets, delimiter=',')\r\n np.savetxt('bbox_inside_weights.txt', bbox_inside_weights, delimiter=',')\r\n '''\r\n\r\n return labels, rois, bbox_targets, bbox_inside_weights, gt_boxes[gt_assignment[keep_inds], :], rois_pos, gt_assignment_pos", "def test_num_samples_high(self):\n sp_file = 
os.path.join(\"tests\", \"data\", \"geolife\", \"geolife_staypoints.csv\")\n sp = ti.read_staypoints_csv(sp_file, tz=\"utc\", index_col=\"id\", crs=\"epsg:4326\")\n sp_ns_5, _ = sp.as_staypoints.generate_locations(\n epsilon=50, distance_metric=\"haversine\", agg_level=\"user\", num_samples=2\n )\n non_noise_sp = sp_ns_5[sp_ns_5[\"location_id\"] != -1]\n\n # group_by_user_id and check that no two different user ids share a common location id\n grouped = list(non_noise_sp.groupby([\"user_id\"])[\"location_id\"].unique())\n loc_set = []\n for loc_list in grouped:\n loc_set.append(set(loc_list))\n\n # we assert that the count of overlaps is equal to the count of users\n # (each user has overlap with the same user)\n assert sum([int(len(p & q) > 0) for p in loc_set for q in loc_set]) == len(loc_set)", "def test_overlap(self):\r\n rect1 = Rectangle(10, 20, 30, 40)\r\n rect2 = Rectangle(50, 60, 70, 80)\r\n\r\n # overlap should be commutative\r\n assert not rect1.overlap_with(rect2)\r\n assert not rect2.overlap_with(rect1)\r\n assert not Rectangle.overlap(rect1, rect2)\r\n assert not Rectangle.overlap(rect2, rect1)\r\n\r\n rect1 = Rectangle(-10, -20, 10, 60)\r\n rect2 = Rectangle(0, 50, 100, 200)\r\n assert rect1.overlap_with(rect2)\r\n assert rect2.overlap_with(rect1)\r\n assert Rectangle.overlap(rect1, rect2)\r\n assert Rectangle.overlap(rect2, rect1)\r\n\r\n # rectangles with only same boarder are not considered overlapped\r\n rect1 = Rectangle(-30, -10, -20, 0)\r\n rect2 = Rectangle(-20, -5, 30, 20)\r\n rect3 = Rectangle(-40, 0, 30, 20)\r\n assert not rect1.overlap_with(rect2)\r\n assert not rect1.overlap_with(rect3)\r\n assert not Rectangle.overlap(rect2, rect1)\r\n assert not Rectangle.overlap(rect3, rect1)", "def detect_conflict(candi_group, prob_group, cls_group,\n roi_feature_group, roi_elmo_feature_group, \n roi_label_group, roi_len_group, roi_char_ids_group, \n roi_word_lengths_group, sen_last_hidden_group,\n left_context_word_group, left_context_len_group, \n right_context_word_group, right_context_len_group):\n roi_feature_nonconf, roi_elmo_feature_nonconf, roi_label_nonconf, roi_len_nonconf = [], [], [], []\n roi_char_ids_nonconf, roi_word_lengths_nonconf, sen_last_hidden_nonconf = [], [], []\n left_context_word_nonconf, left_context_len_nonconf = [], []\n right_context_word_nonconf, right_context_len_nonconf = [], []\n\n keep = []\n orders = np.argsort(-np.array(prob_group))\n while orders.size > 0:\n save_item = list(range(orders.shape[0]))\n\n # Accept the anchor with hightest prob\n highest_idx = orders[0]\n keep.append(highest_idx)\n save_item.remove(0)\n\n if __DELETE_CONF__:\n # delete conflict anchors\n for k in range(1, len(orders)):\n if conflict(candi_group[highest_idx], candi_group[orders[k]]):\n save_item.remove(k)\n\n orders = orders[save_item]\n\n for idx in keep:\n # output probs and labels\n roi_feature_nonconf.append(roi_feature_group[idx])\n roi_elmo_feature_nonconf.append(roi_elmo_feature_group[idx])\n roi_label_nonconf.append(roi_label_group[idx])\n roi_len_nonconf.append(roi_len_group[idx])\n roi_char_ids_nonconf.append(roi_char_ids_group[idx])\n roi_word_lengths_nonconf.append(roi_word_lengths_group[idx])\n sen_last_hidden_nonconf.append(sen_last_hidden_group[idx])\n left_context_word_nonconf.append(left_context_word_group[idx])\n left_context_len_nonconf.append(left_context_len_group[idx])\n right_context_word_nonconf.append(right_context_word_group[idx])\n right_context_len_nonconf.append(right_context_len_group[idx])\n return roi_feature_nonconf, 
roi_elmo_feature_nonconf, roi_label_nonconf, roi_len_nonconf, roi_char_ids_nonconf, roi_word_lengths_nonconf, sen_last_hidden_nonconf, left_context_word_nonconf, left_context_len_nonconf, right_context_word_nonconf, right_context_len_nonconf", "def _print_overlapping_guards(self, model):\n has_overlap_guards = model.labeling.get_states(\"overlap_guards\")\n if has_overlap_guards.number_of_set_bits() == 0:\n return\n\n print(\"OVERLAP!\")\n print(has_overlap_guards)\n\n assert model.has_choice_origins()\n choice_origins = model.choice_origins\n conflicting_sets = []\n for state in model.states:\n if has_overlap_guards[state.id]:\n for action in state.actions:\n conflicting_sets.append(choice_origins.get_edge_index_set(state.id + action.id))\n\n for cs in conflicting_sets:\n print(choice_origins.model.restrict_edges(cs))\n exit(1)", "def _validate_no_overlap(params, error_callback):\n dhcp_set = netaddr.IPSet(netaddr.IPRange(params['dhcp_start'],\n params['dhcp_end']))\n inspection_set = netaddr.IPSet(netaddr.IPRange(params['inspection_start'],\n params['inspection_end']))\n # If there is any intersection of the two sets then we have a problem\n if dhcp_set & inspection_set:\n message = ('Inspection DHCP range \"%s-%s\" overlaps provisioning '\n 'DHCP range \"%s-%s\".' %\n (params['inspection_start'], params['inspection_end'],\n params['dhcp_start'], params['dhcp_end']))\n error_callback(message)", "def _prune_non_overlapping_boxes(self, boxes1, boxes2, min_overlap=0.0):\n with tf.name_scope('prune_non_overlapping_boxes'):\n ioa = self._ioa(boxes2, boxes1) # [M, N] tensor\n ioa = tf.reduce_max(ioa, axis=0) # [N] tensor\n keep_bool = tf.greater_equal(ioa, tf.constant(min_overlap))\n keep_inds = tf.squeeze(tf.where(keep_bool), axis=1)\n boxes = tf.gather(boxes1, keep_inds)\n return boxes, keep_inds", "def find_instances(self, image, region, overlap):\n self.image = np.copy(image)\n\n self.eff_step_size = int((1.0-overlap)*self.eff_box_size)\n\n y_steps = (region[3]-region[1])//self.eff_step_size\n x_steps = (region[2]-region[0])//self.eff_step_size\n\n if region[0]+(x_steps-1)*self.eff_step_size+self.eff_box_size>region[2]:\n x_steps -= 1\n if region[1]+(y_steps-1)*self.eff_step_size+self.eff_box_size>region[3]:\n y_steps -= 1\n\n if self.single_hog:\n self.resized_image = image[region[1]:region[3],region[0]:region[2],:]\n self.resized_image = cv2.resize(self.resized_image, (int(self.resized_image.shape[1]/self.scaling), int(self.resized_image.shape[0]/self.scaling)))\n features, img = self.hogger.hog_image(self.resized_image, visualize=False, feature_vector=False)\n features = np.array(features)\n self.find_instances_in_features(features, region)\n return self.image, self.resized_image\n else:\n for row in range(y_steps):\n off_y = region[1] + row * self.eff_step_size\n for col in range(x_steps):\n off_x = region[0]+col * self.eff_step_size\n sub_sample = self.get_resized_sub_sample(off_x, off_y)\n pred = self.classifier.classify(sub_sample)\n if(pred==1.0):\n cv2.rectangle(self.image, (off_x, off_y), (off_x+self.eff_box_size, off_y+self.eff_box_size), color=(255,255,255), thickness=2)\n self.boundings.append(((off_x, off_y), (off_x+self.eff_box_size, off_y+self.eff_box_size)))\n\n return self.image, None", "def GetOverlappingItems(self):\r\n\r\n area_bbox = self.area.GetBoundingBox()\r\n\r\n if hasattr(self.board, 'GetModules'):\r\n modules = self.board.GetModules()\r\n else:\r\n modules = self.board.GetFootprints()\r\n\r\n tracks = self.board.GetTracks()\r\n\r\n self.overlappings = 
[]\r\n\r\n for zone in self.board.Zones():\r\n if zone.GetZoneName() != self.area.GetZoneName():\r\n if zone.GetBoundingBox().Intersects(area_bbox):\r\n self.overlappings.append(zone)\r\n\r\n for item in tracks:\r\n if (type(item) is pcbnew.PCB_VIA) and (item.GetBoundingBox().Intersects(area_bbox)):\r\n self.overlappings.append(item)\r\n if type(item) is pcbnew.PCB_TRACK:\r\n self.overlappings.append(item)\r\n\r\n for item in modules:\r\n if item.GetBoundingBox().Intersects(area_bbox):\r\n for pad in item.Pads():\r\n self.overlappings.append(pad)\r\n for zone in item.Zones():\r\n self.overlappings.append(zone)\r\n\r\n # TODO: change algorithm to 'If one of the candidate area's edges overlaps with target area declare candidate as overlapping'\r\n for i in range(0, self.board.GetAreaCount()):\r\n item = self.board.GetArea(i)\r\n if item.GetBoundingBox().Intersects(area_bbox):\r\n if item.GetNetname() != self.net:\r\n self.overlappings.append(item)", "def define_overlap_operations(self):\n self._d_i = lambda q:np.roll(q,-1,axis=-1) - q\n self._d_j = lambda q:np.roll(q,-1,axis=-2) - q", "def create_pos_overlap_metric(anchor_boxes):\n y, x, h, w = np.transpose(anchor_boxes)\n y0 = y - h // 2\n x0 = x - w // 2\n y1 = y + h // 2\n x1 = x + w // 2\n\n def pos_overlap(gt_boxes):\n pos_overlaps = []\n for gt_box in gt_boxes:\n gt_y0, gt_x0, gt_y1, gt_x1 = gt_box\n gt_area = (gt_x1 - gt_x0) * (gt_y1 - gt_y0)\n int_y0 = np.maximum(gt_y0, y0)\n int_x0 = np.maximum(gt_x0, x0)\n int_y1 = np.minimum(gt_y1, y1)\n int_x1 = np.minimum(gt_x1, x1)\n int_area = np.maximum(0, int_x1 - int_x0) * np.maximum(0, int_y1 - int_y0)\n pos_overlaps.append(int_area / gt_area)\n # Group by anchor boxes\n pos_overlaps = np.transpose(pos_overlaps)\n # Get max metric index\n gt_indices = np.argmax(pos_overlaps, axis=1)\n # Choose max metric\n pos_overlaps = np.squeeze(np.take_along_axis(pos_overlaps, gt_indices[:, np.newaxis], axis=1))\n # Take respective ground-truth boxes. No reason to return indices, at least in RPN\n gt_boxes = np.take(gt_boxes, gt_indices, axis=0)\n return pos_overlaps, gt_boxes\n return pos_overlap" ]
[ "0.6413197", "0.63037324", "0.5989347", "0.5989347", "0.5989347", "0.58602446", "0.5807427", "0.5783705", "0.5727257", "0.5725394", "0.5694002", "0.5626981", "0.5622222", "0.56103057", "0.56071097", "0.5575048", "0.5568868", "0.55536425", "0.5539821", "0.5528622", "0.552516", "0.5497782", "0.5493358", "0.5492717", "0.549019", "0.5485503", "0.5477213", "0.5475142", "0.5447958", "0.5443739" ]
0.65459293
0
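The loocv_overlap entry follows a straightforward leave-one-out pattern: hold out one row of the boolean sample-by-feature matrix, threshold the mean of the remaining rows at prob to obtain a group ROI, and score it against the held-out row's ROI. A compact, self-contained restatement of that loop (assumes NumPy; the function name, the inlined Dice score, and the toy matrix are illustrative choices rather than part of the dataset entry):

import numpy as np

def loo_dice(X, prob):
    """For each row of boolean matrix X, Dice overlap between its ROI and the
    ROI that the remaining rows support with probability > prob."""
    n_samp = X.shape[0]
    scores = np.zeros(n_samp)
    for i in range(n_samp):
        roi_left = set(np.flatnonzero(X[i]))
        remain = np.delete(X, i, axis=0)
        roi_remain = set(np.flatnonzero(remain.mean(axis=0) > prob))
        denom = len(roi_left) + len(roi_remain)
        scores[i] = 2.0 * len(roi_left & roi_remain) / denom if denom else np.nan
    return scores

# Three subjects, five vertices: vertices 1 and 2 are shared by everyone.
X = np.array([[0, 1, 1, 0, 0],
              [0, 1, 1, 1, 0],
              [1, 1, 1, 0, 0]], dtype=bool)
print(loo_dice(X, prob=0.5))  # [1.0, 0.8, 0.8]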