Dataset columns:
- query: string, length 9 to 3.4k
- document: string, length 9 to 87.4k
- metadata: dict
- negatives: list, 4 to 101 items
- negative_scores: list, 4 to 101 items
- document_score: string, length 3 to 10
- document_rank: string, 102 distinct values
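The rows below follow this schema: a natural-language query, one positive code document, a metadata dict, and a pool of hard-negative code snippets with their scores. A minimal sketch of reading such rows, assuming a JSON Lines export (the file name and the numeric conversions are assumptions, not taken from the dataset itself):

import json

# Hypothetical export path; the actual storage format is an assumption here.
DATA_PATH = "code_retrieval_rows.jsonl"

def iter_rows(path=DATA_PATH):
    """Yield one dict per dataset row, with the columns listed above."""
    with open(path, encoding="utf-8") as handle:
        for line in handle:
            row = json.loads(line)
            yield {
                "query": row["query"],          # natural-language description
                "document": row["document"],    # positive code snippet
                "negatives": row["negatives"],  # hard-negative code snippets
                "negative_scores": [float(s) for s in row["negative_scores"]],
                "document_score": float(row["document_score"]),
                "document_rank": int(row["document_rank"]),
            }

Sample rows follow, in the column order above.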
Awesomeproject spreads pure awesomeness.
def entry_point():
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def project():", "def project():", "def project():", "def test_read_project(self):\n pass", "def test_read_project(self):\n pass", "def test_quick_build(self):\n pass", "def test_quick_build1(self):\n pass", "def main(self):", "def test_get_project(self):\n pass", "def main() -> None:", "def main() -> None:", "def main() -> None:", "def main() -> None:", "def main():\n pass", "def test_good_projects(self):\n # name path main_lang\n self.do_test_good('bar', 'tmp/bunny', 'py')\n self.do_test_good('banana', 'tmp/frog', 'c')\n self.do_test_good('grinch', 'tmp/abc/def')\n self.do_test_good('grinch', 'tmp/pqr')", "def main(self):\r\n pass", "def util():\n pass", "def util():\n pass", "def main():\n return", "def task_4_3_3():\n # TODO Task 4.3.3: Your code goes here\n pass", "def main(self) -> None:\n pass", "def test_get_projects(self):\n pass", "def atlas_projects():\n pass", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def project_node():", "def test_replace_project(self):\n pass", "def task_4_3_1():\n # TODO Task 4.3.1: Your code goes here\n pass", "def build(_):", "def test_get_projects_expanded(self):\n pass", "def setup(self):", "def setup(self):", "def setup(self):", "def setup(self):", "def build():", "def smarter():\r\n pass", "def main():\n\tpass", "def task_4_3_2():\n # TODO Task 4.3.2: Your code goes here\n pass", "def main(ctx, verbose):\n return", "def main(args=None):", "def main(args=None):", "def setup(self):\n pass # pragma: no cover", "def test_patch_project(self):\n pass", "def test_create_project(self):\n pass", "def test_create_project(self):\n pass", "def test_create_project(self):\n pass", "def task_4_2_1():\n # TODO Task 4.2.1: Your code goes here\n pass", "def main(args):", "def main(args):", "def project_grp():\n pass", "def task4_1(self):\n\n pass", "def test_list_project(self):\n pass", "def main(cls):\n raise NotImplementedError", "def task4(self):\n\n pass", "def main() -> None:\n return", "def sth():", "def test_add_project(self):\n pass", "def setup( self ):", "def pre_build(self):", "def simple():", "def simple():", "def configure_project():\n pass", "def task5(self):\n\n pass", "def do_p(self, arg):\n self.do_project(arg)", "def task3(self):\n\n pass", "def getProjectName():", "def main():\n global GOLIVE # If False, it's a dry run only\n global PROJECT_ROOT\n global CAD_SOURCE\n global REVIT_SOURCE\n global GENERIC_SOURCE\n global FOLDER_LIST\n global logger\n\n logger = logging.getLogger('__name__')\n stream_handler = logging.StreamHandler()\n logger.addHandler(stream_handler)\n logger.setLevel(logging.INFO)\n\n logger.debug(sys.argv)\n parser = argparse.ArgumentParser(description='Create a project')\n group = parser.add_mutually_exclusive_group()\n group.add_argument('-i', action='store_true', help=\"Show INFO messages\")\n group.add_argument('-d', action='store_true', help=\"Show DEBUG messages\")\n parser.add_argument('-t', action='store_true', help='Test: dry run only')\n parser.add_argument('-r', help=\"Set root directory\")\n parser.add_argument('project_data', nargs='+', help=\"<num>%,<name>%<type>\")\n\n args = parser.parse_args(sys.argv[1:])\n logger.debug(args)\n if args.i:\n logger.info('Setting logging level to INFO')\n logger.setLevel(logging.INFO)\n elif 
args.d:\n logger.info('Setting logging level to DEBUG')\n logger.setLevel(logging.DEBUG)\n if args.t:\n GOLIVE = False\n logger.info('Dry run...')\n if args.r:\n PROJECT_ROOT = args.r\n logger.info(f'Setting PROJECT_ROOT to {args.r}')\n\n CAD_SOURCE = os.path.join(PROJECT_ROOT, 'Templates', 'CAD_Template')\n REVIT_SOURCE = os.path.join(PROJECT_ROOT, 'Templates', 'Revit_Template')\n GENERIC_SOURCE = os.path.join(PROJECT_ROOT,\n 'Templates', 'Generic_Template')\n FOLDER_LIST = os.listdir(PROJECT_ROOT)\n project_info = ' '.join(args.project_data) # The parser split at spaces\n logger.debug(f'Project info: {project_info}')\n project_info = project_info.split('%') # Divide it into our 3 fields\n project_number, project_name, project_type = project_info\n assert project_type in ['Revit', 'CAD', 'Generic']\n\n if checkNewProject(project_number, project_name): # Sanity checks\n success = createProject(project_number, project_name, project_type)\n if success:\n logger.info(f'Created project {project_number} {project_name}')\n else:\n logger.error('Project creation failed.')", "def _build(self):", "def _build(self):", "def _setup(self):", "def _setup(self):", "def test_list_projects(self):\n pass", "def test_list_projects(self):\n pass", "def test_4_4_1_1(self):\n pass", "def basic_project(tmp_path):\n build_dir = tmp_path / BUILD_DIRNAME\n build_dir.mkdir()\n\n # the metadata\n metadata_data = {\n \"name\": \"name-from-metadata\",\n \"summary\": \"test-summ\",\n \"description\": \"text\",\n }\n metadata_file = tmp_path / \"metadata.yaml\"\n metadata_raw = yaml.dump(metadata_data).encode(\"ascii\")\n metadata_file.write_bytes(metadata_raw)\n\n # a lib dir\n lib_dir = tmp_path / \"lib\"\n lib_dir.mkdir()\n ops_lib_dir = lib_dir / \"ops\"\n ops_lib_dir.mkdir()\n ops_stuff = ops_lib_dir / \"stuff.txt\"\n ops_stuff.write_bytes(b\"ops stuff\")\n\n # simple source code\n src_dir = tmp_path / \"src\"\n src_dir.mkdir()\n charm_script = src_dir / \"charm.py\"\n charm_script.write_bytes(b\"all the magic\")\n\n # the license file\n license = tmp_path / \"LICENSE\"\n license.write_text(\"license content\")\n\n # other optional assets\n icon = tmp_path / \"icon.svg\"\n icon.write_text(\"icon content\")\n\n # README\n readme = tmp_path / \"README.md\"\n readme.write_text(\"README content\")\n\n yield tmp_path", "def example_eleven():\n # TODO\n pass", "def setup(self) -> None:", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass" ]
[ "0.75060505", "0.75060505", "0.75060505", "0.61421615", "0.61421615", "0.6136309", "0.6004166", "0.6001538", "0.59665745", "0.5925064", "0.5925064", "0.5925064", "0.5925064", "0.59186715", "0.59168774", "0.59003806", "0.58567214", "0.58567214", "0.5793911", "0.57907754", "0.57879025", "0.57849306", "0.5784113", "0.5778351", "0.5778351", "0.5778351", "0.5778351", "0.5778351", "0.5778351", "0.5778351", "0.5778351", "0.5778351", "0.5778351", "0.5778351", "0.5778351", "0.5778351", "0.5778351", "0.5778351", "0.5778351", "0.5778351", "0.5778351", "0.5778351", "0.5778351", "0.5778351", "0.5778351", "0.5765753", "0.57553786", "0.5739018", "0.57358813", "0.5714344", "0.5711453", "0.5711453", "0.5711453", "0.5711453", "0.5677852", "0.5662349", "0.5656529", "0.5656479", "0.5645834", "0.5638606", "0.5638606", "0.5630229", "0.5600039", "0.5592146", "0.5592146", "0.5592146", "0.5582484", "0.55756927", "0.55756927", "0.5568531", "0.5566257", "0.55498755", "0.5536042", "0.5520087", "0.55167884", "0.5493901", "0.54922956", "0.5479876", "0.54698896", "0.5465671", "0.5465671", "0.5463931", "0.5454331", "0.5444645", "0.5425106", "0.5423704", "0.5404873", "0.5396236", "0.5396236", "0.5395375", "0.5395375", "0.5392697", "0.5392697", "0.5392407", "0.5391976", "0.5391547", "0.53755933", "0.53643215", "0.53643215", "0.53643215", "0.53643215" ]
document_score: 0.0
document_rank: -1
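Each row's metadata declares a triplet objective over (query, document, negatives). A minimal sketch of expanding one row into (anchor, positive, negative) training triplets; the helper name and the optional truncation are illustrative assumptions:

def row_to_triplets(row, max_negatives=None):
    """Expand one dataset row into (anchor, positive, negative) triplets."""
    query = row["query"]        # anchor: the natural-language description
    positive = row["document"]  # positive: the matching code snippet
    negatives = row["negatives"]
    if max_negatives is not None:
        negatives = negatives[:max_negatives]
    return [(query, positive, negative) for negative in negatives]

# For the first row above, this pairs the query
# "Awesomeproject spreads pure awesomeness." with the positive
# "def entry_point():" against each of its hard negatives.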
Merge two lists sorted in descending order.
def merge(a, b):
    # your code here
    m = []
    i, j = 0, 0
    while i < len(a) and j < len(b):
        if a[i] < b[j]:
            m.append(a[i])
            i += 1
        else:
            m.append(b[j])
            j += 1
    m += a[i:] + b[j:]
    return m
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def merge ( list1, list2 ):\n new_list = []\n while len(list1)>0 and len(list2)>0:\n if list1[0] < list2[0]:\n new_list.append (list1[0])\n del list1[0]\n else:\n new_list.append (list2[0])\n del list2[0]\n return new_list + list1 + list2", "def merge(l1, l2):\n\n #Reverse the lists\n l1 = list(reversed(l1))\n l2 = list(reversed(l2))\n\n ret = []\n\n while True:\n # If either list is empty, reverse the other one and append it to the end\n if not l1:\n ret.extend(reversed(l2))\n return ret\n if not l2:\n ret.extend(reversed(l1))\n return ret\n\n # Append the lowest last element of the two lists\n ret.append(l1.pop() if l1[-1] < l2[-1] else l2.pop())", "def merge(list_a, list_b):\n new_list = []\n i = 0\n j = 0\n while (i < len(list_a) and j < len(list_b)):\n if(list_a[i] < list_b[j]):\n new_list.append(list_a[i])\n i += 1\n else:\n new_list.append(list_b[j])\n j += 1\n new_list += list_a[i:]\n new_list += list_b[j:]\n\n return new_list", "def merge(list1, list2): \n result = []\n copy1, copy2 = list1[:], list2[:]\n \n while min(copy1, copy2):\n if copy1[0] < copy2[0]:\n result.append(copy1[0])\n copy1.pop(0)\n else:\n result.append(copy2[0])\n copy2.pop(0)\n \n if copy1:\n result += copy1\n elif copy2:\n result += copy2\n \n return result", "def merge(l1, l2):\n i = j = 0\n output = []\n\n while i < len(l1) and j < len(l2):\n if l1[i] <= l2[j]:\n output.append(l1[i])\n i += 1\n else:\n output.append(l2[j])\n j += 1\n\n output.extend(l1[i:] + l2[j:])\n\n return output", "def merge(l1,l2):\n\n result = []\n\n while l1 and l2:\n if l1[0] < l2[0]:\n result.append(l1.pop(0))\n else:\n result.append(l2.pop(0))\n\n while l1:\n result.append(l1.pop(0))\n\n while l2:\n result.append(l2.pop(0)) \n\n return result", "def merge_sort(a, b):\n l = []\n while a and b:\n if a[0] < b[0]:\n l.append(a.pop(0))\n else:\n l.append(b.pop(0))\n return l + a + b", "def merge(list1, list2):\n answer = []\n assert answer == sorted(answer)\n\n idx1 = 0\n idx2 = 0\n while (idx1 < len(list1)) and (idx2 < len(list2)):\n if list1[idx1] < list2[idx2]:\n answer.append(list1[idx1])\n idx1 += 1\n elif list1[idx1] > list2[idx2]:\n answer.append(list2[idx2])\n idx2 += 1\n else:\n answer.append(list1[idx1])\n answer.append(list2[idx2])\n idx1 += 1\n idx2 += 1\n assert answer == sorted(answer)\n\n answer.extend(list1[idx1:])\n answer.extend(list2[idx2:])\n\n assert answer == sorted(answer)\n return answer", "def merge(lst1, lst2):\n\n results = []\n i = 0\n j = 0\n\n while i <= len(lst1) - 1 and j <= len(lst2) - 1:\n\n if lst1[i] < lst2[j]:\n results.append(lst1[i])\n i += 1\n else:\n results.append(lst2[j])\n j += 1\n\n if i == len(lst1):\n results.extend(lst2[j:])\n else:\n results.extend(lst1[i:])\n\n return results", "def merge(list1, list2):\n res = []\n index_i, index_j = 0, 0\n while index_i < len(list1) and index_j < len(list2):\n if list1[index_i] <= list2[index_j]:\n res.append(list1[index_i])\n index_i += 1\n else:\n res.append(list2[index_j])\n index_j += 1\n res += list1[index_i:]\n res += list2[index_j:]\n return res", "def merge(list1, list2):\n result_list = []\n list1_length = len(list1)\n list2_length = len(list2)\n list1_index = 0\n list2_index = 0\n while list1_index < list1_length and list2_index < list2_length:\n if list1[list1_index] <= list2[list2_index]:\n result_list.append(list1[list1_index])\n list1_index = list1_index + 1\n else:\n result_list.append(list2[list2_index])\n list2_index = list2_index + 1\n \n if list1_index < list1_length:\n result_list.extend(list1[list1_index:])\n if list2_index < 
list2_length:\n result_list.extend(list2[list2_index:])\n \n return result_list", "def _merge_two_sorted_list(sorted_list_head, sorted_list_tail):\n sorted_list_result = list()\n head_index = 0\n tail_index = 0\n len_head = len(sorted_list_head)\n len_tail = len(sorted_list_tail)\n\n while head_index < len_head and tail_index < len_tail:\n print(sorted_list_head, ' : ', sorted_list_tail)\n if sorted_list_head[head_index] < sorted_list_tail[tail_index]:\n sorted_list_result.append(sorted_list_head[head_index])\n head_index += 1\n elif sorted_list_head[head_index] > sorted_list_tail[tail_index]:\n sorted_list_result.append(sorted_list_tail[tail_index])\n tail_index += 1\n elif sorted_list_head[head_index] == sorted_list_tail[tail_index]:\n sorted_list_result.append(sorted_list_head[head_index])\n sorted_list_result.append(sorted_list_tail[tail_index])\n head_index += 1\n tail_index += 1\n\n if head_index < len_head:\n sorted_list_result.extend(sorted_list_head[head_index:])\n elif tail_index < len_tail:\n sorted_list_result.extend(sorted_list_tail[tail_index:])\n\n return sorted_list_result", "def merge_ordered_list(in_list1: list, in_list2: list) -> list:\n _list1 = in_list1.copy()\n _list2 = in_list2.copy()\n _output_list = []\n idx_2 = 0\n for element in _list1:\n while idx_2 < len(_list2) and element > _list2[idx_2]:\n _output_list.append(_list2[idx_2])\n idx_2 += 1\n _output_list.append(element)\n while idx_2 < len(_list2):\n _output_list.append(_list2[idx_2])\n idx_2 += 1\n return _output_list", "def merge(a, b):\n result = []\n\n # Append smallest values to result until either list is exhausted\n i = j = 0\n while i < len(a) and j < len(b):\n if compare(a[i], b[j]) < 0:\n result.append(a[i])\n i += 1\n else:\n result.append(b[j])\n j += 1\n\n # Append all remaining values from the unexhausted list\n if i < len(a):\n result.extend(a[i:])\n else:\n result.extend(b[j:])\n\n return result", "def merge_lists(a_lst, b_lst):\n\n i = 0\n j = 0\n merged_list = []\n while i < len(a_lst) and j < len(b_lst):\n \n if a_lst[i] < b_lst[j]:\n merged_list.append(a_lst[i])\n i += 1\n else:\n merged_list.append(b_lst[j])\n j += 1\n if i < len(a_lst):\n merged_list.extend(a_lst[i:])\n if j < len(b_lst):\n merged_list.extend(b_lst[j:])\n return merged_list", "def merge_in(list_a: list, list_b: list):\n end_a = 0\n\n while list_a[end_a] is not None:\n end_a += 1\n end_a -= 1\n\n assert (end_a + len(list_b) < len(list_a))\n\n a_index = end_a\n b_index = len(list_b) - 1\n\n for k in range(len(list_a) - 1, -1, -1):\n if b_index < 0 or (a_index >= 0 and list_a[a_index] > list_b[b_index]):\n list_a[k] = list_a[a_index]\n a_index -= 1\n else:\n list_a[k] = list_b[b_index]\n b_index -= 1", "def merge(list1: list, list2: list) -> list:\r\n result = []\r\n i = 0\r\n j = 0\r\n # Iterate through each element and append the smaller element of each list to the resulting list.\r\n while i < len(list1) and j < len(list2):\r\n if list1[i] < list2[j]:\r\n result.append(list1[i])\r\n i += 1\r\n else:\r\n result.append(list2[j])\r\n j += 1\r\n\r\n # Append the remaining lists to the resulting list.\r\n result.extend(list1[i:])\r\n result.extend(list2[j:])\r\n return result", "def merge_lists(list_1, list_2):\n if len(list_1) == 0:\n return list_2\n if len(list_2) == 0:\n return list_1\n\n new_list = []\n length = len(list_1) + len(list_2)\n while len(new_list) < length:\n if len(list_1) == 0:\n new_list = new_list + list_2\n elif len(list_2) == 0:\n new_list = new_list + list_1\n\n elif list_1[0] < list_2[0]:\n 
new_list.append(list_1[0])\n list_1.remove(list_1[0])\n elif list_1[0] >= list_2[0]:\n new_list.append(list_2[0])\n list_2.remove(list_2[0])\n return new_list", "def merge(list1, list2): \r\n if len(list1) == 0 or len(list2) == 0:\r\n new_list = [item for item in list1]\r\n new_list.extend(list2)\r\n return new_list\r\n else:\r\n if list1[0] <= list2[0]:\r\n new_list = list([list1[0]])\r\n new_list.extend(merge(list1[1:], list2))\r\n return new_list\r\n else:\r\n new_list = list([list2[0]])\r\n new_list.extend(merge(list1, list2[1:]))\r\n return new_list", "def merge(list1: list, list2: list) -> list:\n output = []\n i, j = 0, 0\n while i < len(list1) and j < len(list2):\n if list1[i][1] <= list2[j][1]:\n output += [list1[i]]\n i += 1\n else:\n output += [list2[j]]\n j += 1\n return output + list1[i:] + list2[j:]", "def merge(left_sort_list, right_sort_list):\n left_index = 0\n right_index = 0\n left_len = len(left_sort_list)\n right_len = len(right_sort_list)\n temp_list = []\n while left_index <= left_len - 1 and right_index <= right_len - 1:\n if left_sort_list[left_index] <= right_sort_list[right_index]:\n temp_list.append(left_sort_list[left_index])\n left_index += 1\n else:\n temp_list.append(right_sort_list[right_index])\n right_index += 1\n if left_index == left_len:\n temp_list += right_sort_list[right_index:]\n else:\n temp_list += left_sort_list[left_index:]\n return temp_list", "def merge(list1, list2):\n merged = []\n if len(list1) < 1 or len(list2) <1:\n return list1 + list2\n else:\n ind_1 = 0\n ind_2 = 0\n while ind_1 < len(list1) and ind_2 < len(list2):\n #some appends to lists\n if list1[ind_1] < list2[ind_2]:\n merged.append(list1[ind_1])\n ind_1 += 1\n elif list2[ind_2] < list1[ind_1]:\n merged.append(list2[ind_2])\n ind_2 += 1\n elif list1[ind_1] == list2[ind_2]:\n merged.append(list1[ind_1])\n merged.append(list2[ind_2])\n ind_1 += 1\n ind_2 += 1\n #if reach end of one list, copy the remainder of the other\n if ind_1 >= len(list1) and ind_2 < len(list2):\n merged += list2[ind_2:]\n ind_2 = len(list2)\n elif ind_2 >= len(list2) and ind_1 < len(list1):\n merged += list1[ind_1:]\n ind_1 = len(list1)\n return merged", "def merge(list_1, list_2):\n l1, l2 = len(list_1), len(list_2) # Store the length of each list\n merged_output = [None for i in range(l1 + l2)]\n i, j = 0, 0\n # Compare each element of the two lists till one of them is exhausted\n while i < l1 and j < l2:\n if list_1[i] <= list_2[j]:\n merged_output[i + j] = list_1[i]\n i += 1\n else:\n merged_output[i + j] = list_2[j]\n j += 1\n\n # Check if list_1 is exhausted, add remaining element to the output\n for j in range(j, l2):\n merged_output[i + j] = list_2[j]\n\n # Check if list_2 is exhausted, add remaining element to the output\n for i in range(i, l1):\n merged_output[i + j] = list_1[i]\n\n # print(merged_output)\n return merged_output", "def merge_two(l, r):\n new = []\n i1, i2 = 0, 0\n while i1 != len(l) and i2 != len(r):\n if l[i1] < r[i2]:\n new.append(l[i1])\n i1 += 1\n else:\n new.append(r[i2])\n i2 += 1\n\n new.extend(l[i1:])\n new.extend(r[i2:])\n return new", "def merge(a,b):\n c = []\n while len(a) != 0 and len(b) != 0:\n if a[0] < b[0]:\n c.append(a[0])\n a.remove(a[0])\n else:\n c.append(b[0])\n b.remove(b[0])\n if len(a) == 0:\n c += b\n else:\n c += a\n return c", "def merge_reversed(left, right):\n \n merged = []\n left_index = 0\n right_index = 0\n \n while left_index < len(left) and right_index < len(right):\n if left[left_index] > right[right_index]:\n merged.append(left[left_index])\n 
left_index += 1\n else:\n merged.append(right[right_index])\n right_index += 1\n\n merged += left[left_index:]\n merged += right[right_index:]\n \n return merged", "def merge(list1, list2):\n merge_list = []\n l1_copy = list(list1)\n l2_copy = list(list2)\n\n # cycling through list1 and list2: we check the first element in\n # list2, if it's smaller than the first element in list1 we copy it to\n # the merge list and pop it out of list2. Else we break the loop and\n # copy the first element of list1, then pop it and proceed again\n while l1_copy:\n while l2_copy:\n if l2_copy[0] < l1_copy[0]:\n merge_list.append(l2_copy[0])\n l2_copy.pop(0)\n else:\n break\n merge_list.append(l1_copy[0])\n l1_copy.pop(0)\n\n # if list2 is not empty once list1 is, add the remaining elements to the\n # end of the merge list\n if l2_copy:\n merge_list.extend(l2_copy)\n\n return merge_list", "def merge(left, right):\n ret = []\n li = ri = 0\n while li < len(left) and ri < len(right):\n if left[li] <= right[ri]:\n ret.append(left[li])\n li += 1\n else:\n ret.append(right[ri])\n ri += 1\n if li == len(left):\n ret.extend(right[ri:])\n else:\n ret.extend(left[li:])\n return ret", "def merge(left, right):\n\n # Initializing pointers.\n leftPtr = 0\n rightPtr = 0\n result = []\n\n # Merging and sorting two sublists.\n while leftPtr < len(left) and rightPtr < len(right):\n if left[leftPtr][0] < right[rightPtr][0] or \\\n (left[leftPtr][0] == right[rightPtr][0] and left[leftPtr][1] < right[rightPtr][1]):\n result.append(left[leftPtr])\n leftPtr += 1\n else:\n result.append(right[rightPtr])\n rightPtr += 1\n\n # Extending the leftover in the sublists.\n if leftPtr < len(left):\n result.extend(left[leftPtr:])\n elif rightPtr < len(right):\n result.extend(right[rightPtr:])\n\n return result", "def merge_alt(nums1, nums2):\r\n length = len(nums1)\r\n i = 0\r\n while nums2 and i < length:\r\n element = nums2.pop(0)\r\n if element < nums1[i]:\r\n nums1.insert(i, element)\r\n else:\r\n nums1.insert(i + 1, element)\r\n i += 1\r\n length += 1\r\n i += 1\r\n nums1 += nums2", "def merge(left, right):\n new = []\n left_index, right_index = 0, 0\n len_left, len_right = len(left), len(right)\n while left_index < len_left and right_index < len_right:\n if left[left_index] <= right[right_index]:\n new.append(left[left_index])\n left_index += 1\n else:\n new.append(right[right_index])\n right_index += 1\n new += left[left_index:]\n new += right[right_index:]\n return new", "def merge(left, right):\n\n ret = []\n\n while len(left) != 0 and len(right) != 0:\n if left[0] <= right[0]:\n ret.append(left.pop(0))\n else:\n ret.append(right.pop(0))\n\n while len(left) != 0:\n ret.append(left.pop(0))\n \n while len(right) != 0:\n ret.append(right.pop(0))\n \n return ret", "def merge(first, second, reverse):\r\n final = [0]*(len(first)+len(second))\r\n i = 0\r\n j = 0\r\n if reverse:\r\n while i+j < len(final):\r\n if (j >= len(second)) or ((i < len(first)) and (first[i] > second[j])):\r\n final[i+j] = first[i]\r\n i += 1\r\n else:\r\n final[i+j] = second[j]\r\n j += 1\r\n else:\r\n while i+j < len(final):\r\n if (j >= len(second)) or ((i < len(first)) and (first[i] < second[j])):\r\n final[i+j] = first[i]\r\n i += 1\r\n else:\r\n final[i+j] = second[j]\r\n j += 1\r\n return final", "def merge(items1, items2):\n # TODO: Running time: O(n + m), where n is the size of items 1 and m is the size of items 2\n # TODO: Memory usage: ??? 
Why and under what conditions\n # TODO: Repeat until one list is empty\n left_index = 0\n right_index = 0\n merge_list = []\n while (left_index < len(items1)) and (right_index < len(items2)):\n # TODO: Find minimum item in both lists and append it to new list\n if items1[left_index] > items2[right_index]:\n merge_list.append(items2[right_index])\n right_index += 1\n elif items1[left_index] < items2[right_index]:\n merge_list.append(items1[left_index])\n left_index += 1\n elif items1[left_index] == items2[right_index]:\n merge_list.append(items1[left_index])\n merge_list.append(items2[right_index])\n right_index += 1\n left_index += 1\n # TODO: Append remaining items in non-empty list to new list\n if left_index == len(items1):\n merge_list.extend(items2[right_index:])\n elif right_index == len(items2):\n merge_list.extend(items1[left_index:])\n\n # Alternate solution\n # Add remaining items to merge_sort list from either items1 or items2\n # Only one is guaranteed to run \n # for index in range(left_index, len(items1)):\n # merge_sort.append(index)\n\n # for index in range(right_index, len(items1)): \n # merge_sort.append(index)\n return merge_list", "def merge_lists_w_ordering(a: List[Any], b: List[Any]) -> List[Any]:\n overlap = set(a).intersection(b)\n\n result = []\n\n current, other = iter(a), iter(b)\n\n while True:\n for element in current:\n if element in overlap:\n overlap.discard(element)\n other, current = current, other\n break\n\n result.append(element)\n else:\n result.extend(other)\n break\n\n return result", "def merge(a: List[int], b: List[int]) -> List[int]:\n merged = []\n i = j = 0\n alen = len(a)\n blen = len(b)\n while i < alen or j < blen:\n aval = a[i] if i < alen else float(\"inf\")\n bval = b[j] if j < blen else float(\"inf\")\n if aval <= bval:\n merged.append(a[i])\n i += 1\n else:\n merged.append(b[j])\n j += 1\n return merged", "def merge(lst1, lst2):\n if not lst1 or not lst2:\n return lst1 + lst2\n elif lst1[0] < lst2[0]:\n return [lst1[0]] + merge(lst1[1:], lst2)\n else:\n return [lst2[0]] + merge(lst1, lst2[1:])", "def merge(arr1, arr2):\n out = []\n # Iterate while neither list is empty\n while arr1 and arr2:\n # Compare heads, pop smallest head and append to output\n if arr1[0] <= arr2[0]:\n out.append(arr1.pop(0))\n else:\n out.append(arr2.pop(0))\n # Concat whichever array has more elements\n if arr1:\n out.extend(arr1)\n else:\n out.extend(arr2)\n return out", "def merge(left, right):\n\n ## if the list is empty\n if not len(left) or not len(right):\n return left or right\n\n ## merge the list in sorted manner\n result = []\n i, j = 0, 0\n while (len(result) < len(left) + len(right)):\n if left[i] < right[j]:\n result.append(left[i])\n i+= 1\n else:\n result.append(right[j])\n j+= 1\n if i == len(left) or j == len(right):\n result.extend(left[i:] or right[j:])\n break\n \n return result", "def merge(left, right):\n aList = []\n lt = 0\n rt = 0\n\n #Repeatedly move the smallest of left and right to the new list\n while lt < len(left) and rt < len(right):\n if left[lt] < right[rt]:\n aList.append(left[lt])\n lt += 1\n else:\n aList.append(right[rt])\n rt += 1\n\n #There will only be elements left in one of the original two lists.\n\n #Append the remains of left (lt..end) on to the new list.\n while lt < len(left):\n aList.append(left[lt])\n lt += 1\n \n #Append the remains of right (rt..end) on to the new list.\n while rt < len(right):\n aList.append(right[rt])\n rt += 1\n\n return aList", "def merge(items1, items2):\n # TODO: Repeat until one list is 
empty\n # TODO: Find minimum item in both lists and append it to new list\n # TODO: Append remaining items in non-empty list to new list\n sorted_list = []\n while len(items1) > 0 and len(items2) > 0:\n if items1[0] > items2[0]:\n sorted_list.append(items2.pop(0))\n else:\n sorted_list.append(items1.pop(0))\n sorted_list.extend(items1)\n del items1\n sorted_list.extend(items2)\n del items2\n return sorted_list\n\n # front = 0\n # back = (len(items1) - 1)\n # while len(items2) > 0:\n # value = items2.pop()\n # while front <= back:\n # pivot = ((front + back) // 2)\n # # if p f and b all equal the same index\n # if front == back:\n # # if the value is greater append at the back\n # if value > items1[back]:\n # items1.insert(back + 1, value)\n # break\n # # if the value is less than insert at index 0\n # if items1[back] < value:\n # items1.insert(0, value)\n # break\n # # if the value is equal to the value insert at index 0\n # # if f, p, and b are greater than the value\n # if items1[front] > value:\n # # insert the value before f and p\n # items1.insert(front, value)\n # break\n # # if b, p, and f are less than the value\n # if items1[back] < value:\n # # insert the value after b and p\n # items1.insert(back + 1, value)\n # break\n # if items1[pivot] > value:\n # back = pivot - 1\n # elif items1[pivot] < value:\n # front = pivot + 1\n # elif items1[pivot] == value:\n # items1.insert(pivot + 1, value)\n # break\n # front = 0\n # back = (len(items1) - 1)\n # return items1", "def merge_two_sorted_lists(lst1, lst2):\n\n dummy_head = tail = ListNode() # head and tail start pointing to the same dummy node, then tail converges\n while lst1 and lst2:\n if lst1.data < lst2.data:\n tail.next = lst1 # the FIRST tail.next node is where the actual merge begins\n lst1 = lst1.next\n else:\n tail.next = lst2\n lst2 = lst2.next\n tail = tail.next\n # append the remaining nodes of list 1 or list 2\n tail.next = lst1 or lst2 # when one list becomes None, the 'or' returns the remaining nodes of the other\n return dummy_head.next # dummy_head.next is the node appended with the FIRST tail.next statement", "def merge(arr1,arr2):\n i = 0\n j = 0\n new_list = []\n while i < len(arr1) and j < len(arr2):\n if arr1[i] <= arr2[j]:\n new_list.append(arr1[i])\n i += 1\n else:\n new_list.append(arr2[j])\n j += 1\n if i == len(arr1):\n new_list.extend(arr2[j:])\n if j == len(arr2):\n new_list.extend(arr1[i:])\n return new_list", "def merge(items1, items2):\r\n # TODO: Repeat until one list is empty\r\n # TODO: Find minimum item in both lists and append it to new list\r\n # TODO: Append remaining items in non-empty list to new list\r", "def merge(l1, l2):\n # Edge cases, where nothing is to be done.\n if l1 is None and l2 is None: return l1\n if l1 is None: return l2\n if l2 is None: return l1\n\n # Vars to hold,\n # head -> a dummy head to keep a reference to the start of the merged\n # list.\n # _iter -> to move through the merged list.\n head = ListNode(float('-inf'))\n _iter = head\n\n # As long as both the lists are not exhausted,\n while l1 and l2:\n\n # Make the next of _iter as the smaller node.\n if l1.val <= l2.val:\n _iter.next = l1\n l1 = l1.next\n else:\n _iter.next = l2\n l2 = l2.next\n # Move _iter forward.\n _iter = _iter.next\n\n # If either of the lists remain, add them to the end,\n # Note: at-least one of the lists would be exhausted by now,\n # and the remaining one is sorted in itself, which is why this works.\n if not l1: _iter.next = l2\n if not l2: _iter.next = l1\n\n # Return a reference to the 
start of the merged list.\n return head.next", "def merge(self, A: List[int], m: int, B: List[int], n: int) -> None:\n A[m:] = B\n A.sort()", "def merge():\n a = []\n b = []\n c = []\n d = []\n print \"Enter number of elements in first list\"\n n = int(raw_input())\n print \"enter number of elements in second list\"\n m = int(raw_input())\n print \"Now Enter the elements of first list\"\n for k in range(n):\n a.append(raw_input(\"enter an element:\"))\n print \"Now Enter the elements of second list\"\n for l in range(m):\n b.append(raw_input(\"enter an element:\"))\n\n print a\n print b\n \n for i in a:\n c.append(i)\n for j in b:\n c.append(j)\n length = m + n\n for p in range(length):\n temp = c[0]\n for i in c:\n if int(i) <= int(temp):\n temp = i\n d.append(temp)\n c.remove(temp)\n \n \n print 'The merged list in increasing order is:',d", "def merge(left, right):\n\tl = []\n\ti = 0\n\tj = 0\n\n\twhile i < len(left) and j < len(right):\n\t\tif left[i] < right[j]:\n\t\t\tl.append(left[i])\n\t\t\ti += 1\n\t\telse:\n\t\t\tl.append(right[j])\n\t\t\tj += 1\n\n\twhile i < len(left):\n\t\tl.append(left[i])\n\t\ti += 1\n\n\twhile j < len(right):\n\t\tl.append(right[j])\n\t\tj += 1\n\n\treturn l", "def reverse_sort_lists(list_1, list_2):\n list_1_sorted, list_2_sorted = zip(*sorted(zip(list_1, list_2), key=operator.itemgetter(0), reverse=True))\n return list_1_sorted, list_2_sorted", "def merge(self, nums1: List[int], m: int, nums2: List[int], n: int) -> None:\n nums1[:]=sorted((nums1[:m]+nums2[:n]))", "def merge(first_list, second_list):\r\n result_list = []\r\n\r\n def check_for_group():\r\n \"\"\"Inner function,so that it has access to merges' local variables,\r\n that checks for groups\"\"\"\r\n if first_list[0][0] == second_list[0][0]:\r\n try:\r\n result = first_list[0][0], str(int(first_list[0][1]) + int(second_list[0][1]))\r\n except ValueError:\r\n result = first_list[0][0], str(float(first_list[0][1]) + float(second_list[0][1]))\r\n result_list.append(result)\r\n first_list.remove(first_list[0])\r\n second_list.remove(second_list[0])\r\n return True\r\n return False\r\n\r\n while first_list and second_list:\r\n if first_list[0] > second_list[0]:\r\n if not check_for_group():\r\n result_list.append(second_list[0])\r\n second_list.remove(second_list[0])\r\n else:\r\n if not check_for_group():\r\n result_list.append(first_list[0])\r\n first_list.remove(first_list[0])\r\n empty_lists(first_list, second_list, result_list)\r\n return result_list", "def _merge_lists(list1, list2):\n for v2 in reversed(list2):\n if isinstance(v2, Descriptor):\n if v2 in list1:\n v1 = list1.pop(list1.index(v2))\n list1.insert(0, v1.merge(v2))\n else:\n list1.insert(0, v2)\n elif isinstance(v2, list):\n raise CekitError(\"Cannot merge list of lists\")\n else:\n if v2 not in list1:\n list1.insert(0, v2)\n\n return list1", "def _merge(S1, S2, mylist):\n i = 0\n j = 0\n while i + j < len(mylist):\n if j == len(S2) or (i < len(S1) and S1[i] < S2[j]):\n mylist[i+j] = S1[i] # Copy ith element of S1 as next item of mylist\n i += 1\n else:\n mylist[i+j] = S2[j] # Copy jth element of S2 as next item of mylist\n j += 1", "def merge_sort(list1):\n if len(list1) <= 1:\n answer = list(list1)\n assert answer == sorted(answer)\n return answer\n\n mid = len(list1) // 2\n\n list_low = merge_sort(list1[0:mid])\n list_high = merge_sort(list1[mid:])\n\n answer = merge(list_low, list_high)\n assert answer == sorted(answer)\n return answer", "def merge(a1, a2):\n\n i, j = 0, 0\n result = [] # resulting array\n while i < len(a1) and j 
< len(a2): # both array have iterables\n if a1[i] < a2[j]:\n result.append(a1[i])\n i += 1\n elif a1[i] > a2[j]:\n result.append(a2[j])\n j += 1\n else:\n result.append(a1[i])\n result.append(a2[j])\n i += 1\n j += 1\n\n if i == len(a1): # array a1 was exhaused, append the remaining contents of the second array to the result\n result.extend(a2[j:])\n if j == len(a2): # array a2 was exhaused, append the remaining contents of the first array to the result\n result.extend(a1[i:])\n\n return result", "def merge(arr1, arr2):\n\tres = []\n\n\ti = j = 0\n\n\twhile i< len(arr1) and j < len(arr2):\n\t\tif arr1[i] < arr2[j]:\n\t\t\tres.append(arr1[i])\n\t\t\ti+=1\n\t\telse:\n\t\t\tres.append(arr2[j])\n\t\t\tj+=1\n\n\twhile i < len(arr1):\n\t\tres.append(arr1[i])\n\t\ti +=1\n\n\twhile j < len(arr2):\n\t\tj +=1\n\t\tres.append(arr2[j])\n\n\treturn res", "def merge(left_list, right_list):\n if not len(left_list) or not len(right_list):\n return left_list or right_list\n\n result = []\n i, j = 0, 0\n left_trips_dict = {trip.trip_id: trip for trip in left_list}\n right_trips_dict = {trip.trip_id: trip for trip in right_list}\n while (len(result) < len(left_list) + len(right_list)):\n ranked_two_trips_ids = fixtures.rank_trips([left_list[i],right_list[j]])\n # if ids[0] belogs to left, ad the trip of id[0] to result and inc the left\n if ranked_two_trips_ids[0] in left_trips_dict.keys():\n result.append(left_trips_dict[ranked_two_trips_ids[0]])\n i+= 1\n else:\n result.append(right_trips_dict[ranked_two_trips_ids[0]])\n j+= 1\n if i == len(left_list) or j == len(right_list):\n result.extend(left_list[i:] or right_list[j:])\n break \n return result", "def merge(sorted_left, sorted_right):\n merged = []\n idx_left, idx_right = 0, 0\n \n while idx_left < len(sorted_left) and idx_right < len(sorted_right):\n if sorted_left[idx_left] <= sorted_right[idx_right]:\n merged.append(sorted_left[idx_left])\n idx_left += 1\n else:\n merged.append(sorted_right[idx_right])\n idx_right += 1\n \n # Append the remaining to merged\n # If you want to determine which half remains\n \"\"\"\n if idx_left < len(sorted_left):\n merged.extend(sorted_left[idx_left:])\n else:\n merged.extend(sorted_right[idx_right:])\n \"\"\"\n \n merged.extend(sorted_left[idx_left:])\n merged.extend(sorted_right[idx_right:])\n return merged", "def merge_sorted_list(left_sublist,right_sublist):\n left_index=right_index=0\n sorted_list=[]\n base_list_length=len(left_sublist)+len(right_sublist)\n while len(sorted_list)<base_list_length:\n if left_sublist[left_index]<right_sublist[right_index]:\n sorted_list.append(left_sublist[left_index])\n left_index+=1\n else:\n sorted_list.append(right_sublist[right_index])\n right_index+=1\n \n if left_index==len(left_sublist):\n sorted_list+=right_sublist[right_index:]\n break\n if right_index==len(right_sublist):\n sorted_list+=left_sublist[left_index:]\n break\n \n return sorted_list", "def merge(nums1,n,nums2,m):\r\n \r\n #intuitive O('n' + nlogn) time solution\r\n \r\n nums1[m:] = nums2[:n]\r\n nums1.sort()", "def merge(list1, list2):\n holding = list1.to_list()\n [holding.append(i) for i in list2.to_list()]\n # for i in list2.to_list():\n # holding.append(i)\n holding = sorted(holding)\n\n output = LinkedList(Node(holding[0]))\n for i in holding[1:]:\n output.append(i)\n return output", "def merge(left, right):\r\n \r\n ls = []\r\n i = 0\r\n j = 0\r\n \r\n while i < len(left) and j < len(right):\r\n if left[i] < right[j]:\r\n ls.append(left[i])\r\n i += 1\r\n else:\r\n ls.append(right[j])\r\n j += 1\r\n 
\r\n \r\n while i < len(left):\r\n ls.append(left[i])\r\n i += 1\r\n \r\n while j < len(right):\r\n ls.append(right[j])\r\n j += 1\r\n \r\n return ls", "def merge(left,right):\n result = []\n comparision_count = 0\n left_index , right_index = 0 , 0\n # Compare elements of one list with another until we run out of atleast one list\n while left_index < len(left) and right_index < len(right):\n comparision_count = comparision_count + 1\n if left[left_index] < right[right_index]:\n result.append(left[left_index])\n left_index = left_index + 1\n else:\n result.append(right[right_index])\n right_index = right_index + 1\n # Appending the rest of the elements to the result\n for element in left[left_index:]:\n result.append(element)\n for element in right[right_index:]:\n result.append(element)\n return (result,comparision_count)", "def merge(l, s1, l1, s2, l2):\n nonlocal c, r, w\n\n # Create temporary list to store sorted value\n tempList = l.copy() \n\n # Compare pairs of values of two list, start from the first element\n i = s1 # Beginning of the left list\n j = s2 # Beginning of the right list\n k = 0\n\n # Compare and add to temporary list\n c += 2\n while i <= l1 and j <= l2: \n c += 3\n r += 2 \n w += 1 \n if l[i] < l[j]:\n tempList[k] = l[i]\n i = i + 1\n k = k + 1 \n else:\n tempList[k] = l[j]\n j = j + 1\n k = k + 1\n\n # Copy remaining elements of the first list\n c += 1\n while i <= l1:\n tempList[k] = l[i]\n i = i + 1\n k = k + 1\n c += 1\n r += 1\n w += 1\n\n # Copy remaining elements of the second list \n c += 1\n while j <= l2:\n tempList[k] = l[j]\n j = j + 1\n k = k + 1\n c += 1\n r += 1\n w += 1\n\n # Copy elements from tempList to list l\n i = s1\n j = 0 \n c += 1\n while i <= l2:\n l[i] = tempList[j]\n i = i + 1\n j = j + 1\n c += 1\n w += 1 \n r += 1", "def _merge_lists(cls, li1, li2):\n if not li1:\n return li2[:]\n elif not li2:\n return li1[:]\n else:\n li = li1[:]\n for el in li2:\n if el not in li:\n li.append(el)\n return li", "def merge (left, right):\n i = 0\n j = 0\n n = len(left)\n m = len(right)\n out = []\n\n while i < n and j < m:\n if left[i] < right[j]:\n out.append(left[i])\n i += 1\n else:\n out.append(right[j])\n j += 1\n\n if i is n:\n for l in xrange(j, m):\n out.append(right[l])\n elif j is m:\n for l in xrange(i, n):\n out.append(left[l])\n\n return out", "def merge(left,right):\n l = []\n i = 0\n j = 0\n\n while i < len(left) and j < len(right):\n if left[i] < right[j]:\n l.append(left[i])\n i += 1\n else:\n l.append(right[j])\n j += 1\n\n while i < len(left):\n l.append(left[i])\n i += 1\n while j < len(right):\n l.append(right[j])\n j += 1\n return l", "def linear_merge(sorted1, sorted2):\n first_pointer = 0\n second_pointer = 0\n sorted_result = []\n\n while second_pointer < len(sorted2) and first_pointer < len(sorted1):\n if sorted1[first_pointer] < sorted2[second_pointer]:\n sorted_result.append(sorted1[first_pointer])\n first_pointer += 1\n else:\n sorted_result.append(sorted2[second_pointer])\n second_pointer += 1\n\n while second_pointer < len(sorted2):\n sorted_result.append(sorted2[second_pointer])\n second_pointer += 1\n\n while first_pointer < len(sorted1):\n sorted_result.append(sorted1[first_pointer])\n first_pointer += 1\n\n\n return sorted_result", "def merge(self, A: List[int], m: int, B: List[int], n: int) -> None:\n # # solution one: sort\n # A[m:] = B\n # A.sort()\n\n # solution two: two point\n if n == 0: # B = []\n return\n i, j, k = m - 1, n - 1, m + n - 1\n while i > -1 and j > -1: # > -1, if m = 0 or n = 0, then i = -1 or j = 
-1\n if A[i] <= B[j]:\n A[k] = B[j]\n k -= 1\n j -= 1\n else:\n A[k] = A[i]\n k -= 1\n i -= 1\n if j > -1:\n A[:j + 1] = B[:j + 1] # A = [], B = [1]", "def merge_sort(in_list1: list) -> list:\n if in_list1 is None:\n return []\n if len(in_list1) == 1:\n return [in_list1[0]]\n _list1,_list2= in_list1[:int(((len(in_list1)+1)/2))],\\\n in_list1[int(((len(in_list1)+1)/2)):]\n _ordered_list1 = merge_sort(_list1)\n _ordered_list2 = merge_sort(_list2)\n return merge_ordered_list(_ordered_list1,_ordered_list2)", "def merge(self, nums1, m, nums2, n):\n offset = len(nums1) - m\n i = 0\n while i < len(nums1) and nums2:\n num = nums1[i]\n num2 = nums2[0]\n\n if num2 < num:\n nums1.insert(i, num2)\n nums2.pop(0)\n i += 1\n\n if i >= len(nums1) and nums2 is not None:\n nums1[len(nums1) - offset:] = nums2\n while len(nums1) > m + n:\n nums1.pop()", "def merge_sort(list1):\n if len(list1) <= 1:\n return list1\n \n mid_point = int(len(list1)/2)\n \n return merge(merge_sort(list1[:mid_point]), merge_sort(list1[mid_point:]))", "def merge(nums1, m, nums2, n):\r\n while len(nums1) != m and nums1[len(nums1)-1] == 0:\r\n nums1.pop()\r\n for each in nums2:\r\n nums1.append(each)\r\n nums1.sort()", "def merge(self, nums1: List[int], m: int, nums2: List[int], n: int) -> None:\n # temp1, temp2 = nums1[:m], nums2[:n]\n # ans = []\n\n # while len(temp1) or len(temp2):\n # if not len(temp1):\n # ans.extend(temp2)\n # break\n # if not len(temp2):\n # ans.extend(temp1)\n # break\n # head_1, head_2 = temp1[0], temp2[0]\n\n # if head_1 <= head_2:\n # ans.append(head_1)\n # temp1 = temp1[1:]\n # else:\n # ans.append(head_2)\n # temp2 = temp2[1:]\n\n # for i in range(len(ans)):\n # nums1[i] = ans[i]\n\n while m > 0 and n > 0:\n if nums1[m-1] > nums2[n-1]:\n nums1[m+n-1] = nums1[m-1]\n m -= 1\n else:\n nums1[m+n-1] = nums2[n-1]\n n -= 1\n\n nums1[:n] = nums2[:n]", "def merge_down(lists):\r\n lst1 = transpose(lists)\r\n lst2 = merge_AllRight(lst1)\r\n lst3 = transpose(lst2)\r\n\r\n lists = lst3\r\n\r\n return lists", "def merge(l, r):\n print(\"merge [l={}, r={}]\".format(l, r))\n t = [None] * (len(l) + len(r))\n ti = 0\n \n i, j = 0, 0\n\n while i < len(l) and j < len(r):\n if l[i] <= r[j]:\n t[ti] = l[i]\n i += 1\n else:\n t[ti] = r[j]\n j += 1\n ti += 1\n\n # copy remaining items from one of either l or r...\n if i < len(l):\n while i < len(l):\n t[ti] = l[i]\n i += 1\n ti += 1\n else:\n while j < len(r):\n t[ti] = r[j]\n j+= 1\n ti += 1\n\n return t", "def merge(arr1, arr2):\n i = 0\n j = 0\n sol = []\n while i < len(arr1) and j < len(arr2):\n if arr1[i] <= arr2[j]:\n sol.append(arr1[i])\n i += 1\n else:\n sol.append(arr2[j])\n j += 1\n if i < len(arr1):\n sol.extend(arr1[i:])\n if j < len(arr2):\n sol.extend(arr2[j:])\n return sol", "def merge_sort(list1):\n if len(list1) <= 1:\n return list1\n left = merge_sort(list1[:len(list1)/2])\n right = merge_sort(list1[len(list1)/2:])\n return merge(left, right)", "def merge(self, nums1, m, nums2, n):\n nums1.extend([0]*len(nums2))\n j=0\n for i in range(len(nums2)):\n if nums2[i]<nums1[j]:\n nums1.pop()\n print(nums1)\n nums1.insert(j,nums2[i])\n j=j+1", "def merge(left, right):\n left_index = 0\n right_index = 0\n result = []\n # Copy the smaller element amongst the left and the right half\n # and add to the list\n while left_index < len(left) and right_index < len(right):\n if left[left_index] <= right[right_index]:\n result.append(left[left_index])\n left_index += 1\n else:\n result.append(right[right_index])\n right_index += 1\n # Copy any elements remaining in the left 
half\n if left_index < len(left):\n result.extend(left[left_index:])\n # Copy any elements remaining in the right half\n if right_index < len(right):\n result.extend(right[right_index:])\n return result", "def mergeArrays(a, b):\n i = 0\n j = 0\n o = []\n aLen = len(a)\n bLen = len(b)\n\n # go over the array from lower to higher\n while i < aLen and j < bLen:\n if a[i] < b[j]:\n o.append(a[i])\n i += 1\n elif b[j] < a[i]:\n o.append(b[j])\n j += 1\n else:\n # equals a[i] and b[j]\n o.append(a[i])\n i += 1\n j += 1\n\n # append the rest\n while i < aLen:\n o.append(a[i])\n i += 1\n\n while j < bLen:\n o.append(b[j])\n j += 1\n\n return o", "def merge(self, nums1: List[int], m: int, nums2: List[int], n: int) -> None:\n last_ptr = (m + n) - 1\n\n while m > 0 and n > 0:\n if nums1[m-1] > nums2[n-1]:\n nums1[last_ptr] = nums1[m-1]\n m += -1\n else:\n nums1[last_ptr] = nums2[n-1]\n n += -1\n last_ptr += -1\n\n while n > 0:\n nums1[last_ptr] = nums2[n-1]\n n += -1\n last_ptr += -1", "def merge_lists(l1, l2):\n return [ *l1, *l2 ]", "def merge(S1, S2, S):\n i = j = 0\n while i + j < len(S):\n if j == len(S2) or (i < len(S1) and S1[i] < S2[j]):\n S[i + j] = S1[i]\n i += 1\n else:\n S[i + j] = S2[j]\n j += 1", "def merge(L, left, right):\n i = j = 0\n while i < len(left) and j < len(right):\n if left[i] < right[j]:\n L[i + j] = left[i]\n i += 1\n else:\n L[i + j] = right[j]\n j += 1\n while i < len(left):\n L[i + j] = left[i]\n i += 1\n while j < len(right):\n L[i + j] = right[j]\n j += 1", "def sort_list_pairs(list1, list2, **kwargs):\n order = kwargs.get('order', 'descending')\n\n if type(list1) == np.ndarray:\n list1 = list1.tolist()\n\n if type(list2) == np.ndarray:\n list2 = list2.tolist()\n list1, list2 = zip(*sorted(zip(list1, list2)))\n\n if order == 'descending':\n return list1[::-1], list2[::-1]\n elif order == 'ascending':\n return list1, list2", "def merge (t1,t2, cmp):\n n1 = len(t1)\n n2 = len(t2)\n t = [ 0 for i in range(0,n1+n2)]\n i = j = k = 0\n while i < n1 and j < n2:\n if cmp(t1[i],t2[j]) < 0:\n t[k] = t1[i]\n i = i + 1\n else:\n t[k] = t2[j]\n j = j + 1\n k = k + 1\n while i < n1:\n t[k] = t1[i]\n i = i + 1\n k = k + 1\n while j < n2:\n t[k] = t2[j]\n j = j + 1\n k = k + 1\n return t", "def merge(U:list, V:list, T:list) -> \"void\":\n\n\tinfinite = float(\"inf\")\n\ti, j = 0, 0\n\tU.append(infinite)\n\tV.append(infinite)\n\n\tfor k in range(0, len(U) + len(V)-2):\n\t\tif U[i] < V[j]:\n\t\t\tT[k] = U[i]\n\t\t\ti = i + 1\n\t\telse:\n\t\t\tT[k] = V[j]\n\t\t\tj = j + 1", "def merge(self, nums1: List[int], m: int, nums2: List[int], n: int) -> None:\r\n _m = 0\r\n for _n in range(n):\r\n while nums2[_n] > nums1[_m] and _m < m:\r\n _m += 1\r\n \r\n # shift all elements\r\n aux = nums1[_m]\r\n for i in range(_m, m):\r\n nums1[i + 1], aux = aux, nums1[i + 1]\r\n m += 1\r\n nums1[_m] = nums2[_n]", "def merge_sort(self, lst):\r\n [sorted_lst, number_of_inversions] = self.sort_and_get_number_of_inversions(lst)\r\n \r\n return sorted_lst", "def merge(left, right, inversions):\n\tmerged = []\n\til, ir = 0, 0\n\tlenl, lenr = len(left), len(right)\n\twhile il < lenl or ir < lenr:\n\t\tif il < lenl and ir < lenr:\n\t\t\tif left[il] <= right[ir]:\n\t\t\t\tmerged.append(left[il])\n\t\t\t\til += 1\n\t\t\telse:\n\t\t\t\telt = right[ir]\n\t\t\t\tmerged.append(elt)\n\t\t\t\t# elt occurs after elements in the left list, but is less\n\t\t\t\t# than all remaining elements in the left list. 
Therefore,\n\t\t\t\t# there are as many inversions of the form (i, elt) as\n\t\t\t\t# there are remaining elements in the left list.\n\t\t\t\tfor _ in xrange(lenl - il):\n\t\t\t\t\tinversions[elt] += 1\n\t\t\t\tir += 1\t\t\t\t\n\t\telif il < lenl:\n\t\t\tmerged.append(left[il])\n\t\t\til += 1\n\t\telse:\n\t\t\tmerged.append(right[ir])\n\t\t\tir += 1\n\treturn merged", "def merge(nums1, m, nums2, n):\r\n if not nums2:\r\n return\r\n\r\n while m > 0 and n > 0:\r\n # the current element of the final sorted array is the smaller of nums2[n -1] and nums1[m - 1]\r\n if nums2[n - 1] < nums1[m - 1]:\r\n nums1[m + n - 1] = nums1[m - 1]\r\n m -= 1\r\n else:\r\n nums1[m + n - 1] = nums2[n - 1]\r\n n -= 1\r\n nums1[:n] = nums2[:n]", "def merge(self, nums1: List[int], m: int, nums2: List[int], n: int) -> None:\n k,i,j = 0,0,0\n while i< len(nums1) and j < len(nums2):\n if nums1[i]> nums2[j]:\n temp = nums1[i]\n nums1[i] = nums2[j]\n j+=1\n nums1.pop()\n nums1.insert(i+1,temp)\n else:\n i+=1\n lenght_left = len(nums1) - (len(nums2)-j)\n while j< len(nums2):\n nums1[lenght_left] = nums2[j]\n j+=1\n lenght_left+=1", "def merge(self, nums1: List[int], m: int, nums2: List[int], n: int) -> None:\n idx = m+n-1\n m, n = m-1, n-1\n \n while idx>=0 and m>=0 and n>=0 :\n if nums1[m] > nums2[n] :\n nums1[idx]=nums1[m]\n m-=1\n else :\n nums1[idx]=nums2[n]\n n-=1\n idx-=1\n \n while n>=0 :\n nums1[idx]=nums2[n]\n idx, n = idx-1, n-1", "def merge(self, nums1: list, m: int, nums2: list, n: int) -> None:\r\n if nums1 == [] or nums2 == []: return\r\n nums1[:] = [nums1[i] for i in range(m)]+nums2\r\n nums1.sort()\r\n return nums1", "def merge(self, nums1, m: int, nums2, n: int) -> None:\n\n i = 0\n j = 0\n k = 0\n # [1, 2, 3, 1, 2, 3]\n # [2, 3, 4]\n while i < m and j < n:\n if nums1[m+i] < nums2[j]:\n nums1[k] = nums1[m+i]\n i += 1\n else:\n nums1[k] = nums2[j]\n j += 1\n k += 1\n\n if j < n:\n print(j)\n nums1[m+j:] = nums2[j:]", "def mergeSeq(left, right):\n i = j = 0\n result = []\n while i < len(left) and j < len(right):\n if left[i] <= right[j]:\n result.append(left[i])\n i += 1\n else:\n result.append(right[j])\n j += 1\n\n result += left[i:]\n result += right[j:]\n return result", "def merge_sort_aux(l, start1, last2):\n nonlocal c, w, r\n\n def merge(l, s1, l1, s2, l2): \n \"\"\"\n Sort the sublists and merge two halves\n \n Parameter\n ----------------------\n l: unsorted list\n list\n s1: the index of the first element of the 1st list (left side)\n int \n l1: the index of the last element of the 1st list (left side)\n int\n s2: the index of the first element of the 2nd list (right side)\n int\n l2: the index of the last element of the 2nd list (right side)\n int\n \"\"\"\n nonlocal c, r, w\n\n # Create temporary list to store sorted value\n tempList = l.copy() \n\n # Compare pairs of values of two list, start from the first element\n i = s1 # Beginning of the left list\n j = s2 # Beginning of the right list\n k = 0\n\n # Compare and add to temporary list\n c += 2\n while i <= l1 and j <= l2: \n c += 3\n r += 2 \n w += 1 \n if l[i] < l[j]:\n tempList[k] = l[i]\n i = i + 1\n k = k + 1 \n else:\n tempList[k] = l[j]\n j = j + 1\n k = k + 1\n\n # Copy remaining elements of the first list\n c += 1\n while i <= l1:\n tempList[k] = l[i]\n i = i + 1\n k = k + 1\n c += 1\n r += 1\n w += 1\n\n # Copy remaining elements of the second list \n c += 1\n while j <= l2:\n tempList[k] = l[j]\n j = j + 1\n k = k + 1\n c += 1\n r += 1\n w += 1\n\n # Copy elements from tempList to list l\n i = s1\n j = 0 \n c += 1\n while i <= 
l2:\n l[i] = tempList[j]\n i = i + 1\n j = j + 1\n c += 1\n w += 1 \n r += 1 \n \n # Split the list to sublist untill size become one\n c += 1\n if start1 < last2:\n last1 = (start1 + last2) // 2 \n start2 = last1 + 1\n merge_sort_aux(l, start1, last1) #the left side\n merge_sort_aux(l, start2, last2) #the right side\n # Call merge function to merge subarrays \n merge(l, start1, last1, start2, last2)", "def merge(self, nums1: list, m: int, nums2: list, n: int) -> None:\n if nums2 == []:\n return nums1\n \n i = len(nums1) - 1\n j = n - 1\n \n while j >= 0 and m - 1 >= 0:\n if nums2[j] >= nums1[m - 1]:\n nums1[i] = nums2[j]\n i -= 1\n j -= 1\n else:\n nums1[i] = nums1[m - 1]\n i -= 1\n m -= 1\n \n if j < 0:\n return\n else:\n while i >= 0:\n nums1[i] = nums2[j]\n i -= 1\n j -= 1", "def merge(self, nums1: [int], m: int, nums2: [int], n: int) -> None:\n for i in range(m, len(nums1)):\n del nums1[m]\n for i in range(n, len(nums2)):\n del nums2[n]\n\n nums1 += nums2\n nums1.sort()" ]
[ "0.7819038", "0.7720342", "0.7684522", "0.75869024", "0.7551841", "0.7532117", "0.74882025", "0.74285775", "0.7416055", "0.7384424", "0.7365451", "0.73496974", "0.728718", "0.72749096", "0.7255807", "0.72539026", "0.7162134", "0.71429527", "0.71196735", "0.7114966", "0.7099433", "0.7094018", "0.70485944", "0.70427716", "0.70416856", "0.7031543", "0.7014027", "0.6965687", "0.69545156", "0.68724513", "0.6848798", "0.6834175", "0.6829853", "0.6815716", "0.68095654", "0.6801674", "0.67981696", "0.6797778", "0.6768197", "0.6757564", "0.6737284", "0.6730763", "0.6713261", "0.6694505", "0.6691308", "0.6676729", "0.6664767", "0.6645044", "0.6613647", "0.66118085", "0.65608245", "0.65575564", "0.65504056", "0.6532724", "0.6485063", "0.6483875", "0.6478932", "0.6462978", "0.6457498", "0.6457173", "0.6456285", "0.644605", "0.6444665", "0.6420935", "0.64200497", "0.6413915", "0.64062", "0.63830507", "0.6370619", "0.63598365", "0.6352758", "0.63384485", "0.63369393", "0.632265", "0.63058066", "0.63049203", "0.6292622", "0.62872976", "0.6285221", "0.6275156", "0.626909", "0.62490225", "0.62431973", "0.6232504", "0.6225923", "0.6225466", "0.6208064", "0.61901695", "0.61842895", "0.6178196", "0.6169856", "0.6165992", "0.61641026", "0.61537904", "0.6152133", "0.6147474", "0.61290485", "0.61156136", "0.6085674", "0.60838014" ]
document_score: 0.7145039
document_rank: 17
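The query in this row asks for merging two lists that are each sorted in descending order. For reference, a minimal sketch of that variant, taking the larger head first so the output stays descending (illustrative only, not part of the dataset):

def merge_descending(a, b):
    """Merge two lists that are each sorted in descending order."""
    merged = []
    i = j = 0
    while i < len(a) and j < len(b):
        # Take the larger head so the output stays in descending order.
        if a[i] >= b[j]:
            merged.append(a[i])
            i += 1
        else:
            merged.append(b[j])
            j += 1
    merged.extend(a[i:])
    merged.extend(b[j:])
    return merged

# merge_descending([9, 5, 2], [8, 3]) -> [9, 8, 5, 3, 2]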
Retrieve the current values of the RAMSTKMode data model attributes.
def get_attributes(self):
    _attributes = {
        'function_id': self.function_id,
        'hardware_id': self.hardware_id,
        'mode_id': self.mode_id,
        'critical_item': self.critical_item,
        'description': self.description,
        'design_provisions': self.design_provisions,
        'detection_method': self.detection_method,
        'effect_end': self.effect_end,
        'effect_local': self.effect_local,
        'effect_next': self.effect_next,
        'effect_probability': self.effect_probability,
        'hazard_rate_source': self.hazard_rate_source,
        'isolation_method': self.isolation_method,
        'mission': self.mission,
        'mission_phase': self.mission_phase,
        'mode_criticality': self.mode_criticality,
        'mode_hazard_rate': self.mode_hazard_rate,
        'mode_op_time': self.mode_op_time,
        'mode_probability': self.mode_probability,
        'mode_ratio': self.mode_ratio,
        'operator_actions': self.operator_actions,
        'other_indications': self.other_indications,
        'remarks': self.remarks,
        'rpn_severity': self.rpn_severity,
        'rpn_severity_new': self.rpn_severity_new,
        'severity_class': self.severity_class,
        'single_point': self.single_point,
        'type_id': self.type_id
    }
    return _attributes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def device_state_attributes(self):\n # TODO: convert RH from Elk to AH ?\n #if self.current_humidity > 0:\n # humidity = self.current_humidity\n data = {\n 'hidden': self._hidden,\n 'temp_unit' : self.temperature_unit,\n }\n if self._device.temp_outside is not None and self._device.temp_outside > -460:\n data['temp_outside'] = self._device.temp_outside\n if self._device.temp_3 is not None and self._device.temp_3 > -460:\n data['temp_3'] = self._device.temp_3\n if self._device.temp_4 is not None and self._device.temp_4 > -460:\n data['temp_4'] = self._device.temp_4\n return data", "def device_state_attributes(self):\n if self._type == ATTR_CAQI:\n self._attrs[ATTR_CAQI_LEVEL] = self.data[ATTR_CAQI_LEVEL]\n if self._type == ATTR_PM25:\n self._attrs[ATTR_LIMIT] = self.data[ATTR_PM25_LIMIT]\n self._attrs[ATTR_PERCENT] = round(self.data[ATTR_PM25_PERCENT])\n if self._type == ATTR_PM10:\n self._attrs[ATTR_LIMIT] = self.data[ATTR_PM10_LIMIT]\n self._attrs[ATTR_PERCENT] = round(self.data[ATTR_PM10_PERCENT])\n return self._attrs", "def device_state_attributes(self):\n if self.airly.data_available:\n if self.type == ATTR_CAQI_DESCRIPTION:\n self._attrs[ATTR_CAQI_ADVICE] = (self.airly.data\n [ATTR_CAQI_ADVICE])\n if self.type == ATTR_CAQI:\n self._attrs[ATTR_CAQI_LEVEL] = self.airly.data[ATTR_CAQI_LEVEL]\n if self.type == ATTR_PM25:\n self._attrs[ATTR_LIMIT] = self.airly.data[ATTR_PM25_LIMIT]\n self._attrs[ATTR_PERCENT] = (round(self.airly.data\n [ATTR_PM25_PERCENT]))\n if self.type == ATTR_PM10:\n self._attrs[ATTR_LIMIT] = self.airly.data[ATTR_PM10_LIMIT]\n self._attrs[ATTR_PERCENT] = (round(self.airly.data\n [ATTR_PM10_PERCENT]))\n return self._attrs", "def values(self):\n return self._modes.values()", "def set_attributes(self, attributes):\n _error_code = 0\n _msg = \"RAMSTK SUCCESS: Updating RAMSTKMode {0:d} attributes.\". 
\\\n format(self.hardware_id)\n\n try:\n self.critical_item = int(\n none_to_default(attributes['critical_item'], 0))\n self.description = str(\n none_to_default(attributes['description'],\n 'Failure Mode Description'))\n self.design_provisions = str(\n none_to_default(attributes['design_provisions'], ''))\n self.detection_method = str(\n none_to_default(attributes['detection_method'], ''))\n self.effect_end = str(\n none_to_default(attributes['effect_end'], 'End Effect'))\n self.effect_local = str(\n none_to_default(attributes['effect_local'], 'Local Effect'))\n self.effect_next = str(\n none_to_default(attributes['effect_next'], 'Next Effect'))\n self.effect_probability = float(\n none_to_default(attributes['effect_probability'], 0.0))\n self.hazard_rate_source = str(\n none_to_default(attributes['hazard_rate_source'], ''))\n self.isolation_method = str(\n none_to_default(attributes['isolation_method'], ''))\n self.mission = str(none_to_default(attributes['mission'], ''))\n self.mission_phase = str(\n none_to_default(attributes['mission_phase'], ''))\n self.mode_criticality = float(\n none_to_default(attributes['mode_criticality'], 0.0))\n self.mode_hazard_rate = float(\n none_to_default(attributes['mode_hazard_rate'], 0.0))\n self.mode_op_time = float(\n none_to_default(attributes['mode_op_time'], 0.0))\n self.mode_probability = str(\n none_to_default(attributes['mode_probability'], ''))\n self.mode_ratio = float(\n none_to_default(attributes['mode_ratio'], 0.0))\n self.operator_actions = str(\n none_to_default(attributes['operator_actions'], ''))\n self.other_indications = str(\n none_to_default(attributes['other_indications'], ''))\n self.remarks = str(none_to_default(attributes['remarks'], ''))\n self.rpn_severity = int(\n none_to_default(attributes['rpn_severity'], 1))\n self.rpn_severity_new = int(\n none_to_default(attributes['rpn_severity_new'], 1))\n self.severity_class = str(\n none_to_default(attributes['severity_class'], ''))\n self.single_point = int(\n none_to_default(attributes['single_point'], 0))\n self.type_id = int(none_to_default(attributes['type_id'], 0))\n except KeyError as _err:\n _error_code = 40\n _msg = \"RAMSTK ERROR: Missing attribute {0:s} in attribute \" \\\n \"dictionary passed to \" \\\n \"RAMSTKMode.set_attributes().\".format(_err)\n\n return _error_code, _msg", "def device_state_attributes(self):\r\n return self.attributes", "def device_state_attributes(self):\n if self.ticker is not None:\n return {\n ATTR_VOLUME_24H: self.ticker.values.get(\"volume\"),\n ATTR_ATTRIBUTION: ATTRIBUTION,\n ATTR_HIGH: self.ticker.values.get(\"high\"),\n ATTR_LOW: self.ticker.values.get(\"low\"),\n ATTR_VWAP: self.ticker.values.get(\"vwap\")\n }", "def device_state_attributes(self):\n return self.attr", "def device_state_attributes(self):\n return self.attr", "def read_global_attributes(self):\n return self._attrs.keys()", "def device_state_attributes(self):\n return self.custom_attributes", "def device_state_attributes(self):\n return self._emeter_params", "def device_state_attributes(self):\n data = {}\n if self._is_dimmable and self._brightness_pct:\n data = {ATTR_BRIGHTNESS_PCT: self._brightness_pct}\n data.update({#'alarm': self._alarm,\n 'operation_mode': self.operation_mode,\n 'rssi': self._rssi,\n 'occupancy': self._occupancy,\n 'wattage_override': self._wattage_override,\n 'id': self._id})\n return data", "def device_state_attributes(self):\n attr = {}\n attr['remote_lock'] = self.remote_lock\n attr['power_state'] = self._power_state\n attr['heating_active'] = 
self._is_heating_active\n attr['auto_override'] = self.auto_override\n attr['sensor_mode'] = self.sensor_mode\n attr['external_sensor_temprange'] = self.external_temp\n attr['deadzone_sensor_temprange'] = self.deadzone_sensor_temprange\n attr['loop_mode'] = self._loop_mode\n attr['roomtemp_offset'] = self.roomtemp_offset\n attr['anti_freeze_function'] = self.anti_freeze_function\n attr['poweron_mem'] = self.poweron_mem\n attr['external_temp'] = self.external_temp\n attr['clock_hour'] = self.clock_hour\n attr['clock_min'] = self.clock_min\n attr['clock_sec'] = self.clock_sec\n attr['day_of_week'] = self.day_of_week\n attr['week_day'] = self.week_day\n attr['week_end'] = self.week_end\n return attr", "def device_state_attributes(self):\n if self._xfinity_data.total_usage is None:\n return None\n\n res = {ATTR_ATTRIBUTION: ATTRIBUTION}\n res[ATTR_TOTAL_USAGE] = self._xfinity_data.total_usage\n res[ATTR_ALLOWED_USAGE] = self._xfinity_data.allowed_usage\n res[ATTR_REMAINING_USAGE] = self._xfinity_data.remaining_usage\n return res", "def device_state_attributes(self):\r\n return self._attributes", "def device_state_attributes(self):\n # Move these to Thermostat Device and make them global\n return {\n \"current_humidity\": self._current_humidity,\n \"status\": self._current_state,\n \"program\": self._current_program,\n \"away_mode\": self._away\n }", "def device_state_attributes(self):\n if self._data is not None:\n return {\n \"阳历\": self._data.yangli,\n \"阴历\": self._data.yinli,\n \"五行\": self._data.wuxing,\n \"冲煞\": self._data.chongsha,\n \"百忌\": self._data.baiji,\n \"吉神\": self._data.jishen,\n \"宜\": self._data.yi,\n \"凶神\": self._data.xiongshen,\n \"忌\": self._data.ji,\n }", "def device_state_attributes(self):\n # attributes = super().device_state_attributes\n _config_attrib = self._product.get_data_config_json()\n return _config_attrib", "def listglobal(self):\n return list(self.attributes.keys())", "def value_items(self):\n if self._parent:\n return [ WARP_MODE_NAMES[x] for x in self._parent.available_warp_modes ]\n return ()", "def device_state_attributes(self):\n attributes = self._attrs\n\n for variable in self._sid_data[\"sid_attr\"]:\n if variable in self._data:\n attributes[format_attribute(variable)] = self._data[variable]\n\n return attributes", "def device_state_attributes(self):\n\n attr = {\n \"uiclass\": self.tahoma_device.uiclass,\n \"widget\": self.tahoma_device.widget,\n \"type\": self.tahoma_device.type,\n }\n\n if CORE_RSSI_LEVEL_STATE in self.tahoma_device.active_states:\n attr[ATTR_RSSI_LEVEL] = self.tahoma_device.active_states[\n CORE_RSSI_LEVEL_STATE\n ]\n\n # TODO Parse 'lowBattery' for low battery warning. 
'dead' for not available.\n # \"dead\", \"lowBattery\", \"maintenanceRequired\", \"noDefect\"\n if CORE_SENSOR_DEFECT_STATE in self.tahoma_device.active_states:\n attr[ATTR_BATTERY_LEVEL] = self.tahoma_device.active_states[\n CORE_SENSOR_DEFECT_STATE\n ]\n\n return attr", "def device_state_attributes(self):\n return self._attribute", "def device_state_attributes(self):\n return self._attribute", "def device_state_attributes(self):\n return self._attributes", "def device_state_attributes(self):\n return self._attributes", "def device_state_attributes(self):\n return self._attributes", "def device_state_attributes(self):\n return self._attributes", "def device_state_attributes(self):\n return self._attributes", "def device_state_attributes(self):\n return self._attributes", "def device_state_attributes(self):\n return self._attributes", "def device_state_attributes(self):\n return self._attributes", "def device_state_attributes(self):\n return self._attributes", "def device_state_attributes(self):\n return self._attributes", "def device_state_attributes(self):\n return self._attributes", "def device_state_attributes(self):\n return self._attributes", "def device_state_attributes(self):\n return self._attributes", "def device_state_attributes(self):\n # attributes = super().device_state_attributes\n attributes = {ATTR_UNIT_OF_MEASUREMENT: self._unit}\n return attributes", "def device_state_attributes(self):\n return self._attrs", "def read(self):\n return self.get_attr().Value()", "def ramdata(self):\n data = self._ftdi.spi_read(self.RAMDATA_ADDR, len=self.RAMDATA_N, burst='fixed')\n return [w & self.RAMDATA_MASK for w in data]", "def get_memory(self):\n return (self.K.get_value(), self.V.get_value(), self.A.get_value())", "def device_state_attributes(self):\n while len(self._device.prices) < 4:\n self._device.prices.append(\"None\")\n attrs = {\n \"device_name\": self._device.name,\n \"description\": self.description,\n \"unit_of_measurement\": self._device.price_currency,\n \"product_id\": self.product_id,\n \"price1\": self._device.prices[0],\n \"price2\": self._device.prices[1],\n \"price3\": self._device.prices[2],\n \"price4\": self._device.prices[3],\n }\n return attrs", "def GetAttributes(self):\r\n\r\n return self._attr", "def device_state_attributes(self): # Can be remove from 0.99\n return self._attr", "def device_state_attributes(self) -> str:\n return {\n \"remo_device_id\": self._remo_device.id,\n \"remo_device_name\": self._remo_device.name,\n \"remo_firmware_version\": self._remo_device.firmware_version,\n \"remo_temperature_offset\": self._remo_device.temperature_offset,\n \"remo_humidity_offset\": self._remo_device.humidity_offset\n }", "def device_state_attributes(self):\n attr = {}\n attr[\"enabled\"] = self._controller.enabled\n attr[\"zone_count\"] = len(self._controller._zones)\n attr[\"zones\"] = \"\"\n current = self._controller.runs.current_run\n if current is not None:\n attr[\"current_zone\"] = current.index + 1\n attr[\"current_name\"] = current.zone.name\n attr[\"current_start\"] = dt.as_local(current.start_time)\n attr[\"current_duration\"] = str(current.duration)\n attr[\"time_remaining\"] = str(current.time_remaining)\n attr[\"percent_complete\"] = current.percent_complete\n else:\n attr[\"current_schedule\"] = RES_NOT_RUNNING\n attr[\"percent_complete\"] = 0\n\n next = self._controller.runs.next_run\n if next is not None:\n attr[\"next_zone\"] = next.index + 1\n attr[\"next_name\"] = next.zone.name\n attr[\"next_start\"] = dt.as_local(next.start_time)\n 
attr[\"next_duration\"] = str(next.duration)\n else:\n attr[\"next_schedule\"] = RES_NONE\n\n return attr", "def read_all_ram(self):\n return self.RAM", "def state_attributes(self):\n return self._vehicle.data", "def device_state_attributes(self):\n return self._ba_attrs", "def getAttributes(self):\n pass", "def getMyInfoAsDict(self):\n list = ['name', 'version', 'systemSize', 'xMax', \n 'yMax', 'currentRound', 'currentHoursLeft']\n d = self.getSelectedAttr(list)\n return d", "def extra_state_attributes(self):\n\n attrs = {\n \"device_id\": self._device_id,\n \"is_smart_program\": self._program.get(\"is_smart_program\", False),\n \"frequency\": self._program.get(\"frequency\"),\n \"start_times\": self._program.get(\"start_times\"),\n \"budget\": self._program.get(\"budget\"),\n \"program\": self._program.get(\"program\"),\n \"run_times\": self._program.get(\"run_times\"),\n }\n\n return attrs", "def current_settings(self):\n return {\n 'power_state': self.power_state,\n 'brightness': self.brightness,\n }", "def device_state_attributes(self):\n\n state = {\n \"car\": self._licenseplate,\n \"vin\": self._vin,\n \"retrievalstatus\": self._get_car_value(\n self._feature_name,\n self._object_name,\n \"retrievalstatus\",\n \"error\"\n ),\n }\n if self._extended_attributes is not None:\n for attrib in self._extended_attributes:\n\n retrievalstatus = self._get_car_value(self._feature_name, attrib,\n \"retrievalstatus\", \"error\")\n\n if retrievalstatus == \"VALID\":\n state[attrib] = self._get_car_value(\n self._feature_name, attrib, \"value\", \"error\"\n )\n\n if retrievalstatus == \"NOT_RECEIVED\":\n state[attrib] = \"NOT_RECEIVED\"\n return state", "def get_attributes(self):\n\n endpoint = self._get_api_endpoint() + '/attributes'\n results = self.tq.get(endpoint, withp='attribute')\n if 'data' not in results:\n return {}\n\n return results['data']\n # tr = {}\n # for attribute in results['data']:\n # tr[attribute['attribute']['name']] = attribute['value']\n # return tr", "def extra_state_attributes(self):\n attr = {}\n #attr[\"name\"] = self._visonic_device.getDeviceName()\n attr[PANEL_ATTRIBUTE_NAME] = self._panel\n attr[DEVICE_ATTRIBUTE_NAME] = self._visonic_device.getDeviceID()\n return attr", "def get_material_features(self):\n return self.material_features", "def values(self):\n return self.attrs.values()", "def extra_state_attributes(self):\n attr = self._attributes\n if self.tesla_device.has_battery():\n attr[ATTR_BATTERY_LEVEL] = self.tesla_device.battery_level()\n attr[ATTR_BATTERY_CHARGING] = self.tesla_device.battery_charging()\n return attr", "def extra_state_attributes(self) -> dict[str, Any]:\n return {\n \"heat_demand\": self._device.heat_demand,\n \"heat_demands\": self._device.heat_demands,\n \"relay_demands\": self._device.relay_demands,\n \"system_mode\": self._device.system_mode,\n \"tpi_params\": self._device.tpi_params,\n # \"faults\": self._device.faultlog,\n }", "def data(self):\n\t\treturn vars(self)", "def valuerefs(self):\r\n return self.data.values()", "def extra_state_attributes(self) -> dict[str, bool | int]:\n attr = {}\n\n if self._device.offset is not None:\n attr[ATTR_OFFSET] = self._device.offset\n\n if self._device.valve is not None:\n attr[ATTR_VALVE] = self._device.valve\n\n if self._device.locked is not None:\n attr[ATTR_LOCKED] = self._device.locked\n\n return attr", "def get_attributes(self):\n return self._attributes_cache", "def state_attributes(self):\n return self._attributes", "def state_attributes(self):\n return self._attributes", "def 
getAttributes(self):\n return self.attributes", "def getAttributes(self):\n return self.attributes", "def get_RAM(self):\n return self._cached('ram', self.ale.getRAM)", "def attributes(self):\n return [self._ELE_ATTR]", "def device_state_attributes(self):\n attributes = {}\n\n if self._type == \"weather\":\n attributes[\"data\"] = self._connector.get_condition_hourly()\n elif self._type == \"weather_report\":\n attributes[\"data\"] = self._connector.get_weather_report()\n elif self._type == \"temperature\":\n attributes[\"data\"] = self._connector.get_temperature_hourly()\n elif self._type == \"dewpoint\":\n attributes[\"data\"] = self._connector.get_dewpoint_hourly()\n elif self._type == \"pressure\":\n attributes[\"data\"] = self._connector.get_pressure_hourly()\n elif self._type == \"wind_speed\":\n attributes[\"data\"] = self._connector.get_wind_speed_hourly()\n elif self._type == \"wind_direction\":\n attributes[\"data\"] = self._connector.get_wind_direction_hourly()\n elif self._type == \"wind_gusts\":\n attributes[\"data\"] = self._connector.get_wind_gusts_hourly()\n elif self._type == \"precipitation\":\n attributes[\"data\"] = self._connector.get_precipitation_hourly()\n elif self._type == \"precipitation_probability\":\n attributes[\"data\"] = self._connector.get_precipitation_probability_hourly()\n elif self._type == \"precipitation_duration\":\n attributes[\"data\"] = self._connector.get_precipitation_duration_hourly()\n elif self._type == \"cloud_coverage\":\n attributes[\"data\"] = self._connector.get_cloud_coverage_hourly()\n elif self._type == \"visibility\":\n attributes[\"data\"] = self._connector.get_visibility_hourly()\n elif self._type == \"sun_duration\":\n attributes[\"data\"] = self._connector.get_sun_duration_hourly()\n elif self._type == \"sun_irradiance\":\n attributes[\"data\"] = self._connector.get_sun_irradiance_hourly()\n elif self._type == \"fog_probability\":\n attributes[\"data\"] = self._connector.get_fog_probability_hourly()\n elif self._type == \"humidity\":\n attributes[\"data\"] = self._connector.get_humidity_hourly()\n\n attributes[ATTR_ISSUE_TIME] = self._connector.infos[ATTR_ISSUE_TIME]\n attributes[ATTR_LATEST_UPDATE] = self._connector.infos[ATTR_LATEST_UPDATE]\n attributes[ATTR_STATION_ID] = self._connector.infos[ATTR_STATION_ID]\n attributes[ATTR_STATION_NAME] = self._connector.infos[ATTR_STATION_NAME]\n attributes[ATTR_ATTRIBUTION] = ATTRIBUTION\n return attributes", "def _read_state(self) -> None:\n value = self._nobo.get_current_component_temperature(self._id)\n if value is None:\n self._attr_native_value = None\n else:\n self._attr_native_value = round(float(value), 1)", "def info(self):\n return {\n \"learning_rate\": self.learning_rate,\n \"learning_rate_decay\": self.learning_rate_decay,\n \"training_epochs\": self.training_epochs,\n \"batch_size\": self.batch_size,\n \"training_history\": self.training_history,\n \"iteration\": self.iteration,\n \"features\": self.featureset.as_dict()\n }", "def _get_mode(self):\n self._validate_mode()\n return deepcopy(self.mode)", "def device_state_attributes(self):\n return {\"uuid\": self.uuidAction, \"room\": self.room,\n \"category\": self.cat,\n \"selected_scene\": self.effect,\n \"device_typ\": self.type, \"plattform\": \"loxone\"}", "def mode(self):\n return self._data.get('mode', None)", "def state(self):\n return self.device.device_data[self.device_id]['temperature']", "def get_attributes(self):\n return self.attributes", "def currentMode(self):\n logger.debug(\"Func: currentMode/getter\")\n\n 
return self._currentsDict[\"currentMode\"]", "def get_state(self):\n xml = self.env.sim.model.get_xml() # model xml file\n state = np.array(self.env.sim.get_state().flatten()) # simulator state\n return dict(model=xml, states=state)", "def device_state_attributes(self):\n attr = {}\n attr[\"enabled\"] = self._zone.enabled and self._controller.enabled\n attr[\"status\"] = self._zone.status\n attr[\"schedule_count\"] = len(self._zone.schedules)\n attr[\"schedules\"] = \"\"\n attr[\"adjustment\"] = self._zone.adjustment.as_string\n current = self._zone.runs.current_run\n if current is not None:\n if current.schedule is not None:\n attr[\"current_schedule\"] = current.schedule.schedule_index + 1\n attr[\"current_name\"] = current.schedule.name\n else:\n attr[\"current_schedule\"] = RES_MANUAL\n attr[\"current_name\"] = RES_MANUAL\n attr[\"current_start\"] = dt.as_local(current.start_time)\n attr[\"current_duration\"] = str(current.duration)\n attr[\"time_remaining\"] = str(current.time_remaining)\n attr[\"percent_complete\"] = current.percent_complete\n else:\n attr[\"current_schedule\"] = RES_NOT_RUNNING\n attr[\"percent_complete\"] = 0\n\n next = self._zone.runs.next_run\n if next is not None:\n if next.schedule is not None:\n attr[\"next_schedule\"] = next.schedule.schedule_index + 1\n attr[\"next_name\"] = next.schedule.name\n else:\n attr[\"next_schedule\"] = RES_MANUAL\n attr[\"next_name\"] = RES_MANUAL\n attr[\"next_start\"] = dt.as_local(next.start_time)\n attr[\"next_duration\"] = str(next.duration)\n else:\n attr[\"next_schedule\"] = RES_NONE\n\n return attr", "def device_state_attributes(self):\n return self._state_attributes", "def state_attributes(self):\n attrs = {\"access_token\": self.access_tokens[-1]}\n\n if self.model:\n attrs[\"model_name\"] = self.model\n\n if self.brand:\n attrs[\"brand\"] = self.brand\n\n if self.motion_detection_enabled:\n attrs[\"motion_detection\"] = self.motion_detection_enabled\n\n if self.supports_doorbell_chime:\n attrs[\"doorbell_chime\"] = self.supports_doorbell_chime\n\n return attrs", "def device_state_attributes(self):\n return {\n \"load_shedding_active\": self.coordinator.data.get(\"load_shedding_active\"),\n }", "def device_state_attributes(self):\n node = self.gateway.sensors[self.node_id]\n child = node.children[self.child_id]\n attr = {\n ATTR_BATTERY_LEVEL: node.battery_level,\n ATTR_HEARTBEAT: node.heartbeat,\n ATTR_CHILD_ID: self.child_id,\n ATTR_DESCRIPTION: child.description,\n ATTR_DEVICE: self.gateway.device,\n ATTR_NODE_ID: self.node_id,\n }\n\n set_req = self.gateway.const.SetReq\n\n for value_type, value in self._values.items():\n attr[set_req(value_type).name] = value\n\n return attr", "def get_state(self) -> Dict:\n return {\n \"patience\": self.patience,\n \"cooldown\": self.cooldown,\n \"cooldown_counter\": self.cooldown_counter,\n \"mode\": self.mode,\n \"threshold\": self.threshold,\n \"threshold_mode\": self.threshold_mode,\n \"best\": self.best,\n \"num_bad_epochs\": self.num_bad_epochs,\n \"mode_worse\": self.mode_worse,\n \"last_epoch\": self.last_epoch,\n }", "def device_state_attributes(self):\n return self._state_attrs", "def device_state_attributes(self):\n return self._state_attrs", "def get(self):\n\t\treturn {\n\t\t\t'system': self.get_system_information(),\n\t\t\t'cpu': self.get_cpu_stats(),\n\t\t\t'gpu': self.get_gpu_stats(),\n\t\t\t'ram': self.get_ram_stats(),\n\t\t\t'storage': self.get_storage_stats(),\n\t\t\t'battery': self.get_battery_stats(),\n\t\t\t'temps': self.get_temperatures()\n\t\t}", "def 
extra_restore_state_data(self) -> SensorExtraStoredData:\n return SensorExtraStoredData(self.native_value, self.native_unit_of_measurement)", "def extra_state_attributes(self):\n if self.data.rate is not None:\n return {\n ATTR_EXCHANGE_RATE: self.data.rate[\"rates\"][self._target],\n ATTR_TARGET: self._target,\n }", "def get_all_attribute(self):\n for attr, value in self.__dict__.items():\n print(attr, value)", "def device_state_attributes(self):\n return {\n ATTR_ATTRIBUTION: DEFAULT_ATTRIBUTION,\n ATTR_CAMERA_TYPE: self._camera_type,\n }", "def device_state_attributes(self):\n ret = {\n ATTR_ENTITY_ID: self._entity_id,\n ATTR_COEFFICIENTS: self._coefficients,\n CONF_TRACKED_ENTITY_ID: self._tracked_entity_id,\n ATTR_BASE_SENSOR: self._entity_id.replace(\"_calibrated\", \"\"),\n CONF_MQTT_TOPIC: self._mqtt_topic,\n CONF_DATAPOINTS: self._datapoints,\n }\n if self._attribute:\n ret[ATTR_ATTRIBUTE] = self._attribute\n if self._attributes:\n ret.update(self._attributes)\n return ret", "def get_attribute_list(self):\n return self.dp.get_attribute_list()", "def device_state_attributes(self) -> Dict[str, any]:\n return self._device.state_attributes", "def device_state_attributes(self):\n return {ATTR_ATTRIBUTION: ATTRIBUTION}", "def device_state_attributes(self):\n return {ATTR_ATTRIBUTION: ATTRIBUTION}" ]
[ "0.61570674", "0.6085479", "0.6055138", "0.6028234", "0.60274136", "0.60102427", "0.59539336", "0.5916759", "0.5916759", "0.58789027", "0.58387506", "0.58357614", "0.58318275", "0.58289427", "0.57944816", "0.57702005", "0.57553905", "0.5739991", "0.57087195", "0.5681874", "0.56320506", "0.5626173", "0.56257397", "0.5613137", "0.5613137", "0.5600343", "0.5600343", "0.5600343", "0.5600343", "0.5600343", "0.5600343", "0.5600343", "0.5600343", "0.5600343", "0.5600343", "0.5600343", "0.5600343", "0.5600343", "0.5578224", "0.55754024", "0.55634826", "0.5563317", "0.5557668", "0.5547476", "0.5546459", "0.5543334", "0.5531478", "0.5530075", "0.5502859", "0.5501952", "0.5498018", "0.5489028", "0.5487019", "0.54754925", "0.54737735", "0.54729724", "0.54579127", "0.5449699", "0.544928", "0.5433595", "0.5432734", "0.54212576", "0.5416501", "0.5401393", "0.539805", "0.5385595", "0.53854483", "0.53854483", "0.53853565", "0.53853565", "0.53766394", "0.53729635", "0.5370541", "0.53677195", "0.5364936", "0.53638613", "0.5353751", "0.5351725", "0.5327297", "0.5326975", "0.5324943", "0.5317861", "0.5312885", "0.5309239", "0.53018117", "0.52985924", "0.5290981", "0.5285914", "0.52729136", "0.52729136", "0.5263418", "0.52567685", "0.5256463", "0.525233", "0.52384293", "0.52315754", "0.52146876", "0.52140653", "0.52120996", "0.52120996" ]
0.54526293
57
Set the current values of the RAMSTKMode data model attributes.
def set_attributes(self, attributes): _error_code = 0 _msg = "RAMSTK SUCCESS: Updating RAMSTKMode {0:d} attributes.". \ format(self.hardware_id) try: self.critical_item = int( none_to_default(attributes['critical_item'], 0)) self.description = str( none_to_default(attributes['description'], 'Failure Mode Description')) self.design_provisions = str( none_to_default(attributes['design_provisions'], '')) self.detection_method = str( none_to_default(attributes['detection_method'], '')) self.effect_end = str( none_to_default(attributes['effect_end'], 'End Effect')) self.effect_local = str( none_to_default(attributes['effect_local'], 'Local Effect')) self.effect_next = str( none_to_default(attributes['effect_next'], 'Next Effect')) self.effect_probability = float( none_to_default(attributes['effect_probability'], 0.0)) self.hazard_rate_source = str( none_to_default(attributes['hazard_rate_source'], '')) self.isolation_method = str( none_to_default(attributes['isolation_method'], '')) self.mission = str(none_to_default(attributes['mission'], '')) self.mission_phase = str( none_to_default(attributes['mission_phase'], '')) self.mode_criticality = float( none_to_default(attributes['mode_criticality'], 0.0)) self.mode_hazard_rate = float( none_to_default(attributes['mode_hazard_rate'], 0.0)) self.mode_op_time = float( none_to_default(attributes['mode_op_time'], 0.0)) self.mode_probability = str( none_to_default(attributes['mode_probability'], '')) self.mode_ratio = float( none_to_default(attributes['mode_ratio'], 0.0)) self.operator_actions = str( none_to_default(attributes['operator_actions'], '')) self.other_indications = str( none_to_default(attributes['other_indications'], '')) self.remarks = str(none_to_default(attributes['remarks'], '')) self.rpn_severity = int( none_to_default(attributes['rpn_severity'], 1)) self.rpn_severity_new = int( none_to_default(attributes['rpn_severity_new'], 1)) self.severity_class = str( none_to_default(attributes['severity_class'], '')) self.single_point = int( none_to_default(attributes['single_point'], 0)) self.type_id = int(none_to_default(attributes['type_id'], 0)) except KeyError as _err: _error_code = 40 _msg = "RAMSTK ERROR: Missing attribute {0:s} in attribute " \ "dictionary passed to " \ "RAMSTKMode.set_attributes().".format(_err) return _error_code, _msg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_parameters(self, mode, data):\n if mode == 'design' or self.local_design:\n self.new_design = True\n\n for key, dc in self.variables.items():\n if isinstance(dc, dc_cp):\n if ((mode == 'offdesign' and not self.local_design) or\n (mode == 'design' and self.local_offdesign)):\n self.get_attr(key).design = data[key]\n\n else:\n self.get_attr(key).design = np.nan", "def set_attributes(self):\n s = _setter(oself=self, e1=NameError, e2=AttributeError)\n\n s('oself.coef_ = oself.model.coef_')\n s('oself.intercept_ = oself.model.intercept_')\n\n self.time_prepare = None\n s('oself.time_prepare = oself.model.time_prepare')\n self.time_upload_data = None\n s('oself.time_upload_data = oself.model.time_upload_data')\n self.time_fitonly = None\n s('oself.time_fitonly = oself.model.time_fitonly')", "def set_attributes(self, attributes):\n\n _error_code = 0\n _msg = \"RAMSTK SUCCESS: Updating RAMSTKSurvivalData {0:d} attributes.\". \\\n format(self.record_id)\n\n try:\n self.name = str(none_to_default(attributes[0], ''))\n self.source_id = int(none_to_default(attributes[1], 0))\n self.failure_date = none_to_default(attributes[2], date.today())\n self.left_interval = float(none_to_default(attributes[3], 0.0))\n self.right_interval = float(none_to_default(attributes[4], 0.0))\n self.status_id = int(none_to_default(attributes[5], 0))\n self.quantity = int(none_to_default(attributes[6], 0))\n self.tbf = float(none_to_default(attributes[7], 0.0))\n self.mode_type_id = int(none_to_default(attributes[8], 0))\n self.nevada_chart = int(none_to_default(attributes[9], 0))\n self.ship_date = none_to_default(attributes[10], date.today())\n self.number_shipped = int(none_to_default(attributes[11], 0))\n self.return_date = none_to_default(attributes[12], date.today())\n self.number_returned = int(none_to_default(attributes[13], 0))\n self.user_float_1 = float(none_to_default(attributes[14], 0.0))\n self.user_float_2 = float(none_to_default(attributes[15], 0.0))\n self.user_float_3 = float(none_to_default(attributes[16], 0.0))\n self.user_integer_1 = int(none_to_default(attributes[17], 0))\n self.user_integer_2 = int(none_to_default(attributes[18], 0))\n self.user_integer_3 = int(none_to_default(attributes[19], 0))\n self.user_string_1 = str(none_to_default(attributes[20], ''))\n self.user_string_2 = str(none_to_default(attributes[21], ''))\n self.user_string_3 = str(none_to_default(attributes[22], ''))\n except IndexError as _err:\n _error_code = error_handler(_err.args)\n _msg = \"RAMSTK ERROR: Insufficient number of input values to \" \\\n \"RAMSTKSurvivalData.set_attributes().\"\n except (TypeError, ValueError) as _err:\n _error_code = error_handler(_err.args)\n _msg = \"RAMSTK ERROR: Incorrect data type when converting one or \" \\\n \"more RAMSTKSurvivalData attributes.\"\n\n return _error_code, _msg", "def set_memory_mode(self):\n self.debug_print('set memory mode')\n self.send_com(0x08)", "def setFeatureAttributes(self, newFeature, editBuffer=None):\n #setting the attributes using the reclassification dictionary\n for attribute in self.reclassificationDict[self.category][self.edgvClass][self.buttonName].keys():\n idx = newFeature.fieldNameIndex(attribute)\n #value to be changed\n value = self.reclassificationDict[self.category][self.edgvClass][self.buttonName][attribute]\n if value == '':\n continue\n #actual attribute change\n if editBuffer:\n #this way we are working with the edit buffer\n editBuffer.changeAttributeValue(newFeature.id(), idx, value)\n else:\n #this way are working with selected 
features and inserting a new one in the layer\n newFeature.setAttribute(idx, value)\n \n if not editBuffer:\n # we should return when under the normal behavior\n return newFeature", "def set_r14(self):\r\n self.decrement_sp()\r\n self.set_a_to_m()\r\n self.set_d_to_m()\r\n self.at_var(\"R14\")\r\n self.set_m_to_d()", "def set_r15(self):\r\n self.decrement_sp()\r\n self.set_a_to_m()\r\n self.set_d_to_m()\r\n self.at_var(\"R15\")\r\n self.set_m_to_d()", "def __setattr__(self, name, value):\n if name in [\"sampling_function\", \"env\", \"fit_dist\", \"reset\"]:\n object.__setattr__(self, name, value)\n else:\n setattr(self.env, name, value)", "def set_states(self) -> None:\n self._attr_state = (\n MediaPlayerState.ON if self._zone.power else MediaPlayerState.OFF\n )\n self._attr_is_volume_muted = self._zone.mute\n self._attr_volume_level = self._zone.volume_as_percentage\n self._attr_media_title = self._zone.input_name\n self._attr_app_name = self._zone.input_format\n self._attr_source = self._zone.input_name\n self._attr_source_list = self.avr.input_list", "def set_parameters(cls):\r\n \"\"\" EXECUTE THIS FUNCTION IN THE FARM CLASS! \"\"\"\r\n cls.TSR, cls.RPM, cls.RAD, cls.BLA, cls.CHR, cls.SEC, cls.NT = \\\r\n np.loadtxt('settings.csv', delimiter=',', skiprows=1, unpack=True)", "def set_preset_mode(self, preset_mode: str) -> None:\n if self.target_temperature == 0:\n self._data.homestatus.setroomThermpoint(\n self._data.home_id, self._room_id, STATE_NETATMO_HOME,\n )\n\n if (\n preset_mode in [PRESET_BOOST, STATE_NETATMO_MAX]\n and self._module_type == NA_VALVE\n ):\n self._data.homestatus.setroomThermpoint(\n self._data.home_id,\n self._room_id,\n STATE_NETATMO_MANUAL,\n DEFAULT_MAX_TEMP,\n )\n elif preset_mode in [PRESET_BOOST, STATE_NETATMO_MAX]:\n self._data.homestatus.setroomThermpoint(\n self._data.home_id, self._room_id, PRESET_MAP_NETATMO[preset_mode]\n )\n elif preset_mode in [PRESET_SCHEDULE, PRESET_FROST_GUARD, PRESET_AWAY]:\n self._data.homestatus.setThermmode(\n self._data.home_id, PRESET_MAP_NETATMO[preset_mode]\n )\n else:\n _LOGGER.error(\"Preset mode '%s' not available\", preset_mode)\n\n self.update_without_throttle = True\n self.schedule_update_ha_state()", "def __setstate__(self,values):\n self.initDefault()\n setter = object.__setattr__\n for value,attr in zip(values,self.persistent):\n setter(self,attr,value)\n if self.dirty_sizeCrc == None:\n self.dirty_sizeCrc = {} #--Use empty dict instead.\n self.refreshDataSizeCrc()", "def set_current_operation_mode(self, operation_mode):\n self._current_operation_mode = operation_mode\n \"\"\"Retrieve from textual representation\"\"\"\n if self._current_operation_mode == 'Off':\n self._api._opmode = 0;\n elif self._current_operation_mode == 'Heat only':\n self._api._opmode = 1;\n elif self._current_operation_mode == 'Cool only':\n self._api._opmode = 2;\n elif self._current_operation_mode == 'Heat & Cool':\n self._api._opmode = 3; \n self._api.set()\n self.schedule_update_ha_state()", "def test_set_attributes(test_common_dao):\n _session = test_common_dao.RAMSTK_SESSION(\n bind=test_common_dao.engine, autoflush=False, expire_on_commit=False)\n DUT = _session.query(RAMSTKSiteInfo).first()\n\n _error_code, _msg = DUT.set_attributes(ATTRIBUTES)\n\n assert _error_code == 0\n assert _msg == (\"RAMSTK SUCCESS: Updating RAMSTKSiteInfo attributes.\")", "def mode(self, value):\n self._set_attr('mode', value)", "def set_attr_values(self):\n ats = self.attributes # convenient short name\n for aid in ats:\n value = ats[aid]['nv'] if 'nv' in 
ats[aid] else (\n ats[aid]['value'] if 'value' in ats[aid] else None)\n if value is not None:\n# self.h5node.attrs[aid] = value\n #- self.file.file_pointer[self.full_path].attrs[aid] = value\n self.file.set_attribute(self.full_path, aid, value)\n #- self.file.h5save_attribute(self.full_path, aid, value)\n #- self.file.h5commands.append(\"set attribute(%s:%s)-%s\" % (self.full_path,\n #- aid, value))", "def set_mode(self, new_mode):\n\n\t\tself._log.info('Mode changed to: %s' % new_mode.name)\n\t\tself._mode = new_mode\n\t\tself._dump_configuration()\n\t\tself._remove_all_flow_records()", "def _setCurrents(self, att, newdata):\n logger.debug(\"Func: _setCurrents\")\n\n self._currentsDict[att] = newdata\n self._saveUserPrefs(self._currentsDict)", "def mode (self, mode) :\r\n self.mode_ = mode", "def set(self):\n\n raise Exception(\"Can't set frmt.\")", "def __resetLocal__(self,featureVals):\n self.amITrained = False\n self._amplitudes = {}\n self._eigs = {}\n self._modes = {}\n self.__Atilde = {}\n self.pivotValues = None\n self.KDTreeFinder = None\n self.featureVals = None", "def setMode(self, newmode=None):\n if newmode==None and self.mode: return\n \n # find it in my dictionary\n for k,v in self.items():\n if k.lower() == \"mode\":\n if newmode:\n self.mode = newmode\n self[k] = str(self.mode)\n else:\n self.mode = int(v)\n \n # it wasn't in the dictionary\n if newmode and not self.mode:\n self.mode = newmode\n self[\"MODE\"] = str(self.mode)\n \n if not self.mode:\n raise NetworkException(\"Supplink mode not set: \" + str(self))", "def set_value(self, index, mode, value):\n address = self.get_address(index, mode)\n self.program[address] = value", "def set_values(self):\n super(ResConfigInherit, self).set_values()\n self.env['ir.config_parameter'].sudo().set_param(\n 'sale_stock_restrict.product_restriction', self.product_restriction)\n self.env['ir.config_parameter'].sudo().set_param(\n 'sale_stock_restrict.check_stock', self.check_stock)", "def set_mode(self, mode):\n print('set_mode', mode)\n self._mode = int(mode)", "def set_state(self, dic: Dict) -> None:\n self.patience = dic[\"patience\"]\n self.cooldown = dic[\"cooldown\"]\n self.cooldown_counter = dic[\"cooldown_counter\"]\n self.mode = dic[\"mode\"]\n self.threshold = dic[\"threshold\"]\n self.threshold_mode = dic[\"threshold_mode\"]\n self.best = dic[\"best\"]\n self.num_bad_epochs = dic[\"num_bad_epochs\"]\n self.mode_worse = dic[\"mode_worse\"]\n self.last_epoch = dic[\"last_epoch\"]", "def setfocus(self, focus):\n self.focus = self.data[focus]\n self.focus_stage = focus\n for k in self.focus.keys():\n setattr(self, k, self.focus[k])", "def set_mode(self, mode: QcQuantizeOpMode):\n self._mode = mode", "def setUp(self):\n self.frequency = 250\n self.firmware = 30474\n self.mask = lmdm.ListModeDataMask(self.frequency, self.firmware)", "def set_preset_mode(self, preset_mode: str | None) -> None:\n self.svc_set_system_mode(PRESET_TO_TCS.get(preset_mode, SystemMode.AUTO))", "def _set_attributes(self):", "def set_temperature(self, **kwargs):\n temperature = kwargs.get(ATTR_TEMPERATURE)\n if temperature is None:\n return\n\n self._current_operation_mode = CONST_MODE_FIXED\n self._device.set_new_temperature(temperature)", "def setDataRate(self, DataRate):\n \n self.DataRate = DataRate", "def set_data(self, df):\r\n # Check data is correct.\r\n cols = df.shape[1]\r\n conditions = [cols > 2,\r\n df.index.name == 'r',\r\n df.columns[0] == 't']\r\n if False in conditions:\r\n raise ValueError(f'{self} wrong data set.')\r\n\r\n # Set 
attributes and log\r\n self.data = df\r\n self._set_rate()\r\n logger.debug(f'{self} set data')", "def _async_update_attrs(self) -> None:\n super()._async_update_attrs()\n self._attr_color_temp = color_temperature_kelvin_to_mired(\n self._device.light_color_temp\n )", "def set_mode(self, mode):\n if mode in self.MODES:\n self.mode = self.MODES[mode]", "def _localSetState(self,pdict):\n self.apex = pdict.pop('apex')\n self.min = pdict.pop('min' )\n self.max = pdict.pop('max' )", "def setModelData(self, ledit, model, midx):\n sel = self._sel\n val = None\n txt = str(ledit.text())\n if txt:\n val = float(txt)\n cond = sel.give_cond(midx.row())\n cond[midx.column()] = val\n sel.notify_wizard()", "def _setup_misc(self, mode):\n self.lr_rate_ph = tf.Variable(0.0, name='lrn_rate', trainable=False)\n self.reuse = None if (mode == 'train') else True\n self.batch_size = self.hparams.batch_size\n if mode == 'eval':\n self.batch_size = 25", "def setMode(cls, mode):\n global CURRENT_MODE\n assert isinstance(mode, cls), \"Invalid mode {}\".format(mode)\n CURRENT_MODE = mode", "def set_simulation_metadata(self, crmode, burst_mode=None):\n if hasattr(self, 'meta') and hasattr(self.meta, 'simulator'):\n self.meta.simulator.cosmic_ray_mode = crmode\n if burst_mode is not None:\n self.meta.subarray_burst_mode = burst_mode\n else:\n strg = \"***Simulation metadata attributes missing from data model\"\n raise AttributeError(strg)", "def set_attr(self):\n\n # Create a new array\n self.fileh.create_array('/', 'array', self.a1)\n for i in range(self.nobjects):\n # Set an attribute\n setattr(self.fileh.root.array.attrs, \"attr\" + str(i), str(self.a1))\n # Put a mark\n self.fileh.mark()\n # Unwind all marks sequentially\n for i in range(self.niter):\n t1 = clock()\n for i in range(self.nobjects):\n self.fileh.undo()\n if verbose:\n print(\"u\", end=' ')\n if verbose:\n print()\n undo = clock() - t1\n # Rewind all marks sequentially\n t1 = clock()\n for i in range(self.nobjects):\n self.fileh.redo()\n if verbose:\n print(\"r\", end=' ')\n if verbose:\n print()\n redo = clock() - t1\n\n print(\"Time for Undo, Redo (set_attr):\", undo, \"s, \", redo, \"s\")", "def resetDefences(self):\n self.currentAP = self.maxAP\n self.currentSP = self.maxSP", "def set_preset_mode(self, preset_mode):\n\n if preset_mode == PRESET_HOME:\n \"\"\"Turn away mode off.\"\"\"\n self._away = False\n self._device.set_temperature_to_auto()\n\n elif preset_mode == PRESET_AWAY:\n \"\"\"Turn away mode on.\"\"\"\n self._away = True\n self._device.set_location_to_frost()\n\n else:\n raise InvalidStateError\n\n pass", "def reset_variables(self) -> None:\n self.attributs = {}\n self.data = []", "def fun_set(self):\n\n self.type.set(self.xtl._scattering_type)\n # self.energy_kev.set(8)\n self.theta_offset.set(self.xtl._scattering_theta_offset)\n self.theta_min.set(self.xtl._scattering_min_theta)\n self.theta_max.set(self.xtl._scattering_max_theta)\n self.twotheta_min.set(self.xtl._scattering_min_two_theta)\n self.twotheta_max.set(self.xtl._scattering_max_two_theta)\n\n if self.orientation.get() == 'Reflection':\n self.direction_h.set(self.xtl._scattering_specular_direction[0])\n self.direction_k.set(self.xtl._scattering_specular_direction[1])\n self.direction_l.set(self.xtl._scattering_specular_direction[2])\n else:\n self.direction_h.set(self.xtl._scattering_parallel_direction[0])\n self.direction_k.set(self.xtl._scattering_parallel_direction[1])\n self.direction_l.set(self.xtl._scattering_parallel_direction[2])", "def setData(self, data):\n 
self.data = data\n dagPath, components = self.__getGeometryComponents()\n self.setInfluenceWeights(dagPath, components)\n self.setBlendWeights(dagPath, components)\n\n for attr in ['skinningMethod', 'normalizeWeights']:\n cmds.setAttr('%s.%s' % (self.node, attr), self.data[attr])", "def set_manual_mode(self):\n self._kernel.set_manual_mode()", "def setModes(self, measurement = 'Ambiant', color_space = 'CIExyY', illumination = 'Emission'):\n self.setMeasurementMode(measurement)\n self.setColorSpace(color_space) \n self.setIlluminationMode(illumination)", "def set_presets(self, presets):\r\n self.presets = presets\r\n self.pvs.preal.putw(presets.real_time)\r\n self.pvs.plive.putw(presets.live_time)\r\n self.pvs.dwell.putw(presets.dwell)\r\n self.pvs.channel_advance.putw(presets.channel_advance)\r\n self.pvs.prescale.putw(presets.prescale)", "def setMode(self,mode):\n self.mode=mode\n if self.mode==0:\n self.setDrawing()\n elif self.mode==1:\n self.setConstruction()\n elif self.mode==2:\n self.setDisplay()\n self.context.text.append(\"mode: \"+self.messages[self.mode])", "def set_mode(self, mode='List'):\r\n _debug('simq03b_api.set_mode')\r\n \r\n #If we choose list mode \r\n if mode.lower() == 'list':\r\n #First choose a list if there was no, otherwise SMA100B is mad\r\n #To know the available list, the query is 'SOUR1:LIST:CAT?'\r\n self.write('SOUR1:LIST:SEL \"/var/user/list1.lsw\"') \r\n \r\n self.write('OUTP1:STAT ON') #Somehow the SMA100B wants the RF to be ON for switching into list mode.\r\n self.write('SOUR1:LIST:MODE STEP') #Make Step mode in order to not automatically sweep all the frequencies\r\n self.write('SOURce:FREQuency:MODE LIST')\r\n else:\r\n #CW and FIXed are synonyms for SMA100B\r\n self.write('SOURce:FREQuency:MODE CW')", "def set_temperature(self, **kwargs):\n self._target_temperature_low = kwargs.get(ATTR_TARGET_TEMP_LOW)\n self._target_temperature_high = kwargs.get(ATTR_TARGET_TEMP_HIGH)\n temp = kwargs.get(ATTR_TEMPERATURE)\n if self.current_operation == 'Heat & Cool' and self._target_temperature_low is not None \\\n and self._target_temperature_high is not None:\n self._api._heatto = self._target_temperature_low\n self._api._coolto = self._target_temperature_high\n elif temp is not None:\n if self.current_operation == 'Heat only':\n self._api._heatto = temp\n self._api._coolto = temp + 10\n elif self.current_operation == 'Cool only':\n self._api._heatto = temp - 10\n self._api._coolto = temp \n self._api.set()\n self.schedule_update_ha_state()", "def assign_model_parameters(self,xmax,zmax,dh,duration):\n self.model_parameters['xmax']=xmax\n self.model_parameters['zmax']=zmax\n self.model_parameters['dh']=dh\n self.model_parameters['duration']=duration", "def setvalue(self,num,name,val):\n self.M.reconfigure(num,{name:float(val)})", "def set_temperature(self, **kwargs):\n low_temp = kwargs.get(ATTR_TARGET_TEMP_LOW)\n high_temp = kwargs.get(ATTR_TARGET_TEMP_HIGH)\n if low_temp is not None:\n low_temp = round(low_temp)\n self._device.set_setpoint_heat(low_temp)\n if high_temp is not None:\n high_temp = round(high_temp)\n self._device.set_setpoint_cool(high_temp)", "def set_slam_type(self, mode):\n if mode == LandmarkMode.RANSAC:\n self.slam.naive = False\n self.slam.slam_mode = SlamMode.LANDMARKS\n self.slam.landmark_mode = LandmarkMode.RANSAC\n elif mode == LandmarkMode.HOUGH:\n self.slam.naive = False\n self.slam.slam_mode = SlamMode.LANDMARKS\n self.slam.landmark_mode = LandmarkMode.HOUGH\n elif mode == SlamMode.SCAN_MATCHING:\n self.slam.naive = False\n 
self.slam.slam_mode = SlamMode.SCAN_MATCHING\n else:\n self.slam.naive = True", "def restore(self):\n if self.obj:\n for attrib in self.attribs:\n setattr(self.obj, attrib, getattr(self, attrib))", "def _localSetState(self,pdict):\n self.mean = pdict.pop('mean' )\n self.sigma = pdict.pop('sigma')", "def _localSetState(self,pdict):\n self.mean = pdict.pop('mean' )\n self.sigma = pdict.pop('sigma')", "def on_model_reset(self):\n cmp = self.mdl.cmp\n ui = self.ui\n\n cmp.pcb_layers[\"silkscreen\"].attribs[\"stroke-width\"] = \"0.5\"\n # Set values on schematic tab\n ui.txt_label.setText(cmp.part_name)\n ui.spnbox_pincount.setValue(len(cmp.connectors))\n ui.spn_add_width.setValue(cmp.s_add_width)\n ui.spn_add_height.setValue(cmp.s_add_height)\n\n # Set values on pcb tab\n ui.txt_spacing_h.setText(str(cmp.p_spacing_h))\n ui.txt_spacing_v.setText(str(cmp.p_spacing_v))\n ui.radio_smd.setChecked(ComponentBase.MOUNT_SMD == cmp.mount)\n ui.radio_tht.setChecked(ComponentBase.MOUNT_THT == cmp.mount)\n\n # Set values on silkscreen tab\n self.ui.txt_silkscreen.set_component(cmp)\n \n # Refresh canvas\n self.ui.svg_canvas.set_component(cmp)\n self.refresh_svg_canvas()\n # Just to update table... ugly way but...\n self.ui.tabWidget.setCurrentIndex(1)\n self.ui.tabWidget.setCurrentIndex(0)", "def _resetParam(self,scn,context):\n\n\t\tif self.camipo:\n\t\t\tself.cam.ipo = self.camipo\n\n\t\tcontext.renderPath = self.path\n\t\tself.cam.lens = self.lens\n\t\tself.cam.scale = self.scale\n\t\tself.cam.shiftX = self.shiftX\n\t\tself.cam.shiftY = self.shiftY\n\t\tself.scn.update()\n\t\treturn", "def setSelectModeData(self):\n self._nodeSelectMode = False\n self._dataSelectMode = True\n self._elemSelectMode = False", "def set_preset_mode(self, preset_mode: str | None) -> None:\n self.svc_set_zone_mode(\n mode=PRESET_TO_ZONE.get(preset_mode),\n setpoint=self.target_temperature if preset_mode == \"permanent\" else None,\n )", "def setEvaluationMode(self, newMode):\n \n pass", "def set_variables(self):\n self.feat_size = None # Set this in your inherited class\n raise NotImplementedError(\"set_variables() is not implemented\")", "def _localSetState(self,pdict):\n self.low = pdict.pop('low' )\n self.alpha = pdict.pop('alpha')\n self.beta = pdict.pop('beta' )", "def set_parameters(self):\n\n if self.model_with_set_params:\n return\n\n self._model_with_set_params = self._parameter_values.process_model(\n self._unprocessed_model, inplace=False\n )\n self._parameter_values.process_geometry(self.geometry)\n self.model = self._model_with_set_params", "def __mode_reset(self):\n\t\tfor key,val in self.ms_all.iteritems():\n\t\t\tval.reset_restart()", "def set_attr(self, attr_name: str, value: Any, indices: VecEnvIndices = None) -> None:\n raise NotImplementedError()", "def mode(self, mode):\n self.set_mode(mode)", "def set_system_cache(self, mode):\n TikCheckUtil.check_equality(\n get_soc_name(), ASCEND_910,\n \"this api doesn't support version: %s\" % get_soc_name())\n TikCheckUtil.check_type_match(\n mode, (int, Scalar), \"mode should be int or Scalar\")\n if isinstance(mode, int):\n TikCheckUtil.check_in_range(\n mode, range(MAX_SYSTEM_CACHE_MODE),\n \"mode should be in the range of [0, 3]\")\n if isinstance(mode, Scalar):\n TikCheckUtil.check_equality(\n mode.dtype, \"uint64\",\n \"scalar_mode should be a scalar of uint64\")\n ctrl = self._mov_ctrl_spr_to_scalar()\n ctrl.set_as(ctrl & SYSTEM_CACHE_MASK)\n ctrl.set_as(ctrl | (mode << SYSTEM_CACHE_MODE_SHIFT_POS))\n with self.new_scope():\n 
self.emit(tvm.call_extern(\"uint64\", \"set_ctrl\", ctrl.get()),\n ONE_IR)", "def _localSetState(self,pdict):\n self.low = pdict.pop('low' )\n self.high = pdict.pop('high' )\n self.alpha = pdict.pop('alpha')\n self.beta = pdict.pop('beta' )", "def setModelData(self, ledit, model, midx):\n sel = self._sel\n val = None\n txt = str(ledit.text())\n if txt :\n val=txt\n cond = sel.give_cond(midx.row())\n cond[midx.column()] = val\n sel.notify_wizard()", "def set_mode(self,mode,state=True):\n\t\tprint \"SET_MODE START\"\n\t\tfor key,val in self.ms_all.iteritems():\n\t\t\tif val.index(mode) is not None:\n\t\t\t\tif state:\n\t\t\t\t\tval.activate( val.index(mode) )\n\t\t\t\telse:\n\t\t\t\t\tval.deactivate( val.index(mode) )\n\t\t\"\"\"\n\t\tprint \"SET_MODE DONE -- ALSO DOING EXPERIMENTAL -- \"\n\t\t# DEBUG / EXPERIMENTAL\n\t\tif self.int_encoder is not None:\n\t\t\tif mode == 'volume' and state == True and 'mode_timeout' in self.cfg_gpio and self.int_enabled:\n\t\t\t\tprint \"DEBUG2.. GPIO/VOLUME ({0}:{1}).. disabling our interrupts..\".format(mode,state)\n\t\t\t\tself.gpio.remove_event_detect(13)\n\t\t\t\tself.gpio.remove_event_detect(6)\n\t\t\t\tself.int_enabled = False\n\t\t\telif mode != 'volume' and state == True and 'mode_timeout' in self.cfg_gpio and not self.int_enabled:\n\t\t\t\tprint \"DEBUG2.. GPIO/NOT VOLUME ({0}:{1}).. enabling our interrupts..\".format(mode,state)\n\t\t\t\tself.gpio.setup((13,6), self.gpio.IN, pull_up_down=self.gpio.PUD_DOWN)\n\t\t\t\tself.gpio.add_event_detect(13, self.gpio.RISING, callback=self.int_encoder) # NO bouncetime \n\t\t\t\tself.gpio.add_event_detect(6, self.gpio.RISING, callback=self.int_encoder) # NO bouncetime\n\t\t\t\tself.int_enabled = True\n\t\t\telif mode == 'volume' and state == True and 'mode_timeout' not in self.cfg_gpio and not self.int_enabled:\n\t\t\t\tprint \"DEBUG2.. ECA/VOLUME ({0}:{1}).. enabling our interrupts..\".format(mode,state)\n\t\t\t\tself.gpio.setup((13,6), self.gpio.IN, pull_up_down=self.gpio.PUD_DOWN)\n\t\t\t\tself.gpio.add_event_detect(13, self.gpio.RISING, callback=self.int_encoder) # NO bouncetime \n\t\t\t\tself.gpio.add_event_detect(6, self.gpio.RISING, callback=self.int_encoder) # NO bouncetime\n\t\t\t\tself.int_enabled = True\n\t\t\telif mode != 'volume' and state == True and 'mode_timeout' not in self.cfg_gpio and self.int_enabled:\n\t\t\t\tprint \"DEBUG2.. ECA/NOT VOLUME ({0}:{1}).. disabling our interrupts..\".format(mode,state)\n\t\t\t\tself.gpio.remove_event_detect(13)\n\t\t\t\tself.gpio.remove_event_detect(6)\n\t\t\t\tself.int_enabled = False\n\t\t\tprint \"DEBUG2.. 
done\"\n\t\t\"\"\"", "def set_reduced_mode(self, on):\r\n return self._arm.set_reduced_mode(on)", "def change_Focus(self, rate):\n self.speed = int(rate)\n print(\"Setting Focus Rate to: \" + str(rate))", "def reset_attributes(self):\n\n self.ell = None\n self.ell_jacobian = None\n self.ell_hessian = None\n\n self.ell_hyperparam = None\n self.ell_jacobian_hyperparam = None\n self.ell_hessian_hyperparam = None\n\n self.Y = None\n self.Cinv = None\n self.C = None\n self.Mz = None\n self.MMz = None\n self.sigma2 = None\n self.sigma02 = None\n self.Kninv = None\n self.KnpKninv = None\n\n self.Y_C_Mz_hyperparam = None\n self.sigma_hyperparam = None\n self.MMz_hyperparam = None\n self.Kninv_KnpKninv_hyperparam = None", "def _set_attr(self):\n self.as_skeletal = self._import_as_skeleton()\n self.materials = self._import_materials()\n self.textures = self._import_textures()", "def setScale(self, mode='ACC', scale=0):\r\n\t\tif mode.upper() == 'ACC':\r\n\t\t\treg = 0x1C\r\n\t\telif mode.upper() == 'GYR':\r\n\t\t\treg = 0x1B\t\t\r\n\t\telse:\r\n\t\t\treturn False\r\n\t\tcurrentVal = self.read(reg)\r\n\t\tcurrentVal = self.dec2BinList(currentVal)\r\n\t\tscale = self.dec2BinList(value=scale,bits=2)\r\n\t\tcurrentVal[3] = scale[0]\r\n\t\tcurrentVal[4] = scale[1]\r\n\t\tcurrentVal = self.binList2Dec(currentVal)\r\n\t\tself.write(reg, currentVal)", "def set_train(self):\n self.train()\n self.volatile = False", "def mode(self, mode):\n\n self._mode = mode", "def mode(self, mode):\n\n self._mode = mode", "def mode(self, mode):\n\n self._mode = mode", "def _localSetState(self,pdict):\n self.mu = pdict.pop('mu')", "def _localSetState(self,pdict):\n self.location = pdict.pop('location')\n self.scale = pdict.pop('scale' )", "def _localSetState(self,pdict):\n self.location = pdict.pop('location')\n self.scale = pdict.pop('scale' )", "def set(self, attrname, value):\n setattr(self, attrname, value)\n self.dirty = True", "def set_state( self ):", "def __setstate__(self, dict):\n\n\t\tself.__dict__ = dict\n\n\t\t# Set missing values to defaults.\n\t\tself._device = None\n\t\tself.resources = {}", "def set_mode_train(self):\n self._set_mode('train')\n return self", "def init_game_setting(self):\n np.random.seed(1) \n self.s_prev = np.zeros((80, 80, 1))\n print('loading trained model from {}'.format(self.model_path))\n self.sess = tf.InteractiveSession(graph=self.model)\n self.saver.restore(self.sess, self.model_path)", "def set_mode(self, mode):\n self.mode = mode\n self.btn_mode.setText(f\"{mode.title()}\\u25BE\")\n self.state_changed()", "def set_operation_mode(self, operation_mode):\n self.api.device_control(self.obj_id, \"modeSet\", {\"value\": operation_mode})", "def set_data(self, data):\n self._model.set_data(data)\n self.__refresh()", "def test_device_state_attributes(self):\n self.port.data = {\"v_rms\": 1.25, \"i_rms\": 2.75}\n assert {\"volts\": 1.2, \"amps\": 2.8} == self.switch.device_state_attributes", "def set_temperature(self, **kwargs: Any) -> None:\n if kwargs.get(ATTR_TEMPERATURE) is not None:\n self.vera_device.set_temperature(kwargs.get(ATTR_TEMPERATURE))\n\n self.schedule_update_ha_state()", "def set_attribute(self, name, value):\n\n pass", "def set_attributes(self, attributes):\n _error_code = 0\n _msg = \"RAMSTK SUCCESS: Updating RAMSTKMeasurement {0:d} attributes.\". 
\\\n format(self.measurement_id)\n\n try:\n self.code = str(\n none_to_default(attributes['code'], 'Measurement Code'))\n self.description = str(\n none_to_default(attributes['description'],\n 'Measurement Description'))\n self.description = str(\n none_to_default(attributes['measurement_type'], 'unknown'))\n except KeyError as _err:\n _error_code = 40\n _msg = (\"RAMSTK ERROR: Missing attribute {0:s} in attribute \"\n \"dictionary passed to \"\n \"{1:s}.set_attributes().\").format(_err,\n self.__class__.__name__)\n\n return _error_code, _msg", "def set_temperature(self):\n self.temperature = self.gui.doubleSpinBox_temperature.value()\n self.logger.debug('Changing the temperature to {}K'.format(self.temperature))\n\n self.anc350_instrument.temperature = self.temperature\n self.anc350_instrument.set_temperature_limits()\n\n self.max_dclevel_V = self.anc350_instrument.max_dC_level\n\n self.logger.debug('Changed the scanner piezo limits to {}'.format(self.max_dclevel_V))" ]
[ "0.59262383", "0.5660007", "0.55965436", "0.5562529", "0.5450975", "0.54298156", "0.54060215", "0.53941303", "0.53482354", "0.5345499", "0.53415346", "0.53402495", "0.5300903", "0.52654946", "0.52514917", "0.52222824", "0.5211762", "0.52113414", "0.5209696", "0.5204078", "0.5194123", "0.5184927", "0.51835614", "0.51621985", "0.5158411", "0.5156063", "0.51311964", "0.51306415", "0.5121351", "0.51172864", "0.511255", "0.51065004", "0.50932866", "0.5092696", "0.50913423", "0.5090105", "0.5089045", "0.508303", "0.5071149", "0.50646234", "0.5063791", "0.50450075", "0.50435203", "0.50415367", "0.502975", "0.5028329", "0.5023109", "0.5019641", "0.5000027", "0.49889606", "0.49877784", "0.4985648", "0.49832445", "0.49827662", "0.49809387", "0.4966196", "0.49640238", "0.49605414", "0.49604887", "0.49604887", "0.49567932", "0.49563757", "0.49488074", "0.494236", "0.49388647", "0.49369645", "0.4935492", "0.49338713", "0.49298048", "0.49275786", "0.49204248", "0.4917603", "0.4917093", "0.4913919", "0.49125957", "0.4899635", "0.48902762", "0.48874882", "0.48852438", "0.48846075", "0.48807588", "0.4875578", "0.4875578", "0.4875578", "0.4869616", "0.48653716", "0.48653716", "0.48513627", "0.4848827", "0.4844561", "0.48404104", "0.48394826", "0.48348168", "0.48342255", "0.48330608", "0.4831983", "0.48311153", "0.48256552", "0.48256028", "0.48167753" ]
0.6953201
0
Calculate the Criticality for the Mode. Mode Criticality = Item Hazard Rate * Mode Ratio * Mode Operating Time * Effect Probability
def calculate_criticality(self, item_hr): _error_code = 0 _msg = 'RAMSTK SUCCESS: Calculating failure mode {0:d} criticality.'.\ format(self.mode_id) if item_hr < 0.0: _error_code = 2010 _msg = _(u"RAMSTK ERROR: Item hazard rate has a negative value.") raise OutOfRangeError(_msg) if not 0.0 <= self.mode_ratio <= 1.0: _error_code = 2010 _msg = _( u"RAMSTK ERROR: Failure mode ratio is outside the range of " u"[0.0, 1.0].") raise OutOfRangeError(_msg) if self.mode_op_time < 0.0: _error_code = 2010 _msg = _(u"Failure mode operating time has a negative value.") raise OutOfRangeError(_msg) if not 0.0 <= self.effect_probability <= 1.0: _error_code = 2010 _msg = _(u"Failure effect probability is outside the range " u"[0.0, 1.0].") raise OutOfRangeError(_msg) self.mode_hazard_rate = item_hr * self.mode_ratio self.mode_criticality = self.mode_hazard_rate \ * self.mode_op_time * self.effect_probability if self.mode_hazard_rate < 0.0: _error_code = 2010 _msg = _(u"Failure mode hazard rate has a negative value.") raise OutOfRangeError(_msg) if self.mode_criticality < 0.0: _error_code = 2010 _msg = _(u"Failure mode criticality has a negative value.") raise OutOfRangeError(_msg) return _error_code, _msg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def conductivity(self):\n m = 1.67296736e-02 # Determined from optimisation\n c = 8.54665149e-05 # Determined from optimisation\n return m * self.concentration + c", "def coherence(self):\r\n return np.abs(self.coherency) ** 2", "def functionality(self):\n self._functionality = 0.12 * self.CAMC + 0.22 * self.NOP + 0.22 * self.CIS + 0.22 * self.DSC + 0.22 * self.NOH\n return round(self._functionality, 5)", "def test_concentration_profile(self):\n\n # TODO: uncomment when have average concentrations\n # small number so that can use array less\n # epsilon = 0.001\n\n # if self.operating_condition == \"discharge\":\n # np.testing.assert_array_less(\n # -self.c_e_n_av.entries, self.c_e_av.entries + epsilon\n # )\n # np.testing.assert_array_less(\n # self.c_e_p_av.entries, self.c_e_av.entries + epsilon\n # )\n # elif self.operating_condition == \"charge\":\n # np.testing.assert_array_less(\n # -self.c_e_n_av.entries, self.c_e_av.entries + epsilon\n # )\n # np.testing.assert_array_less(\n # self.c_e_p_av.entries, self.c_e_av.entries + epsilon\n # )\n # elif self.operating_condition == \"off\":\n # np.testing.assert_array_equal(self.c_e_n_av.entries, self.c_e_av.entries)\n # np.testing.assert_array_equal(self.c_e_s_av.entries, self.c_e_av.entries)\n # np.testing.assert_array_equal(self.c_e_p_av.entries, self.c_e_av.entries)", "def coherence(self):\r\n coherence = np.abs(self.coherency ** 2)\r\n\r\n return coherence", "def critical_depth(self):\n crit_depth = math.pow((self.flow**2 /\n (self.width ** 2 * Channel.g)), (1/3))\n return crit_depth", "def complexity(model):\n size = cfg.TRAIN.IM_SIZE\n cx = {\"h\": size, \"w\": size, \"flops\": 0, \"params\": 0, \"acts\": 0}\n cx = model.complexity(cx)\n return {\"flops\": cx[\"flops\"], \"params\": cx[\"params\"], \"acts\": cx[\"acts\"]}", "def aic_c(self):\n if hasattr(self, '_aic_c'):\n return self._aic_c\n else:\n k = len(self.params)\n n = self.data['n'].sum()\n self._aic_c = self.aic() + (2*k**2 + 2*k)/(n - k - 1)\n return self._aic_c", "def cangeMode( Tables, WarningMessage, Mode ):\n\n if ( Mode == 1 ):\n\n UniformValue = Tables[ \"ElasticModulus\" ].getValue( 0, 0 )\n Tables[ \"ElasticModulus\" ].setValue( 0, 1, UniformValue )\n Tables[ \"ElasticModulus\" ].setValue( 0, 2, UniformValue )\n\n UniformValue = Tables[ \"ElasticModulus\" ].getFloatValue( 0, 0 ) \\\n / ( 2.0 * ( 1.0 + Tables[ \"PoissonRatios\" ].getFloatValue( 0, 0 )))\n Tables[ \"ShearModulus\" ].setValue( 0, 0, '{:.2e}'.format( UniformValue ) )\n Tables[ \"ShearModulus\" ].setValue( 0, 1, '{:.2e}'.format( UniformValue ) )\n Tables[ \"ShearModulus\" ].setValue( 0, 2, '{:.2e}'.format( UniformValue ) )\n\n UniformValue = Tables[ \"PoissonRatios\" ].getValue( 0, 0 )\n Tables[ \"PoissonRatios\" ].setValue( 0, 1, UniformValue )\n Tables[ \"PoissonRatios\" ].setValue( 0, 2, UniformValue )\n\n try:\n testInputData( Mode, Tables[ \"PoissonRatios\" ].getData() )\n except VibroP_DataCorrupted as Error:\n WarningMessage.printMessage( str( Error ) )\n\n\n\n if ( Mode == 0 ):\n\n Tables[ \"ElasticModulus\" ].restoreValue( 0, 1 )\n Tables[ \"ElasticModulus\" ].restoreValue( 0, 2 )\n\n Tables[ \"ShearModulus\" ].restoreValue( 0, 0 )\n Tables[ \"ShearModulus\" ].restoreValue( 0, 1 )\n Tables[ \"ShearModulus\" ].restoreValue( 0, 2 )\n\n Tables[ \"PoissonRatios\" ].restoreValue( 0, 1 )\n Tables[ \"PoissonRatios\" ].restoreValue( 0, 2 )\n\n precomputePoissonRatios( Tables )", "def calcCV(self):\n # Make sure Zm Area and Standard Error are already calculated\n if not hasattr(self,'ZmArea'):\n 
self.calcZmArea()\n if not hasattr(self,'SE'):\n self.calcSE()\n # Coefficient of Variation = Standard Error / Zm Area\n if self.ZmArea > 0:\n self.CV = self.SE / self.ZmArea\n else:\n self.CV = 0\n return self.CV", "def calcCV(self):\n # Make sure Zm Area and Standard Error are already calculated\n if not hasattr(self,'ZmArea'):\n self.calcZmArea()\n if not hasattr(self,'SE'):\n self.calcSE()\n # Coefficient of Variation = Standard Error / Zm Area\n if self.ZmArea > 0:\n self.CV = self.SE / self.ZmArea\n else:\n self.CV = 0\n return self.CV", "def _normed_concentration(self, time: float) -> _VectorisedFloat:\n # The model always starts at t=0, but we avoid running concentration calculations\n # before the first presence as an optimisation.\n if time <= self._first_presence_time():\n return self.min_background_concentration()/self.normalization_factor()\n \n next_state_change_time = self._next_state_change(time)\n\n RR = self.removal_rate(next_state_change_time)\n # If RR is 0, conc_limit does not play a role but its computation \n # would raise an error -> we set it to zero.\n try:\n conc_limit = self._normed_concentration_limit(next_state_change_time)\n except ZeroDivisionError:\n conc_limit = 0.\n\n t_last_state_change = self.last_state_change(time)\n conc_at_last_state_change = self._normed_concentration_cached(t_last_state_change)\n\n delta_time = time - t_last_state_change\n fac = np.exp(-RR * delta_time)\n\n return conc_limit * (1 - fac) + conc_at_last_state_change * fac", "def electrical_delay(self):\n mode = self._pna.query('CALC{}:CORR:EDEL:TIME?'.format(self._channel))\n if mode:\n return float(mode)*1000000000.0\n else:\n raise InstrIOError(cleandoc('''Agilent PNA did not return the\n channel {} electrical delay'''.format(self._channel)))", "def concentration(self, time: float) -> _VectorisedFloat:\n return (self._normed_concentration_cached(time) * \n self.normalization_factor())", "def _critic(self, image):\n return torch.mean(self.critic(image))", "def coherency(self):\r\n coherency = tsa.cache_to_coherency(self.cache, self.ij)\r\n\r\n return coherency", "def cond_dict(calib, F, t, p):\n try:\n Conductivity = []\n f = [x/1000 for x in F]\n for F_0, t_0, p_0 in zip(f, t, p):\n temp = ((calib['G'] + calib['H'] * math.pow(F_0,2)\n + calib['I'] * math.pow(F_0,3)\n + calib['J'] * math.pow(F_0,4))\n / (1 + calib['CTcor'] * t_0 + calib['CPcor'] * p_0))\n temp = round(temp, 5)\n Conductivity.append(temp)\n #single mode\n except:\n f = F/1000\n Conductivity = ((calib['G'] + calib['H'] * math.pow(f,2)\n + calib['I'] * math.pow(f,3)\n + calib['J'] * math.pow(f,4))\n / (1 + calib['CTcor'] * t + calib['CPcor'] * p))\n Conductivity = round(Conductivity,5)\n return Conductivity", "def effectiveness(self):\n self._effectiveness = 0.20 * self.ANA + 0.20 * self.DAM + 0.20 * self.MOA + 0.20 * self.MFA + 0.20 * self.NOP\n return round(self._effectiveness, 5)", "def orbital_eccentricity(self):\n return self._orbital_eccentricity", "def Get_Meas_Res_Reliability(self, mode, ch=1):\n rdStr = self.query(f':MEAS{ch}:RES:REL? 
{mode}')\n return rdStr", "def test_calc_mode():\r\n lamb = 1.8e-6\r\n radius = 50e-6\r\n num_points = 50\r\n x = np.linspace(-1, 1, num_points) * radius\r\n y = np.linspace(-1, 1, num_points)[:, None] * radius\r\n r = (x ** 2 + y ** 2) ** 0.5\r\n theta = np.arctan2(x, y)\r\n dA = (x[1] - x[0]) * (y[1, 0] - y[0, 0])\r\n ##\r\n M = 5\r\n N = 3\r\n fields = []\r\n for m in np.arange(1, M + 1):\r\n for n in np.arange(-N, N + 1):\r\n for theta0 in (0, np.pi / 2):\r\n fields.append(hc.calc_mode(1.5, radius, n, m, lamb, r, theta, dA, theta0))\r\n rows = []\r\n for f1 in fields:\r\n row = []\r\n for f2 in fields:\r\n row.append(hc.calc_mode_overlap(f1, f2, dA))\r\n rows.append(row)\r\n overlap = np.array(rows)", "def calculate_part(self):\r\n\r\n from math import exp\r\n\r\n self.hazard_rate_model = {}\r\n\r\n if self.hazard_rate_type == 1:\r\n self.hazard_rate_model['equation'] = 'lambdab * piQ'\r\n elif self.hazard_rate_type == 2:\r\n self.hazard_rate_model['equation'] = 'lambdab * piQ * piE * piCV'\r\n\r\n # Base hazard rate.\r\n _stress = (self.operating_voltage + self.acvapplied) / \\\r\n self.rated_voltage\r\n try:\r\n self.hazard_rate_model['lambdab'] = \\\r\n 0.00115 * ((_stress / 0.4)**5 + 1) * \\\r\n exp(2.5 * ((self.temperature_active + 273) /\r\n self.reference_temperature)**18)\r\n except(OverflowError, ZeroDivisionError):\r\n # TODO: Handle overflow error.\r\n return True\r\n\r\n # Capacitance correction factor.\r\n self.piCV = 1.4 * (self.capacitance * 1000000.0)**0.12\r\n self.hazard_rate_model['piCV'] = self.piCV\r\n\r\n return Capacitor.calculate_part(self)", "def calculate_part(self):\r\n\r\n from math import exp\r\n\r\n self.hazard_rate_model = {}\r\n\r\n if self.hazard_rate_type == 1:\r\n self.hazard_rate_model['equation'] = 'lambdab * piQ'\r\n elif self.hazard_rate_type == 2:\r\n self.hazard_rate_model['equation'] = 'lambdab * piQ * piE * piCV'\r\n\r\n # Base hazard rate.\r\n _stress = (self.operating_voltage + self.acvapplied) / \\\r\n self.rated_voltage\r\n try:\r\n self.hazard_rate_model['lambdab'] = \\\r\n 0.00069 * ((_stress / 0.4)**5 + 1) * \\\r\n exp(2.5 * ((self.temperature_active + 273) /\r\n self.reference_temperature)**18)\r\n except(OverflowError, ZeroDivisionError):\r\n # TODO: Handle overflow error.\r\n return True\r\n\r\n # Capacitance correction factor.\r\n self.piCV = 1.2 * (self.capacitance * 1000000.0)**0.092\r\n self.hazard_rate_model['piCV'] = self.piCV\r\n\r\n return Capacitor.calculate_part(self)", "def receiver_operating_characteristic_curve(self):\r\n\r\n labels, scores = self.receiver_operating_characteristic_labels_scores()\r\n return sklearn.metrics.roc_curve(labels, scores)", "def evaluate(self, mode=0):\r\n winner = self.determine_winner()\r\n if winner:\r\n return winner * self.WIN_SCORE\r\n\r\n if mode == 1:\r\n return self.centre_priority_evaluate()\r\n elif mode == 2:\r\n return 0.5 * (self.centre_priority_evaluate() + self.piece_evaluate())\r\n else:\r\n return self.piece_evaluate()", "def CE(self, p_true, p_model):\n return np.sum(-np.array(p_true)*np.log2(np.array(p_model)))", "def _get_concentration(self, state):\n return self.fc(state.float_features).exp() + self.EPSILON", "def _critic(self):\n nactions = np.product(self.env.action_shape)\n action_input = keras.layers.Input(shape=(nactions,), name='action_input')\n obs_input = keras.layers.Input(shape=(1,) + self.env.observation_space.shape, name='observation_input')\n flattened_obs = keras.layers.Flatten()(obs_input)\n\n out = keras.layers.Concatenate()([action_input, flattened_obs])\n 
out = keras.layers.Dense(16)(out)\n out = keras.layers.Activation('relu')(out)\n out = keras.layers.Dense(8)(out)\n out = keras.layers.Activation('relu')(out)\n out = keras.layers.Dense(1)(out) # Must be single output\n out = keras.layers.Activation('linear')(out)\n critic = keras.models.Model(inputs=[action_input, obs_input], outputs=out)\n return critic, action_input", "def get_cpu_mode(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetCpuMode', self.handle)", "def calc_cogen_const(q_heat_Wh, thermal_eff, electrical_eff):\n q_fuel_Wh = q_heat_Wh / thermal_eff\n p_el_Wh = q_fuel_Wh * electrical_eff\n q_anth_Wh = q_fuel_Wh - (q_heat_Wh + p_el_Wh)\n return q_fuel_Wh, p_el_Wh, q_anth_Wh", "def crestCavity(self):\n return self.optimiseParam(lambda ph: -self.phaseToMomentum(ph), 'Crest cavity', 'phase', 'degrees', tol=1e-4)", "def test_concentration_increase_decrease(self):\n\n t, x_n, x_p, r_n, r_p = self.t, self.x_n, self.x_p, self.r_n, self.r_p\n\n if self.model.options[\"particle\"] in [\"quadratic profile\", \"quartic profile\"]:\n # For the assumed polynomial concentration profiles the values\n # can increase/decrease within the particle as the polynomial shifts,\n # so we just check the average instead\n neg_diff = self.c_s_n_rav(t[1:], x_n) - self.c_s_n_rav(t[:-1], x_n)\n pos_diff = self.c_s_p_rav(t[1:], x_p) - self.c_s_p_rav(t[:-1], x_p)\n neg_end_vs_start = self.c_s_n_rav(t[-1], x_n) - self.c_s_n_rav(t[0], x_n)\n pos_end_vs_start = self.c_s_p_rav(t[-1], x_p) - self.c_s_p_rav(t[0], x_p)\n else:\n neg_diff = self.c_s_n(t[1:], x_n, r_n) - self.c_s_n(t[:-1], x_n, r_n)\n pos_diff = self.c_s_p(t[1:], x_p, r_p) - self.c_s_p(t[:-1], x_p, r_p)\n neg_end_vs_start = self.c_s_n(t[-1], x_n, r_n) - self.c_s_n(t[0], x_n, r_n)\n pos_end_vs_start = self.c_s_p(t[-1], x_p, r_p) - self.c_s_p(t[0], x_p, r_p)\n\n if self.operating_condition == \"discharge\":\n np.testing.assert_array_less(neg_diff, 1e-16)\n np.testing.assert_array_less(-1e-16, pos_diff)\n np.testing.assert_array_less(neg_end_vs_start, 0)\n np.testing.assert_array_less(0, pos_end_vs_start)\n elif self.operating_condition == \"charge\":\n np.testing.assert_array_less(-1e-16, neg_diff)\n np.testing.assert_array_less(pos_diff, 1e-16)\n np.testing.assert_array_less(0, neg_end_vs_start)\n np.testing.assert_array_less(pos_end_vs_start, 0)\n elif self.operating_condition == \"off\":\n np.testing.assert_array_almost_equal(neg_diff, 0)\n np.testing.assert_array_almost_equal(pos_diff, 0)\n np.testing.assert_array_almost_equal(neg_end_vs_start, 0)\n np.testing.assert_array_almost_equal(pos_end_vs_start, 0)", "def _get_cu(self):\n c_undrained=0\n #group_index = self._data['GI']\n if self.is_clayey():\n c_undrained = self.qu(self._data[SoilProperty.N60])/2\n #c_undrained=_clamp(c_undrained, 10, 103)\n # Plasix calculation needs very small c_undrained\n #if c_undrained<0.21:\n # c_undrained = 0.21\n #use 0.2 as per plasix recommendation\n return c_undrained#the cu is always 103 check with small value of n_60, some mistake maybe", "def get_cpu_mode(self):\n\t\treturn call_sdk_function('PrlSrvCfg_GetCpuMode', self.handle)", "def get_energy_effectiveness_ratio(self, obj):\n diesel_row = CreditCalculationService.get(\n category_id=obj.energy_effectiveness_ratio_category_id,\n effective_date=self.effective_date,\n fuel_class__fuel_class=\"Diesel\",\n model_name=\"EnergyEffectivenessRatio\"\n )\n\n gasoline_row = CreditCalculationService.get(\n category_id=obj.energy_effectiveness_ratio_category_id,\n effective_date=self.effective_date,\n 
fuel_class__fuel_class=\"Gasoline\",\n model_name=\"EnergyEffectivenessRatio\"\n )\n\n return {\n \"diesel\": diesel_row.ratio if diesel_row else None,\n \"gasoline\": gasoline_row.ratio if gasoline_row else None\n }", "def _excitonic_coft(self,SS,AG,n):\n \n # SystemBathInteraction\n sbi = AG.get_SystemBathInteraction()\n # CorrelationFunctionMatrix\n cfm = sbi.CC\n \n c0 = AG.monomers[0].get_egcf((0,1))\n Nt = len(c0)\n \n ct = numpy.zeros((Nt),dtype=numpy.complex128)\n\n # electronic states corresponding to single excited states\n elst = numpy.where(AG.which_band == 1)[0]\n for el1 in elst:\n for el2 in elst:\n if cfm.cpointer[el1-1,el2-1] == 0:\n continue\n coft = cfm.get_coft(el1-1,el2-1) \n for kk in AG.vibindices[el1]:\n for ll in AG.vibindices[el2]:\n ct += ((SS[kk,n]**2)*(SS[ll,n]**2)*coft)\n return ct", "def testRhoCrit():\n units = unitsystem.UnitSystem()\n assert units.rho_crit(0.7) == 9.204285430050004e-30\n assert units.rho_crit(1.0) == 1.8784255979693885e-29", "def chi_c_real(params):\n Qi = Q_i(params)\n Qc = params['Q_e_real'].value\n return ((4 * Qc * Qi) /\n (Qc + Qi) ** 2)", "def get_intrinsic_capacitance(self):\n nmos_stack = 4\n mult = 1\n nmos_drain_c = self.drain_c_(self.nmos_width*mult, \n nmos_stack,\n mult)\n pmos_drain_c = self.drain_c_(self.pmos_width*mult, \n 1,\n mult) \n return nmos_drain_c + pmos_drain_c", "def calculate_part(self):\r\n\r\n from math import exp\r\n\r\n self.hazard_rate_model = {}\r\n\r\n if self.hazard_rate_type == 1:\r\n self.hazard_rate_model['equation'] = 'lambdab * piQ'\r\n\r\n self._lambdab_count = self._lambdab_count[self.specification - 1]\r\n\r\n elif self.hazard_rate_type == 2:\r\n self.hazard_rate_model['equation'] = 'lambdab * piQ * piE * piCV'\r\n\r\n # Base hazard rate.\r\n _stress = (self.operating_voltage + self.acvapplied) / \\\r\n self.rated_voltage\r\n try:\r\n self.hazard_rate_model['lambdab'] = \\\r\n 0.00086 * ((_stress / 0.4)**5 + 1) * \\\r\n exp(2.5 * ((self.temperature_active + 273) /\r\n self.reference_temperature)**18)\r\n except(OverflowError, ZeroDivisionError):\r\n # TODO: Handle overflow and zero division errors.\r\n return True\r\n\r\n # Capacitance correction factor.\r\n self.piCV = 1.2 * (self.capacitance * 1000000.0)**0.095\r\n self.hazard_rate_model['piCV'] = self.piCV\r\n\r\n return Capacitor.calculate_part(self)", "def compute(self):\n\t\tdata \t= self.policy.data\n\t\tCno_mask \t= data[self.policy.finalname].squeeze()\n\t\tdata[self.name] = Cno_mask/self._protectionFactor\n\n\t\t## Setting the final values as Cout when outside and Cin between begin and end. 
\n\t\tif \"wear\" in self.params: \n\t\t\tif \"duration\" not in self.params: \n\t\t\t\traise ValueError(\"Must supply both wear and duration (as timedelta str)\")\n\n\t\t\tabegin = data[self.policy.datetimename][0].values + pandas.to_timedelta(self.params[\"wear\"])\n\t\t\taend = abegin + pandas.to_timedelta(self.params[\"duration\"])\n\n\t\telse:\n\t\t\tabegin = self.params.get(\"begin\",None) \n\t\t\taend = self.params.get(\"end\" ,None) \n\n\t\tabegin = data[self.policy.datetimename].to_series()[0] if abegin is None else abegin\n\t\taend = data[self.policy.datetimename].to_series()[-1] if aend is None else aend\n\n\t\tactionTimeList = data.datetime.to_series()[data[self.policy.datetimename].to_series().between(abegin,aend)]\n\t\tdata[self.policy.finalname] = data[self.name].where(data[self.policy.datetimename].isin(actionTimeList),Cno_mask)\n\t\tdata.attrs[self.actionid] = { \"type\" : self.actiontype,\"actionid\": self.actionid,\"name\" : self.name,\\\n\t\t\t\t\t\t \"params\" : {\n\t\t\t\t\t\t\t\t\"protectionFactor\" : self._protectionFactor,\n\t\t\t\t\t\t\t\t\"begin\" : abegin,\n\t\t\t\t\t\t\t\t\"end\" : aend \n\t\t\t\t\t\t\t\t},\"outputs\" : [self.name]\n\t\t\t\t\t\t }", "def discharge_coefficient(self) -> _VectorisedFloat:\n window_ratio = np.array(self.window_width / self.window_height)\n coefs = np.empty(window_ratio.shape + (2, ), dtype=np.float64)\n\n coefs[window_ratio < 0.5] = (0.06, 0.612)\n coefs[np.bitwise_and(0.5 <= window_ratio, window_ratio < 1)] = (0.048, 0.589)\n coefs[np.bitwise_and(1 <= window_ratio, window_ratio < 2)] = (0.04, 0.563)\n coefs[window_ratio >= 2] = (0.038, 0.548)\n M, cd_max = coefs.T\n\n window_angle = 2.*np.rad2deg(np.arcsin(self.opening_length/(2.*self.window_height)))\n return cd_max*(1-np.exp(-M*window_angle))", "def eccentricity(self):\n new_data = self._data[['pl_pnum', 'pl_orbper', 'pl_orbsmax',\n 'pl_masse', 'pl_orbeccen',\n 'pl_radj', 'pl_dens', 'st_teff',\n 'st_mass', 'st_rad']]\n new_data = new_data.dropna()\n\n features = new_data[['pl_pnum', 'pl_orbper', 'pl_orbsmax',\n 'pl_masse',\n 'pl_radj', 'pl_dens', 'st_teff',\n 'st_mass', 'st_rad']]\n labels = new_data['pl_orbeccen']\n\n features_train, features_test, labels_train, labels_test = \\\n train_test_split(features, labels, test_size=0.2)\n\n # Create an untrained model\n model = DecisionTreeRegressor()\n\n # Train it on the **training set**\n model.fit(features_train, labels_train)\n\n # Compute test accuracy\n test_predictions = model.predict(features_test)\n test_acc = mean_absolute_error(labels_test, test_predictions)\n test_acc_r2 = r2_score(labels_test, test_predictions)\n\n # Plot ML vs Actual\n fig, [ax1, ax2] = plt.subplots(2, figsize=(15, 12))\n\n sns.distplot(test_predictions, kde=False, ax=ax1)\n sns.distplot(labels_test, kde=False, ax=ax2)\n\n ax1.set_title('Distribution of Predicted Eccentricities of Orbits')\n ax1.set_xlabel('Eccentricity of Orbit')\n ax1.set_ylabel('Number of Planets')\n\n ax2.set_title('Distribution of Actual Eccentricities of Orbits')\n ax2.set_xlabel('Eccentricity of Orbit')\n ax2.set_ylabel('Number of Planets')\n\n plt.savefig('figures/ML_Eccentricity.png', bbox_inches='tight')\n\n return (test_acc, test_acc_r2)", "def determine_capacities(path_to_eligibility_categories, path_to_eligible_areas, path_to_statistical_roof_model,\n path_to_pv_prio_result, path_to_wind_prio_result, config):\n with rasterio.open(path_to_eligible_areas) as src:\n meta = src.meta\n areas = src.read(1)\n with rasterio.open(path_to_eligibility_categories) as src:\n 
eligibility_categories = src.read(1)\n flat_roof_share = pd.read_csv(path_to_statistical_roof_model).set_index(\"orientation\").loc[\n \"flat\", \"share_of_roof_areas\"\n ]\n capacities_pv_prio = _determine_capacities(areas, eligibility_categories, config, flat_roof_share, pv_prio=True)\n capacities_wind_prio = _determine_capacities(areas, eligibility_categories, config, flat_roof_share, pv_prio=False)\n _write_to_file(path_to_pv_prio_result, capacities_pv_prio, meta)\n _write_to_file(path_to_wind_prio_result, capacities_wind_prio, meta)", "def get_conductivity(self) -> float:\n try:\n datalist = self.get_data()\n data = datalist[0]\n if data.endswith('\\x00'):\n data = data.rstrip('\\x00')\n return float(data)\n else:\n return float(data)\n except Exception as err:\n print(f'get_conductivity error: {err}')\n return -1", "def test_statistics_calculator_coherence():\n from resistics.statistics.calculator import StatisticCalculator\n import numpy as np\n\n specData, evalfreq = get_spectrum_data()\n calculator = StatisticCalculator()\n calculator.winLen = 1\n assert calculator.winLen == 1\n calculator.setSpectra(specData.freqArray, specData, evalfreq)\n statData = calculator.getDataForStatName(\"coherence\")\n testData = {\n 24: {\n \"cohExHx\": 0.5462519936204147,\n \"cohExHy\": 0.13675856307435255,\n \"cohEyHx\": 0.590909090909091,\n \"cohEyHy\": 0.19523809523809524,\n },\n 40: {\n \"cohExHx\": 0.49360956503813647,\n \"cohExHy\": 0.6379980563654033,\n \"cohEyHx\": 0.6734006734006734,\n \"cohEyHy\": 0.20634920634920634,\n },\n }\n for efreq in evalfreq:\n for key, val in statData[efreq].items():\n np.testing.assert_almost_equal(val, testData[efreq][key])", "def conductor(self):\n return self._S.level()", "def open_circ():\n\n set_mode(mode_cc) # set operation mode to CC\n time.sleep(.250)\n set_CC_current(cc_current=0) # set CC mode current to 0 amps\n time.sleep(.1)\n \n oc_vals = get_input_values() # read open circuits levels\n oc_data_point = data_point(oc_vals) # create data point for open circuit measurement\n voc = oc_data_point[3] # open circuit voltage measurement\n print('Open circuit voltage: ', voc)\n write_data_tofile(oc_data_point) # write data to file\n \n return voc", "def calculate_effective_capacitance(self, load):\n c_load = load\n # In fF\n c_para = spice[\"min_tx_drain_c\"] * (self.nmos_size / parameter[\"min_tx_size\"])\n transition_prob = 0.1875\n return transition_prob * (c_load + c_para)", "def _normed_concentration_cached(self, time: float) -> _VectorisedFloat:\n return self._normed_concentration(time)", "def calculate(self):\n\n specificity = self.confusion_matrix.tn / (self.confusion_matrix.tn + self.confusion_matrix.fp)\n return 1 - specificity", "def get_duration_dispersion_convexity(self,periods=1, current_period = 0): \n discounted_cashflow = [self.payment/(1+self.interest)**i for i in self.index[current_period:]]\n discount_sum = sum(discounted_cashflow)\n weight = [cf/discount_sum for cf in discounted_cashflow]\n time_weight = [weight[i] * i for i in range(1,len(weight))]\n sum_time_weight = sum(time_weight)\n dispersion_array = [((i - sum_time_weight)**2)*weight[i] for i in range(1,len(weight))]\n dispersion_statistic = sum(dispersion_array)\n cashflow_yield = np.irr([-self.table[\"balance\"][current_period]] + [self.payment] * (self.maturity - current_period))\n convexity_array = [i * (i+1) * weight[i] for i in range(1,len(weight))]\n convexity_statistic = sum(convexity_array)/(1+cashflow_yield)**2\n convexity = (sum_time_weight ** 2 + sum_time_weight + 
dispersion_statistic)/(1+cashflow_yield)**2\n \n return {\"duration\":sum_time_weight/periods,\\\n \"dispersion\":dispersion_statistic/periods,\\\n \"convexity\":convexity_statistic/periods}", "def cci(self) -> float:\n return self._cci", "def _normed_concentration_limit(self, time: float) -> _VectorisedFloat:\n V = self.room.volume\n RR = self.removal_rate(time)\n \n return (self.population.people_present(time) / (RR * V) +\n self.min_background_concentration()/self.normalization_factor())", "def ci(self):\n var_assumptions = self.var_assumptions if self.var_assumptions == \"pooled\" else \"unequal\"\n ci_vals = self.comparison.zconfint_diff(self.alpha, self.hypothesis_sm, var_assumptions)\n\n return [ci_vals, self.ci_percents]", "def criticize(self, env: FakeEnv) -> Tensor:\n c = Critique(env.observation)", "def critical(self) -> pulumi.Output[Optional['outputs.InfraAlertConditionCritical']]:\n return pulumi.get(self, \"critical\")", "def evaluation_cc(self, property='clustering-coeff'):\n\n if property == 'clustering-coeff':\n rw_cc = [np.mean(clustering_coef_wu(self.rw_data[t])) for t in range(0, self.T)]\n smth_cc = [np.mean(clustering_coef_wu(self.smth_data[t])) for t in range(0, self.T)]\n elif property == 'transitivity':\n rw_cc = [np.mean(transitivity_wu(self.rw_data[t])) for t in range(0, self.T)]\n smth_cc = [np.mean(transitivity_wu(self.smth_data[t])) for t in range(0, self.T)]\n elif property == 'coreness':\n rw_cc = [np.mean(core.core_periphery_dir(self.rw_data[t])) for t in range(0, self.T)]\n smth_cc = [np.mean(core.core_periphery_dir(self.smth_data[t])) for t in range(0, self.T)]\n elif property == 'assortativity':\n rw_cc = [np.mean(core.assortativity_wei(self.rw_data[t], 0)) for t in range(0, self.T)]\n smth_cc = [np.mean(core.assortativity_wei(self.smth_data[t], 0)) for t in range(0, self.T)]\n elif property == 'modularity':\n rw_cc, _ = get_number_of_components(self.rw_data)\n smth_cc, _ = get_number_of_components(self.smth_data)\n elif property == 'path_length':\n rw_cc = [charpath(rw)[0] for rw in self.rw_data]\n smth_cc = [charpath(sm)[0] for sm in self.smth_data]\n\n # rw_cc_ent = get_entropy_list(rw_cc)\n # smth_cc_ent = get_entropy_list(smth_cc)\n\n return rw_cc, smth_cc", "def ility(self) -> str:\n return self.system_quality_attribute()", "def productivity(self):\n return self.zmwMetric(\"Productivity\")", "def getCoolerPower(self):\n return 90.0", "def cv(self):\n return self.close.std() / self.close.mean()", "def mode_energy2(A,m, ifreq=46, L= 20):\n\n cx = lmreshape(A.S.Cx.s2)\n cy = lmreshape(A.S.Cy.s2)\n cz = lmreshape(A.S.Cz.s2)\n\n if ifreq >0:\n em = np.sum(np.abs(cx[ifreq,:,L+m])**2+np.abs(cy[ifreq,:,L+m])**2+np.abs(cz[ifreq,:,L+m])**2)\n Et = np.sum(np.abs(cx[ifreq])**2+np.abs(cy[ifreq])**2+np.abs(cz[ifreq])**2)\n return em/Et", "def calculate(self):\n\n sensitivity = self.confusion_matrix.tp / (self.confusion_matrix.tp + self.confusion_matrix.fn)\n return 1 - sensitivity", "def CalculateProcessingCapacity(self, problemManager, mineDataManager):\n \n self.oreProcessed = np.zeros(len(mineDataManager.theMiningSystem.oreMined)) \n self.processingPower = np.zeros(len(mineDataManager.theMiningSystem.oreMined)) \n self.processingCapacity = mineDataManager.theMiningSystem.mineOreProductionCapacity # ore is processed at a constant rate\n carryOver = 0.0\n for year in range( len(mineDataManager.theMiningSystem.oreMined )-1 ):\n processedOre = carryOver + mineDataManager.theMiningSystem.oreMined[year]\n \n if(processedOre > self.processingCapacity):\n carryOver 
= processedOre - self.processingCapacity\n processedOre = self.processingCapacity\n else:\n carryOver = 0.0\n self.oreProcessed[year] = processedOre\n \n self.oreProcessed[-1] = carryOver + mineDataManager.theMiningSystem.oreMined[-1] # final year\n \n \n # convert tonnes processed each year to the number of Mwh based on powerlaw fit\n self.processingPower = 3.96*(self.oreProcessed )**0.703 # in Mwh\n \n referenceMetalStr = mineDataManager.theOreBody.type[:2] \n # first two letters of orebody type is assumed to be reference metal for determining processing grade\n # eg AuCu -> gold is reference metal - note that user must select correct method\n \n \n referenceMetalOreConcentration = mineDataManager.theOreBody.metalGrades[referenceMetalStr]\n\n self.concentrateMetalConcentration = 1.0\n \n # lookup concentrateMetalConcentrations based on reference metal type\n \n concentrateConcentrations = {\"Au\":0.75,\"Ag\":0.85,\"Ni\":0.1,\"Cu\":0.25,\"Pb\":0.5}\n \n # find the minimum amount of concentration needed to bring concentrate to market\n minConcentrationFactor = 1e64\n \n for metal,metalOreGrade in mineDataManager.theOreBody.metalGrades.iteritems():\n if metal in concentrateConcentrations:\n concentrateGrade = concentrateConcentrations[metal]\n concFactor = concentrateGrade/(metalOreGrade/(1.0+ mineDataManager.theMiningSystem.dilution) +1e-64)\n if concFactor < 1.0:\n concFactor = 1.0\n #print \"concFactor\", metal, concFactor, metalOreGrade, concentrateGrade\n if(concFactor < minConcentrationFactor ):\n minConcentrationFactor = concFactor\n self.concentrateMetalConcentration = concentrateGrade\n \n # concentrate is calculated based on estimate of mineral content\n self.concentrateProduced = (1.0 - self.processingLoss) \\\n *np.array(mineDataManager.theMiningSystem.oreMined)/minConcentrationFactor \n \n \n return self.processingCapacity", "def _verify_ccd_operation_mode(self, ccd_operation_mode):\n em_mode = ccd_operation_mode['em_mode']\n em_gain = ccd_operation_mode['em_gain']\n hss = ccd_operation_mode['hss']\n preamp = ccd_operation_mode['preamp']\n binn = ccd_operation_mode['binn']\n t_exp = ccd_operation_mode['t_exp']\n ccd_temp = ccd_operation_mode['ccd_temp']\n\n dic_keywords_list = [\n 'binn', 'ccd_temp', 'em_gain', 'em_mode', 'hss', 'preamp', 't_exp']\n\n for key in ccd_operation_mode.keys():\n if key not in dic_keywords_list:\n raise ValueError(\n f'The name provided is not a CCD parameter: {key}')\n\n if list(ccd_operation_mode.keys()).sort() != dic_keywords_list.sort():\n raise ValueError(\n 'There is a missing parameter of the CCD operation mode')\n\n if em_mode not in [0, 1]:\n raise ValueError(\n f'Invalid value for the EM mode: {em_mode}')\n if em_mode == 0:\n if em_gain != 1:\n raise ValueError(\n 'The EM Gain must be 1 for the Conventional'\n + f' Mode: {em_gain}')\n else:\n if em_gain not in [float, int]:\n raise ValueError(\n f'The EM gain must be a number: {em_gain}')\n elif em_gain < 2 or em_gain > 300:\n raise ValueError(\n f'EM gain out of range [2, 300]: {em_gain}')\n\n if preamp not in [1, 2]:\n raise ValueError(\n f'Invalid value for the pre-amplification: {preamp}')\n\n if hss not in [0.1, 1, 10, 20, 30]:\n raise ValueError(\n f'Invalid value for the Readout rate: {hss}')\n\n if binn not in [1, 2]:\n raise ValueError(\n f'Invalid value for the binning: {bin}')\n\n if type(t_exp) not in [float, int]:\n raise ValueError(\n f'The exposure time must be a number: {t_exp}')\n elif ccd_operation_mode['t_exp'] < 1e-5:\n raise ValueError(\n f'Invalid value for the 
exposure time: {t_exp}')\n\n if type(ccd_temp) not in [float, int]:\n raise ValueError(\n f'The CCD temperature must be a number: {ccd_temp}')\n if ccd_temp < -80 or ccd_temp > 20:\n raise ValueError(\n f'CCD temperature out of range [-80, 20]: {ccd_temp}')", "def get_input_capacitance(self):\n return self.gate_c(self.nmos_width+self.pmos_width)", "def costFunction(self):\n priorDiff = np.matrix(self.model.stateVector - self.model.prior).T\n measurementDiff = np.matrix(self.model.observation\n - self.model.modelCalculation).T\n chisq = measurementDiff.T * self.errSinv * measurementDiff\n chisq += priorDiff.T * self.priorSinv * priorDiff\n \n return chisq[0,0]", "def calculate(self):\n\n specificity = self.confusion_matrix.tn / (self.confusion_matrix.tn + self.confusion_matrix.fp)\n\n false_positive_rate = 1 - specificity\n\n true_positive_rate = self.confusion_matrix.tp / (self.confusion_matrix.tp + self.confusion_matrix.fn)\n\n return (true_positive_rate - false_positive_rate + 1) / 2", "def error_coefficient(self,tree,mode='exact'):\n from numpy import dot\n from sympy import Rational, simplify\n code=elementary_weight_str(tree)\n A,b,c = self.A,self.b,self.c\n\n if A.dtype == object:\n exec('coeff = simplify({} - Rational(1, {}))'.format(code, tree.density()))\n else:\n exec(\"coeff = ({} - 1.0 / {})\".format(code, tree.density()))\n return locals()[\"coeff\"] / tree.symmetry()", "def calc_process_coupling_cohesion_ratio(partitions, graph):\n cp = calc_process_coupling(partitions, graph)\n ch = calc_process_cohesion(partitions, graph)\n if cp == 0 or ch == 0:\n pccr = 0\n else:\n pccr = cp / ch\n return float(pccr)", "def cash_ratio(self):\n return self.cash / self.current_liabilities", "def compute_correlation_separability_score(self) -> float:\n sep_scores = pd.DataFrame.from_dict(self.separability_scores).to_numpy()\n sep_scores = minmax_scale(sep_scores)\n corrs = {}\n for tumor_pair in range(sep_scores.shape[1]):\n corr_sep_score = np.corrcoef(PATHO_PRIOR[:, tumor_pair], sep_scores[:, tumor_pair])\n corrs[tumor_pair] = corr_sep_score[1, 0]\n corrs['agg_with_risk'] = sum(\n np.array([val for _, val in corrs.items()]) *\n RISK\n ) \n corrs['agg'] = sum([val for key, val in corrs.items() if type(key)==int]) \n return corrs", "def carpet_mode(self):\n return CarpetModeStatus(self.send(\"get_carpet_mode\")[0])", "def CCI(self, n=PERIOD_20, constant=0.015):\n\n df = self.df\n\n def mad(v):\n \"\"\"\n Mean Absolute Devination Calculator\n \"\"\"\n return np.mean(np.abs(v - np.mean(v)))\n\n typical_price = (df.high + df.low + df.close) / 3\n\n rolled_price = typical_price.rolling(n, min_periods=0)\n\n cci = (typical_price - rolled_price.mean()) / (\n constant * rolled_price.apply(mad, True)\n )\n\n self.df[\"cci\"] = cci.fillna(0)\n\n return cci", "def comptcpquantumrate(self) :\n\t\ttry :\n\t\t\treturn self._comptcpquantumrate\n\t\texcept Exception as e:\n\t\t\traise e", "def dynamic_viscosity_of_air(self) -> float:\n\n return (1.458 * (10 ** (-6)) * (self.ambient_temperature**1.5)) / (\n self.ambient_temperature + 110.4\n )", "def calculate_economics(\n irradiance: pd.DataFrame, temperature: pd.DataFrame, wind_speed: pd.DataFrame,\n CECMod: pd.DataFrame, configuration: float = 1\n ):\n p_out = calculate_dc_output(irradiance, temperature, wind_speed, CECMod=CECMod)\n\n # convert dc to AC - considering a flat loss of 14%\n # we have to improve this in the future\n p_out = [v * 0.86 for v in p_out]\n\n day_count = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n monthly_electricity = 
[]\n\n for month in range(12):\n st_index = sum(day_count[:month + 1]) * 24\n end_index = sum(day_count[:month + 2]) * 24\n data = p_out[st_index: end_index]\n # Note: division by 50 is to match the values - remove it later!\n monthly_electricity.append(sum(data) / len(data) / 50)\n\n total_ac_energy = sum(p_out)\n monthly_ac_energy = pd.DataFrame(\n zip(calendar.month_abbr[1:], monthly_electricity),\n columns=['month', 'Thousand kWh']\n )\n\n # Based on the example here: https://nrel-pysam.readthedocs.io/en/master/Import.html\n\n grid = Grid.default(\"PVWattsCommercial\")\n ur = UtilityRate.from_existing(grid, \"PVWattsCommercial\")\n cl = Cashloan.from_existing(grid,\"PVWattsCommercial\")\n\n sam_data = read_sam_data(configuration)\n for module, data in zip([grid, ur, cl], sam_data[:-1]):\n for k, v in data.items():\n if k == 'number_inputs':\n continue\n try:\n module.value(k, v)\n except AttributeError:\n print(module, k, v)\n\n\n grid.SystemOutput.gen = p_out\n\n grid.execute()\n ur.execute()\n cl.execute()\n\n # list possible outputs here\n adjusted_installed_cost = cl.Outputs.adjusted_installed_cost\n payback_cash_flow = [-1 * x for x in cl.Outputs.cf_discounted_payback]\n\n return total_ac_energy, monthly_ac_energy, adjusted_installed_cost, payback_cash_flow", "def get_duct_linear_heat_loss_coefficient() -> float:\n return 0.49", "def maCruise(self):\n return .77", "def C(self, r, n=None):\n return self.get_coefficient(r, n)", "def get_contractive_loss(self):\n keys = list(self.head.state_dict().keys())\n W = Variable(self.head.state_dict()[keys[-2]])\n if torch.cuda.is_available():\n W = W.cuda()\n contractive_loss = torch.sum(W**2, dim=1).sum()\n return contractive_loss", "def is_critical(self):\n return str(self) in (\"AbiCritical\", \"QCritical\", \"Unconverged\", \"Error\")", "def u_crit(state, sys):\n s = state[0]\n i = state[1]\n tau = scipy.interpolate.interp1d(sys.tau.s, sys.tau.i, kind = \"cubic\")\n phi = scipy.interpolate.interp1d(sys.phi.s, sys.phi.i, kind = \"cubic\")\n cc = scipy.interpolate.interp1d(sys.commutation_curve[0],\n sys.commutation_curve[1],\n kind = \"cubic\")\n if i > sys.imax:\n return sys.umax\n if s <= sys.commutation_curve[0][-1]:\n #print(\"Case 1\")\n if s < sys.sbar or i < tau(s):\n return 0\n return sys.umax\n elif s > sys.commutation_curve[0][-1] and s < sys.commutation_curve[0][0]:\n #print(\"Case 2\")\n if ((i > tau(s)) and (i < cc(s))) or (i > sys.imax):\n return sys.umax\n elif i > cc(s) and i < sys.imax:\n return 0\n else:\n return 0\n else:\n #print(\"Case 3\")\n if i > sys.imax:\n return sys.umax\n elif s > sys.sstar and i > phi(s):\n return sys.umax\n return 0", "def mode(self) -> int:", "def calcIOC(self):\n self._calcIOC(self.planeText)", "def strategy_expensive(cookies, cps, history, time_left, build_info):\n print\n print \"STRATEGY PART BEGIN\"\n print\n items_available = []\n for item in build_info.build_items():\n items_available.append(item)\n while items_available:\n max_cost = 0\n for item in items_available:\n #print \"item:\", item, \", cost:\", build_info.get_cost(item)\n if build_info.get_cost(item) > max_cost:\n max_cost = build_info.get_cost(item)\n most_expensive = item\n print \"most expensive:\", most_expensive\n # check if time enough\n print \"checking time\"\n print \"time left:\", time_left\n print \"cost:\", max_cost\n print \"cookies can be produced:\", cps * time_left\n if cps * time_left + cookies < max_cost:\n items_available.remove(most_expensive)\n print \"not enough,\", most_expensive, \"removed\"\n 
print\n else:\n print most_expensive, \"chosen\"\n print \"STRATEGY PART END\"\n print\n return most_expensive", "def evaluate(self, x, path):\n\n # Read the modestats file as CSV\n mode_stats_paths = glob.glob(\"%s/*modestats.txt\" % path)\n df = pd.read_csv(mode_stats_paths[0], sep = \"\\t\")\n\n car_share = df[\"car\"].values[-1] # Share of car trips\n pt_share = df[\"pt\"].values[-1] # Share of pt trips\n\n # We construct a vector holding the *state* of the simulation. This is not\n # used by most simulators, but important, for instance, for Opdyts!\n state = [car_share, pt_share]\n\n # Here we construct an objective value. Here, we want to minimize\n # the quadratic error between the observed and reference mode shares for\n # car and public transport.\n objective = np.sqrt((car_share - self.car_reference)**2 + (pt_share - self.pt_reference)**2)\n\n # Return state and objective\n return objective, state", "def calculate_eccentricity_ratio(self):\n if not self.bearing_type == \"short_bearing\":\n warnings.warn(\n \"Function calculate_eccentricity_ratio suitable only for short bearings. \"\n \"The ratio between the bearing length and its radius should be less or \"\n \"equal to 0.25. Currently we have \"\n + str(self.length / self.radius_stator)\n + \".\"\n )\n s = self.modified_sommerfeld_number()\n coefficients = [\n 1,\n -4,\n (6 - (s ** 2) * (16 - np.pi ** 2)),\n -(4 + (np.pi ** 2) * (s ** 2)),\n 1,\n ]\n roots = np.roots(coefficients)\n for i in range(0, len(roots)):\n if 0 <= roots[i] <= 1:\n return np.sqrt(roots[i].real)\n sys.exit(\"Eccentricity ratio could not be calculated.\")", "def CIS(self):\n return self.get_class_average(self.CIS_class_level)", "def conductivity(self, T):\n m = self.mass\n mu = self.viscosity(T)\n K = (15/4) * kB * mu / m\n return K", "def coefficient(self) -> float:\n ...", "def get_eccentricity(self, ellipse):\r\n a = ellipse.get_width()\r\n b = ellipse.get_height()\r\n if b > a:\r\n a, b = b, a\r\n c = np.sqrt(a**2 - b**2)\r\n return fdiv(c, a)", "def thermalConductivity(self, Tk=None, Tc=None):\n Tk = getTk(Tc, Tk)\n self.checkPropertyTempRange(\"thermal conductivity\", Tk)\n thermalConductivity = (\n 2.13014e-08 * Tk**3\n - 6.31916e-05 * Tk**2\n + 1.11629e-01 * Tk\n - 2.00043e00\n )\n return thermalConductivity * 1e-3", "def conditional_component_covs(self):\n return np.array([d.conditional_cov() for d in self.conditionalMVNs])", "def ChoppinessIndex(self, timeperiod = 14):\r\n return ta.C", "def test_critic_dcor_linear(self):\n z_matrix = np.array(\n [[0.0, 0.0, 1.0],\n [0.1, 0.2, 0.8],\n [0.2, 0.4, 0.6],\n [0.3, 0.7, 0.3],\n [0.6, 0.8, 0.2],\n [0.8, 0.9, 0.1],\n [1.0, 1.0, 0.0]],\n dtype=np.float64)\n obtained_w_vector = mcdm.weigh(z_matrix, \"CRITIC\", \"dCor\")\n expected_w_vector = np.array(\n [0.50000000, 0.25000000, 0.25000000],\n dtype=np.float64)\n np.testing.assert_allclose(obtained_w_vector, expected_w_vector)\n self.assertEqual(obtained_w_vector.dtype, expected_w_vector.dtype)", "def specificity():\n\tatlas = 'power'\n\tproject='hcp'\n\tdf_columns=['Task','Hub Measure','Q+/Q-','Average Edge i-j Weight',\"Strength of r's, i's PC & j's Q\"]\n\ttasks = ['REST','WM','GAMBLING','RELATIONAL','MOTOR','LANGUAGE','SOCIAL',]\n\tknown_membership,network_names,num_nodes,name_int_dict = network_labels(atlas)\n\tdf = pd.DataFrame(columns = df_columns)\n\tfor task in tasks:\n\t\tprint task\n\t\t# subjects = np.array(hcp_subjects).copy()\n\t\t# subjects = list(subjects)\n\t\t# subjects = remove_missing_subjects(subjects,task,atlas)\n\t\tsubjects = 
np.load('/home/despoB/mb3152/dynamic_mod/results/%s_%s_%s_subs_fz.npy' %('hcp',task,atlas))\n\t\tstatic_results = graph_metrics(subjects,task,atlas,'fz')\n\t\tsubject_pcs = static_results['subject_pcs']\n\t\tsubject_wmds = static_results['subject_wmds']\n\t\tsubject_mods = static_results['subject_mods']\n\t\tsubject_wmds = static_results['subject_wmds']\n\t\tmatrices = static_results['matrices']\n\t\t#sum of weight changes for each node, by each node.\n\t\thub_nodes = ['WCD']\n\t\t# hub_nodes = ['PC']\n\t\tdriver_nodes_list = ['Q+','Q-']\n\t\t# driver_nodes_list = ['Q+']\n\t\tmean_pc = np.nanmean(subject_pcs,axis=0)\n\t\tmean_wmd = np.nanmean(subject_wmds,axis=0)\n\t\tmod_pc_corr = np.zeros(subject_pcs.shape[1])\n\t\tfor i in range(subject_pcs.shape[1]):\n\t\t\tmod_pc_corr[i] = nan_pearsonr(subject_mods,subject_pcs[:,i])[0]\n\t\tmod_wmd_corr = np.zeros(subject_wmds.shape[1])\n\t\tfor i in range(subject_wmds.shape[1]):\n\t\t\tmod_wmd_corr[i] = nan_pearsonr(subject_mods,subject_wmds[:,i])[0]\n\t\tfor hub_node in hub_nodes:\n\t\t\tif hub_node == 'PC':\n\t\t\t\tpc_edge_corr = np.arctanh(pc_edge_correlation(subject_pcs,matrices,path='/home/despoB/mb3152/dynamic_mod/results/%s_%s_%s_pc_edge_corr_z.npy' %(project,task,atlas)))\n\t\t\t\tconnector_nodes = np.where(mod_pc_corr>0.0)[0]\n\t\t\t\tlocal_nodes = np.where(mod_pc_corr<0.0)[0]\n\t\t\telse:\n\t\t\t\tpc_edge_corr = np.arctanh(pc_edge_correlation(subject_wmds,matrices,path='/home/despoB/mb3152/dynamic_mod/results/%s_%s_%s_wmd_edge_corr_z.npy' %(project,task,atlas)))\n\t\t\t\tconnector_nodes = np.where(mod_wmd_corr>0.0)[0]\n\t\t\t\tlocal_nodes = np.where(mod_wmd_corr<0.0)[0]\n\t\t\tedge_thresh_val = 50.0\n\t\t\tedge_thresh = np.percentile(np.nanmean(matrices,axis=0),edge_thresh_val)\n\t\t\tpc_edge_corr[:,np.nanmean(matrices,axis=0)<edge_thresh] = np.nan\n\t\t\tfor driver_nodes in driver_nodes_list:\n\t\t\t\tweight_change_matrix_between = np.zeros((num_nodes,num_nodes))\n\t\t\t\tweight_change_matrix_within = np.zeros((num_nodes,num_nodes))\n\t\t\t\tif driver_nodes == 'Q-':\n\t\t\t\t\tdriver_nodes_array = local_nodes\n\t\t\t\telse:\n\t\t\t\t\tdriver_nodes_array = connector_nodes\n\t\t\t\tfor n1,n2 in permutations(range(num_nodes),2):\n\t\t\t\t\tif n1 not in driver_nodes_array:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif known_membership[n2] == 0:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tarray = pc_edge_corr[n1][n2]\n\t\t\t\t\tweight_change_matrix_between[n1,n2] = np.nansum(pc_edge_corr[n1][n2][np.where((known_membership!=known_membership[n2])&(np.arange(264)!=n1))])\n\t\t\t\t\tweight_change_matrix_within[n1,n2] = np.nansum(pc_edge_corr[n1][n2][np.where((known_membership==known_membership[n2])&(np.arange(264)!=n1))])\n\t\t\t\t\t# for n3 in range(264):\n\t\t\t\t\t# \tif n1 == n3:\n\t\t\t\t\t# \t\tcontinue\n\t\t\t\t\t# \tif known_membership[n3]!= known_membership[n2]:\n\t\t\t\t\t# \t\tweight_change_matrix_between[n1,n2] = np.nansum([weight_change_matrix_between[n1,n2],array[n3]])\n\t\t\t\t\t# \t\tbetween_len = between_len + 1\n\t\t\t\t\t# \telse:\n\t\t\t\t\t# \t\tweight_change_matrix_within[n1,n2] = np.nansum([weight_change_matrix_within[n1,n2],array[n3]])\n\t\t\t\t\t# \t\tcommunity_len = community_len + 1\n\t\t\t\t\t# weight_change_matrix_within[n1,n2] = weight_change_matrix_within[n1,n2] / community_len\n\t\t\t\t\t# weight_change_matrix_between[n1,n2] = weight_change_matrix_between[n1,n2] / between_len\n\t\t\t\ttemp_matrix = np.nanmean(matrices,axis=0)\n\t\t\t\tweight_matrix = 
weight_change_matrix_within-weight_change_matrix_between\n\t\t\t\tweight_matrix[np.isnan(weight_matrix)] = 0.0\n\t\t\t\tif hub_node == 'PC':\n\t\t\t\t\tdf_columns=['Task','Hub Measure','Q+/Q-','Average Edge i-j Weight',\"Strength of r's, i's PC & j's Q\"]\n\t\t\t\telse:\n\t\t\t\t\tdf_columns=['Task','Hub Measure','Q+/Q-','Average Edge i-j Weight',\"Strength of r's, i's WCD & j's Q\"]\n\t\t\t\tdf_array = []\n\t\t\t\tfor i,j in zip(temp_matrix[weight_matrix!=0.0].reshape(-1),weight_matrix[weight_matrix!=0.0].reshape(-1)):\n\t\t\t\t\tdf_array.append([task,hub_node,driver_nodes,i,j])\n\t\t\t\tdf = pd.concat([df,pd.DataFrame(df_array,columns=df_columns)],axis=0)\n\t\t\t\tprint hub_node, driver_nodes\n\t\t\t\tprint pearsonr(weight_matrix[weight_matrix!=0.0].reshape(-1),temp_matrix[weight_matrix!=0.0].reshape(-1))\n\t\t\t\t1/0\n\n\t# plot_connectivity_results(df[(df['Q+/Q-']=='Q+') &(df['Hub Measure']=='PC')],\"Strength of r's, i's PC & j's Q\",'Average Edge i-j Weight','/home/despoB/mb3152/dynamic_mod/figures/edge_spec_pcqplus_%s.pdf'%(edge_thresh_val))\n\t# plot_connectivity_results(df[(df['Q+/Q-']=='Q-') &(df['Hub Measure']=='PC')],\"Strength of r's, i's PC & j's Q\",'Average Edge i-j Weight','/home/despoB/mb3152/dynamic_mod/figures/edge_spec_pcqminus_%s.pdf'%(edge_thresh_val))\n\t# plot_connectivity_results(df[(df['Q+/Q-']=='Q+') &(df['Hub Measure']=='WCD')],\"Strength of r's, i's WCD & j's Q\",'Average Edge i-j Weight','/home/despoB/mb3152/dynamic_mod/figures/edge_spec_wmdqplus_%s.pdf'%(edge_thresh_val))\n\t# plot_connectivity_results(df[(df['Q+/Q-']=='Q-') &(df['Hub Measure']=='WCD')],\"Strength of r's, i's WCD & j's Q\",'Average Edge i-j Weight','/home/despoB/mb3152/dynamic_mod/figures/edge_spec_wmdqminus_%s.pdf'%(edge_thresh_val))\n\t# \"\"\"\n\t# Are connector nodes modulating the edges that are most variable across subjects?\n\t# \"\"\"\n\t# atlas='power'\n\t# known_membership,network_names,num_nodes,name_int_dict = network_labels(atlas)\n\t# for task in tasks:\n\t# \tpc_thresh = 75\n\t# \tlocal_thresh = 25\n\t# \tsubjects = np.array(hcp_subjects).copy()\n\t# \tsubjects = list(subjects)\n\t# \tsubjects = remove_missing_subjects(subjects,task,atlas)\n\t# \tstatic_results = graph_metrics(subjects,task,atlas)\n\t# \tsubject_pcs = static_results['subject_pcs']\n\t# \tsubject_wmds = static_results['subject_wmds']\n\t# \tmatrices = static_results['matrices']\n\t# \tmatrices[:,np.nanmean(matrices,axis=0)<0.0] = np.nan\n\t# \tpc_edge_corr = np.arctanh(pc_edge_correlation(subject_wmds,matrices,path='/home/despoB/mb3152/dynamic_mod/results/%s_%s_%s_wmd_edge_corr_z.npy' %(project,task,atlas)))\n\t# \t# pc_edge_corr = pc_edge_correlation(subject_pcs,matrices,path='/home/despoB/mb3152/dynamic_mod/results/%s_%s_%s_pc_edge_corr_z.npy' %(project,task,atlas))\n\t# \tstd_mod = []\n\t# \ttstd = np.std(matrices,axis=0).reshape(-1)\n\t# \tfor i in range(num_nodes):\n\t# \t\tstd_mod.append(nan_pearsonr(pc_edge_corr[i].reshape(-1),tstd)[0])\n\t# \t# print task, pearsonr(np.nanmean(subject_pcs,axis=0),std_mod)\n\t# \tprint task, pearsonr(np.nanmean(subject_wmds,axis=0),std_mod)\n\t# \tplot_corr_matrix(np.std(matrices,axis=0),network_names.copy(),out_file=None,plot_corr=True,return_array=False)", "def hardwareConcurrency(self):\n return 1", "def calc_capital_costs (self):\n road_needed = 'road needed'\n if self.cd['on road system']:\n road_needed = 'road not needed'\n\n dist = self.comp_specs['distance to community']\n self.capital_costs = self.comp_specs['est. 
intertie cost per mile']\\\n [road_needed] * dist\n #~ print self.capital_costs" ]
[ "0.6149671", "0.581926", "0.5749423", "0.569552", "0.5639848", "0.5627742", "0.54855204", "0.547161", "0.5445516", "0.5431224", "0.5431224", "0.5400731", "0.539497", "0.5374824", "0.53717357", "0.53432816", "0.53290373", "0.5251396", "0.5239214", "0.5225909", "0.52176267", "0.52129865", "0.5212111", "0.5185494", "0.5176456", "0.5153953", "0.51522434", "0.51389116", "0.5108257", "0.50961745", "0.50952965", "0.50945824", "0.50889766", "0.5088274", "0.5058799", "0.50555253", "0.5051179", "0.50417054", "0.50393224", "0.5035122", "0.50278616", "0.50263536", "0.5023323", "0.5007884", "0.5005027", "0.4985053", "0.4983871", "0.49795476", "0.49792966", "0.4977308", "0.49752355", "0.4974013", "0.49662793", "0.49624008", "0.49528044", "0.49494734", "0.49380636", "0.4936694", "0.4930398", "0.490777", "0.49021843", "0.49011514", "0.489625", "0.48959783", "0.48916864", "0.4888428", "0.48844102", "0.4874366", "0.48632818", "0.48618108", "0.4855619", "0.48545298", "0.48534584", "0.48511097", "0.48439166", "0.48415783", "0.48371062", "0.4834213", "0.48313507", "0.48249304", "0.48127314", "0.4809927", "0.48060516", "0.48040417", "0.47998613", "0.4796847", "0.47950578", "0.47938955", "0.47935516", "0.47930384", "0.4792695", "0.4792263", "0.4791333", "0.47907728", "0.47816223", "0.47757164", "0.47717714", "0.47694027", "0.47674137", "0.4765634" ]
0.7476894
0
List of signals is split into lists of single signals
def test_process_signal_list(self): input_signals = [Signal({"hello": "n.io"}), Signal({"hello": "n.io"}), Signal({"hello": "n.io"})] blk = SignalListSplitter() self.configure_block(blk, {}) blk.start() # one list of signals is processed blk.process_signals(input_signals) blk.stop() self.assert_num_signals_notified(3) # and one list of one signal has been notified for each signal self.assertEqual(len(self.notified_signals[DEFAULT_TERMINAL]), 3) for signal_list in self.notified_signals[DEFAULT_TERMINAL]: self.assertEqual(len(signal_list), 1) self.assertEqual(signal_list[0].to_dict(), { "hello": "n.io", })
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_process_signal_list_of_one(self):\n input_signals = [Signal({\"hello\": \"n.io\"})]\n blk = SignalListSplitter()\n self.configure_block(blk, {})\n blk.start()\n # one list of signals is processed\n blk.process_signals(input_signals)\n blk.stop()\n self.assert_num_signals_notified(1)\n # and one list of one signal has been notified for each signal\n self.assertEqual(len(self.notified_signals[DEFAULT_TERMINAL]), 1)\n for signal_list in self.notified_signals[DEFAULT_TERMINAL]:\n self.assertEqual(len(signal_list), 1)\n self.assertEqual(signal_list[0].to_dict(), {\n \"hello\": \"n.io\",\n })", "def add(self, signal_list):\n result = []\n for signals in signal_list:\n result.append(\n signals * signal.blackmanharris(\n len(signals),\n sym=False\n )\n )\n return result", "def signal(self) -> list:\n raise NotImplementedError(\"You must implement signal\")", "def read_all_signals(self):\n return [pio.sample(signal_idx)\n for signal_idx in self._signals_idx]", "def _signals(cls, idx, m, n):\n import numpy as np\n signal = []\n\n # Generating all the frequencies from a time series of length n\n fs = np.fft.fftfreq(n)\n\n # Loop through the frequencies in idx\n for i in idx:\n freq = fs[i]\n\n # Computing the sinusoids for the ith frequency\n signal.append(np.cos(2 * np.pi * m * freq) + complex(0, np.sin(2 * np.pi * m * freq)))\n return np.array(signal)", "def _remove_redundant_signals(self, signals: List[Signal]):\n open_positions = self._broker.get_positions()\n tickers_with_open_positions = set(\n self._contract_to_ticker(position.contract()) for position in open_positions\n )\n\n signals_with_suggested_exposure_out = [signal for signal in signals if\n signal.suggested_exposure == Exposure.OUT]\n redundant_signals = [signal for signal in signals_with_suggested_exposure_out\n if signal.ticker not in tickers_with_open_positions]\n\n for signal in redundant_signals:\n signals.remove(signal)", "def test_add_signals():\n x = np.linspace(390, 410, 200)\n doublet = [(399, 1), (401, 1)]\n y = add_signals(x, doublet, 1)\n X = np.array([x for x, _ in ADD_SIGNALS_DATASET])\n Y = np.array([y / 2 for _, y in ADD_SIGNALS_DATASET]) # scale to match\n print(y)\n print(Y)\n assert np.array_equal(x, X)\n assert np.array_equal(y, Y)", "def _read_signals(edf_file, header):\n signals = OrderedDict([(label, []) for label in header['label']])\n\n while True:\n try:\n record = _read_record(edf_file, header)\n except EOFError:\n break\n\n for label, signal in record.items():\n signals[label].append(signal)\n\n for label, signal in signals.items():\n signals[label] = np.concatenate(signal)\n\n return signals", "def signals(self):\n if self._current_inputs is None:\n raise RuntimeError(\n 'Internal Error: The current inputs have not been properly '\n 'generated. 
First call features_and_labels, then call signals.')\n signals = self._current_inputs['signals']\n self._current_inputs = None\n return signals", "def _sampleSignals(self):\n mSig_ = np.hstack((self.mSig, np.nan*np.zeros((self.nSigs,1)))) # Add nan to the end of signals\n\n # Generate matrix with patterns where NaN is changed to index of NaN in mSig_\n mPattsRep_ = self.mPattsRep.copy()\n (_, nSamps) = self.mSig.shape # Get the number of samples in the signals\n mPattsRep_[np.isnan(self.mPattsRep)] = nSamps # Change nan into pointer to nan\n \n self.mObSig = (mSig_[np.arange(self.nSigs), mPattsRep_.T.astype(int)]).T # Generate correct observation signals\n return", "def getEDFsignals(edf):\n n = edf.signals_in_file\n samples = edf.getNSamples()[0]\n signals = np.zeros((n, samples))\n for i in range(n):\n try:\n signals[i,:] = edf.readSignal(i)\n except:\n pass\n return signals", "def Signals(sigtype, num_sigs):\n assert isinstance(sigtype, (bool, intbv))\n sigs = [Signal(sigtype) for _ in range(num_sigs)]\n return sigs", "def get_output_bands(self):\n dlist=self.dest_list.children()\n out_list=[]\n for item in dlist:\n out_list.append((self.output_bands[item][0],\n self.output_bands[item][1]))\n return out_list", "def getSplitDetectorSignal(self):\r\n\t\treturn self.splitData", "def get_signal_info(self, signal_names):\n result = []\n for name in signal_names:\n description = self._pio.signal_description(name)\n domain_type = self._pio.signal_domain_type(name)\n aggregation, format_type, behavior = self._pio.signal_info(name)\n result.append((name, description, domain_type, aggregation, format_type, behavior))\n return result", "def split_registrations(list_of_registrations):\n list_of_registrations.sort(key=lambda registration: registration.service)\n\n sub_list = []\n main_list = []\n previous = list_of_registrations[0]\n\n for registration in list_of_registrations:\n if previous.service == registration.service:\n sub_list.append(registration)\n else:\n main_list.append(sub_list)\n sub_list = [registration]\n previous = registration\n\n main_list.append(sub_list)\n return main_list", "def GetSignals(cls):\n return []", "def pick_signals(processor, source = 'input'):\n\n if source == 'input':\n bin_edges = processor.input_parameters['bin_edges']\n raw_signal = processor.input_signal\n elif source == 'output':\n bin_edges = processor.output_parameters['bin_edges']\n raw_signal = processor.output_signal\n else:\n raise ValueError('Unknown value for the data source')\n t = np.zeros(len(raw_signal)*4)\n bins = np.zeros(len(raw_signal)*4)\n signal = np.zeros(len(raw_signal)*4)\n value = 1.\n\n for i, edges in enumerate(bin_edges):\n t[4*i] = edges[0]\n t[4*i+1] = edges[0]\n t[4*i+2] = edges[1]\n t[4*i+3] = edges[1]\n bins[4*i] = 0.\n bins[4*i+1] = value\n bins[4*i+2] = value\n bins[4*i+3] = 0.\n signal[4*i] = 0.\n signal[4*i+1] = raw_signal[i]\n signal[4*i+2] = raw_signal[i]\n signal[4*i+3] = 0.\n value *= -1\n\n z = t * c\n return (t, z, bins, signal)", "def _process_group(self, signals, group, input_id, signals_to_notify):\n signals_to_notify = []\n for signal in signals:\n state_change = self._process_state(signal, group)\n if state_change is not None:\n # If we are excluding existing fields we want to add\n # the states and previous states to an empty signal\n if self.exclude():\n signal = Signal()\n setattr(signal,\n 'prev_{}'.format(self.state_name()), state_change[0])\n setattr(signal, '{}'.format(\n self.state_name()), state_change[1])\n setattr(signal, 'group', group)\n 
signals_to_notify.append(signal)\n return signals_to_notify", "def split_list(self):\n wanted_parts = self.args.ncore\n alist = glob.glob(self.args.input + '*.root')\n length = len(alist)\n return [alist[i * length // wanted_parts: (i + 1) * length // wanted_parts]\n for i in range(wanted_parts)]", "def get_signals(self, symbols):\n buy = []\n sell = []\n hold = []\n for ticker in symbols:\n print(ticker)\n params = {\n 'symbol' : ticker,\n 'interval' : 'daily',\n 'series_type' : 'close',\n 'fastperiod': self.fastperiod,\n 'slowperiod':self.slowperiod,\n 'signalperiod':self.signalperiod\n }\n macdjson = av_call('MACD', params)\n\n if ticker == 'AMGN':\n print(macdjson)\n\n macd = get_df_from_av_json(macdjson)\n if macd['MACD'][-1] > macd['MACD_Signal'][-1]:\n buy.append(ticker)\n elif macd['MACD'][-1] > macd['MACD_Signal'][-1]:\n sell.append(ticker)\n else:\n hold.append(ticker)\n \n return {\n 'buy': buy,\n 'sell': sell,\n 'hold': hold\n }", "def removeDuplicates(self,covariateList,bands):\n\t\t\n\t\treturn [elem for elem in covariateList if elem not in bands]", "def prepare_signals_for_rendering(multiple_beams):\n logging.debug('running the prepare_signals_for_rendering function')\n multiple_envelopes = []\n\n for i, single_beam in enumerate(multiple_beams):\n logging.debug('Running through single beam signal number %d', i)\n rectified_signal = abs(single_beam)\n window_size = determine_window_size(rectified_signal)\n logging.debug('window_size determined to to be %d', window_size)\n single_envelope = envelope_detect(rectified_signal, window_size)\n multiple_envelopes.append(single_envelope)\n\n multiple_envelopes = log_compress(multiple_envelopes)\n multiple_envelopes = account_for_distance(multiple_envelopes)\n multiple_envelopes = account_for_harmonics(multiple_envelopes)\n\n return multiple_envelopes", "def averaging(signal_list):\n #check if their length match\n N = len(signal_list)-1\n len_0 = len(singal_list[0])\n i = 1\n while (i <= N):\n if (len_0 == len(signal_list[i])):\n i = i+1\n else:\n break\n same_length = (i > N)\n\n #loop of averaging frame by frame\n if same_length:\n avg_arr = np.zeros(signal_list[0].shape, dtype=float)\n for i in range(len(signal_list[0])):\n sum = 0\n for j in range(len(signal_list)):\n sum = sum + signal_list[j][i]\n avg_arr[i] = sum/len(signal_list)\n else:\n print(\"The signals should be uniform in length\")\n\n return avg_arr", "def split(self, X):", "def convert(filename, signal=sampleArray(), \n centerEta = DEFCENTERETA, centerPhi = DEFCENTERPHI): \n numEvents = 0\n listOfSignals = []\n # First we open the file\n with open(filename, \"r\") as f:\n content = f.readlines()\n numEvents = len(content)\n for i in range(0, numEvents):\n if i % 1000 == 0:\n print(i)\n thisEvent = content[i]\n try:\n reducedEvent = convertEvent(\n thisEvent, centerEta, centerPhi, signal.shape[0], signal.shape[1]\n ) \n listOfSignals.append(reducedEvent)\n except indexError:\n 0\n print(\"Converted\", len(listOfSignals), \"out of\", numEvents, \"events\")\n return listOfSignals", "def caculate_signals(self):\n\t\traise NotImplementedError(\"Should implement calculate_signals()\")", "def split(self, x):\r\n new_beams = np.array([])\r\n for bar in self.bar_elements:\r\n new_beams = np.concatenate((new_beams, bar.split(x)))\r\n return BeamElements(new_beams)", "def read_list(f, nb_freqs):\n alist = []\n while len(alist) < nb_freqs:\n line = f.readline()\n splitted = line.split()\n well_splitted = True\n for entry in splitted:\n well_splitted = well_splitted and 
entry.count('.') <= 1\n if well_splitted:\n entries = splitted\n else:\n if line.count('-') > 0:\n # Probably coming from an SDSS spectrum.\n entries = [line[i:i+12] for i in range(0, len(line) - 1, 12)]\n else:\n entries = [line[i:i+8] for i in range(0, len(line) - 1, 8)]\n for entry in entries:\n try:\n alist.append(float(entry))\n except ValueError:\n # If conversion to float fails, put 0 instead.\n alist.append(0)\n return numpy.array(alist)", "def calculate_signals(self):\n raise NotImplementedError(\"Should implement calculate_signals()\")", "def simulation_to_lines(data: List(Float))->List(Tuple(Int, Float)):\n result = []\n counter = 0\n for payoff in data:\n result = result + [(counter, payoff)]\n counter+=1\n return result\n\n #print(str(result))", "def waveforms(self):\n return list(self._waveforms)", "def filter_signals(self, df_phys):\n if not df_phys.empty and len(self.signals):\n df_phys = df_phys[df_phys[\"Signal\"].isin(self.signals)]\n\n return df_phys", "def build_wires(self) -> List[Segment]:\n segments = [Segment(self.ORIGIN, self.ORIGIN.move_to(self._diagram[0]))]\n for step in self._diagram[1:]:\n segments.append(segments[-1].wire_to(step))\n\n return segments", "def split_train_eval(full_list):\n tr_list = []\n ev_list = []\n random.shuffle(full_list)\n tot = len(full_list)\n tot80 = int(0.8 * tot)\n for rg in range(tot):\n if rg < tot80:\n tr_list.append(full_list[rg])\n else:\n ev_list.append(full_list[rg])\n return [tr_list, ev_list]", "def generate_signals(self):\n signals = {}\n\n # Create the set of short and long simple moving averages over the \n # respective periods\n signals['short_mavg'] = self.bars.rolling(window=self.short_window).mean()\n signals['long_mavg'] = self.bars.rolling(window=self.long_window).mean()\n\n\n return signals", "def reconstruct_signal(_X):\n width = _X.shape[1]\n N = _X.shape[0]\n n = N // 2\n\n head = _X[:n, 0]\n tail = _X[n:, width - 1]\n body = np.array([_X[n:, i] + _X[:n, i + 1] for i in range(width - 1)]).reshape(n * (width - 1))\n\n return np.append(head, np.append(body, tail))", "def signal_to_training( # pylint: disable=too-many-locals\n self,\n signal: Union[Dict, List[Dict]]\n ) -> Tuple[np.ndarray, Tuple[np.ndarray, ...], np.ndarray, Dict[str, Any]]:\n dict_list = list(signal) if isinstance(signal, list) else list((signal, ))\n\n # Initialize the return values\n time_length = len(dict_list[0]['signal']['time']['data']) # type: ignore\n length = int(time_length / 2)\n signals = np.zeros((0, time_length))\n result_r = np.zeros((0, length))\n result_b = np.zeros((0, length))\n result_h = np.zeros((0, length))\n result_m = np.zeros((0, length))\n result_p = np.zeros((0, length))\n answer = np.zeros((0, length))\n config = {\n 'SNR': [],\n 'count': [],\n 'frequencies': [],\n 'amplitudes': [],\n 'minamplitude': [],\n 'mindist': []\n } # type: Dict[str, Any]\n\n # Calculate window functions\n window_bartlett = np.bartlett(time_length)\n window_hanning = np.hanning(time_length)\n window_meyer = self._meyer_wavelet(time_length)\n window_poisson = exponential(time_length, sym=True, tau=(time_length/2)*(8.69/60.0))\n\n # Loop all data entries\n for data in dict_list:\n time = np.asarray(data['signal']['time']['data'])\n signals = np.concatenate((signals, np.reshape(time, (1,) + time.shape)))\n config['SNR'].append(data['signal']['SNR'])\n\n # Assemble the FFTs\n fft = np.fft.fft(time)[:length] / time_length\n result_r = np.concatenate((result_r, np.reshape(fft, (1,) + fft.shape)))\n fft = np.fft.fft(time * 
window_bartlett)[:length] / time_length\n result_b = np.concatenate((result_b, np.reshape(fft, (1,) + fft.shape)))\n fft = np.fft.fft(time * window_hanning)[:length] / time_length\n result_h = np.concatenate((result_h, np.reshape(fft, (1,) + fft.shape)))\n fft = np.fft.fft(time * window_meyer)[:length] / time_length\n result_m = np.concatenate((result_m, np.reshape(fft, (1,) + fft.shape)))\n fft = np.fft.fft(time * window_poisson)[:length] / time_length\n result_p = np.concatenate((result_p, np.reshape(fft, (1,) + fft.shape)))\n\n # Assemble all the frequencies and amplitudes\n count = 0\n freqs = []\n ampls = []\n counting = np.zeros((1, length))\n for subsig in data['signal']['parts']:\n if subsig['signal']['type'] == 'SingleOscillation':\n count += 1\n freq = subsig['signal']['frequency']\n counting[0, int(max(0, min(length - 1, round(freq))))] += 1\n freqs.append(freq)\n ampls.append(subsig['signal']['amplitude'])\n config['count'].append(count)\n\n # Sort frequencies and amplitudes by frequency\n np_freqs = np.asarray(freqs)\n sorting = np.unravel_index(np.argsort(np_freqs), np_freqs.shape)\n np_freqs = np_freqs[sorting]\n np_ampls = np.asarray(ampls)[sorting]\n\n # Assemble some statistics\n config['mindist'].append(999999. if len(np_freqs) < 2 else np.min(np.diff(np_freqs)))\n config['minamplitude'].append(np.min(np_ampls) if len(np_ampls) > 0 else 999999.)\n config['frequencies'].append(np_freqs)\n config['amplitudes'].append(np_ampls)\n answer = np.concatenate((answer, counting))\n\n # Assemble results\n ffts = (result_r, result_b, result_h, result_m, result_p)\n return signals, ffts, answer, config", "def signal_filter(signal_processor):\n while True:\n synapse, data = (yield)\n try:\n signal = Signal.deserialize(data)\n except ProtocolError, reason:\n # low-level protocol error (e.g transmission of '[1,2' )\n #logs.logger.debug(\"filter error: %s,%s\" % (synapse, reason))\n err_response = Signal('error', (Signal.ProtocolError, str(reason)))\n synapse.transmit(err_response)\n synapse.disconnect()\n else:\n signal_processor.send((synapse, signal))", "def test_read_multiple_specified_signals(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n test_dir = os.path.join(cwd, 'test_files/')\n signals = read_signals(test_dir, ['test2', 'test3'])\n self.assertEquals(len(signals), 2)", "def split(self, x):\r\n if x >= self.n2.x or x <= self.n1.x: return [self]\r\n n_intermediate = Node.MiddleNode(x=x)\r\n bar1 = BeamElement(nodes=[self.n1, n_intermediate], section=self.section, material=self.material)\r\n bar2 = BeamElement(nodes=[n_intermediate, self.n2], section=self.section, material=self.material)\r\n return [bar1, bar2]", "def calculate_signals(self, event: MarketEvent):\n for symbol, bars in event.symbol_data.items():\n if not self.bought[symbol]:\n signal = SignalEvent(bars[-1].symbol, bars[-1].time, 'LONG')\n self.events.add_event(signal)\n self.bought[symbol] = True", "def visit_list(self, sylist):\n for value in sylist:\n child = type(value)(value.container_type)\n self.current.append(child)\n value.visit(SpineCopyVisitor(child))", "def binn_fft(self):\n bin_res = []\n for fft_bin in BINS:\n bin_res.append(self.bin_spec_y(fft_bin[0], fft_bin[1]))\n return bin_res", "def input_slice(self, inputs):\n result = []\n for i in range(int(len(inputs) / self.window_size)):\n result.append(inputs[i * self.window_size:(i + 1) * self.window_size])\n return result", "def get_signals(self):\n return QFDataFrame(data=self._signals, index=self._signals_dates)", "def build_symptoms(self, 
signals):\n\n symptoms = []\n for signal in signals:\n new_symptom = symptom.Symptom(self.symptom_tag, self.action_msg, signal)\n symptoms.append(new_symptom)\n\n return symptoms", "def extract_onset_events(bin_path, chanList, chunk_size=4000):\n meta = readMeta(bin_path)\n sRate = SampRate(meta)\n\n n_samples = int(float(meta['fileTimeSecs']) * sRate)\n n_chunks = sp.floor(n_samples / chunk_size).astype('int32')\n print(\"leftover samples: %i\" % (n_samples % n_chunks))\n\n rawData = makeMemMapRaw(bin_path, meta)\n\n events = []\n for ch in chanList:\n inds = []\n\n # get digital data for the selected lines\n for i in tqdm(range(n_chunks)):\n start = i * chunk_size\n stop = start + chunk_size\n\n digArray = ExtractDigital(rawData, start, stop, 0, range(8), meta)\n trig_data = digArray[ch,:]\n\n ix = sp.where(sp.diff(trig_data) == 1)[0]\n inds.append(ix+start)\n # if len(ix) > 0:\n # print(len(ix))\n\n inds = sp.concatenate(inds)\n times = inds / sRate\n events.append([inds,times])\n\n return events", "def bin_the_data(neuron_spikes, first, last, bin_size):\n neuron_activity = []\n timebins = range(first, int(last) + int(last) % bin_size, bin_size)\n for spike in neuron_spikes:\n activity = []\n spike_time = spike[0]\n i = 0\n for bin_size in timebins:\n k = 0\n while spike_time < bin_size:\n i += 1\n if i >= np.size(spike):\n break\n spike_time = spike[i]\n k += 1\n activity.append(k)\n neuron_activity.append(activity)\n return neuron_activity, timebins", "def generate_sequential_events_list(generator_spec_list):\n\n data = []\n for spec in generator_spec_list:\n generator = spec[tg.GENERATOR]\n data += tg.generate_round_robin_data_stream(generator.models,\n spec[tg.NUM_EVENTS])\n return data", "def _convert_to_multi_segment(self):\n\n self.header['nb_segment'] = [self.info['n_episodes']]\n\n # drop repeated signal headers\n self.header['signal_channels'] = \\\n self.header['signal_channels'].reshape(\n self.info['n_episodes'], -1)[0]\n\n # reshape signal memmap list\n new_sig_memmaps = []\n n_channels = len(self.header['signal_channels'])\n sig_memmaps = self._raw_signals[0]\n for first_index in np.arange(0, len(sig_memmaps), n_channels):\n new_sig_memmaps.append(\n sig_memmaps[first_index:first_index + n_channels])\n self._raw_signals = new_sig_memmaps\n\n self.logger.debug('New number of segments: {}'.format(\n self.info['n_episodes']))\n\n return", "def split_list_by(lst, sepfunc, includesep):\n\tblocks = []\n\tblock = []\n\tfor elem in lst:\n\t\tif sepfunc(elem):\n\t\t\tif includesep:\n\t\t\t\tblock.append(elem)\n\t\t\tblocks.append(block)\n\t\t\tblock = []\n\t\telse:\n\t\t\tblock.append(elem)\n\tif len(block):\n\t\tblocks.append(block)\n\treturn blocks", "def gen_list(self, x_list, z, s, nsamp):\n x_list = self.transform_xin_list(x_list)\n pred_list = self.sample_gp_pred(nsamp, x_list)\n pred_list = [self.dt.inv_transform_y_data(pr) for pr in pred_list]\n return pred_list", "def merge_one_sensor(slist):\n r = strip_file(slist[0],leave_header=True)\n for s in slist[1:]:\n r += strip_file(s,leave_header=False)\n return r", "def _split_inputs_outputs(self, data):\n\n\t\tinputs = []\n\t\toutputs = []\n\n\t\tfor point in data:\n\t\t\tinputs.append(point[0])\n\t\t\toutputs.append(point[1])\n\n\t\treturn np.array(inputs), np.array(outputs)", "def convert_to_list(self): \n self.reads = list(self.reads)\n self.sampling = False", "def _process_egocentric(self, signal: egocentric.EgocentricSignal):\n output_signals = []\n output_signals += self._process_egocentric_direction(\n 
self._get_hparam('egocentric_direction_mode'),\n signal.xz_direction,\n signal.yz_direction)\n output_signals += self._process_egocentric_distance(\n self._get_hparam('egocentric_distance_mode'),\n signal.distance)\n return output_signals", "def synthesize1(amps, freqs, ts):\n components = [thinkdsp.ComplexSinusoid(freq, amp)\n for amp, freq in zip(amps, freqs)]\n signal = thinkdsp.SumSignal(*components)\n\n ys = signal.evaluate(ts)\n return ys", "def get_samples_per_signal(self):\n return np.array([self.samples_in_file(chn) for chn in range(self.signals_in_file)])", "def calculate_signals(self):\n\t\traise NotImplementedError(\n\t\t\t\"Should implement calculate_signals()\\n\" + \\\n\t\t\t\"By calling this method to calculate 'Signal' Events\"\n\t\t)", "def base_to_signal_mapping(grp):\n\n position_in_signal = [0 for _ in range(5)]\n for i in range(1, len(grp)):\n position_in_signal += [i for _ in range(grp[i][5])]\n # position_in_signal += [grp[i][0] for _ in range(grp[i][5])]\n\n # print(position_in_signal)\n return position_in_signal", "def generate_signals(self):\n signals = {}\n \n\n # Create the set of short and long exponential moving averages over the \n # respective periods\n signals['short'] = self.bars.ewm(span = self.short_window , min_periods=self.long_window-1).mean()\n signals['long'] = self.bars.ewm(span = self.long_window , min_periods=self.long_window-1).mean()\n signals['MACD'] = signals['short'] - signals['long']\n signals['MACDsign'] = signals['MACD'].ewm(span = self.signal_window , min_periods=self.long_window-1).mean()\n signals['MACDdiff'] = signals['MACD'] - signals['MACDsign']\n\n \n return signals", "def smart_split(strokes):\n\n splited = []\n for stroke in strokes:\n splited += stroke.split_non_differentiable_points()\n return splited", "def postgen_list(self, x_list, s, nsamp):\n x_list = self.transform_xin_list(x_list)\n pred_list = self.sample_gp_post_pred(\n nsamp, x_list, full_cov=True, nloop=np.min([50, nsamp])\n )\n pred_list = [self.dt.inv_transform_y_data(pr) for pr in pred_list]\n return pred_list", "def rawSignals(obars, window=21, nbands=3, inc=0.5, save=True):\n bars = obars.copy() # avoid warnings\n bars['OHLC'] = np.nan # typical price\n bars.OHLC.values[:] = np.mean(bars.values[:,0:4], axis=1) # 1000x faster\n price = bars.OHLC.values\n for i in range(nbands):\n upband, sma, lwband = ta.BBANDS(price, window*inc)\n if save: # for plotting stuff\n bars['bandlw'+str(i)] = lwband\n bars['bandup'+str(i)] = upband\n bars['bandsg'+str(i)] = 0 # signal for this band\n signals = fastbollingerSignal(price, upband, lwband)\n bars.loc[:, 'bandsg'+str(i)] = signals.astype(int) # signal for this band\n inc += 0.5\n bars.dropna(inplace=True)\n return bars", "def stream2inputs(stream):\r\n inputs = list(stream.sorted.flat.getElementsByClass([\"Note\", \"Chord\", \"Rest\"]))\r\n return [MidiLSTM._to_midi_values(x) for x in inputs]", "def reformatList( listOfPaths):\n newList = []\n first = True\n for seg in listOfPaths: \n newList += seg.asSVGCommand(first)\n first = False\n return newList", "def make_lists(sv):\r\n \r\n mark_delayed(sv) # identify delayed objects\r\n make_pin_list(sv) # detect and initialize inputs (to false) \r\n make_old_list(sv) # create a list of used old/old \r", "def InSlotsGet(self):\n ## Make Header\n hex_rep = self.NanonisTCP.make_header('Signals.InSlotsGet', body_size=0)\n \n self.NanonisTCP.send_command(hex_rep)\n \n response = self.NanonisTCP.receive_response()\n \n # signals_names_size = 
self.NanonisTCP.hex_to_int32(response[0:4])\n signals_names_num = self.NanonisTCP.hex_to_int32(response[4:8])\n \n idx = 8\n signal_names = []\n for n in range(signals_names_num):\n size = self.NanonisTCP.hex_to_int32(response[idx:idx+4])\n idx += 4\n signal_name = response[idx:idx+size].decode()\n idx += size\n signal_names.append(signal_name)\n \n signal_indexes = []\n signal_indexes_size = self.NanonisTCP.hex_to_int32(response[idx:idx+4])\n for n in range(signal_indexes_size):\n idx += 4\n signal_index = self.NanonisTCP.hex_to_int32(response[idx:idx+4])\n signal_indexes.append(signal_index)\n \n return [signal_names,signal_indexes]", "def test_read_multiple_signals(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n test_dir = os.path.join(cwd, 'test_files/')\n signals = read_signals(test_dir)\n self.assertEqual(len(signals), 3)\n self.assertTrue(\n any(signal for signal in signals if signal.name == \"test1\"))\n self.assertTrue(\n any(signal for signal in signals if signal.name == \"test2\"))\n self.assertTrue(\n any(signal for signal in signals if signal.name == \"test3\"))", "def signal_to_frames(signal, frame_len, frame_step, win_func=None):\n assert signal.ndim == 1\n\n signal_len = len(signal)\n frame_len = int(round(frame_len))\n frame_step = int(round(frame_step))\n num_frames = number_frames(signal_len, frame_len, frame_step)\n\n indices = indices_grid(frame_len, frame_step, num_frames)\n framed_signal = signal[indices]\n\n if win_func is not None:\n framed_signal = win_func(framed_signal)\n\n remain_signal = []\n # Add plus one to get first index\n # that is not in framed_signal\n max_idx = np.max(indices) + 1\n if max_idx <= signal_len - 1:\n remain_signal = np.r_[remain_signal, signal[max_idx:]]\n\n return framed_signal, remain_signal", "def split_multiple_recordings(audio, min_silence_duration=0.25, noise_threshold=150, sample_rate_hz=8e3):\n # A list of tuples (start, stop)\n min_silence_frame = sample_rate_hz * min_silence_duration\n silence_zones = []\n\n zone_start = None\n zone_end = None\n\n for idx, point in enumerate(audio):\n if abs(point) < noise_threshold and zone_start is None:\n zone_start = idx\n\n if abs(point) > noise_threshold and zone_start is not None:\n zone_end = idx\n\n # If we are in a silent zone and we come to the end point\n if zone_start is not None and zone_end and abs(point) > noise_threshold:\n if (zone_end - zone_start) > min_silence_frame:\n silence_zones.append((zone_start, zone_end))\n\n zone_start = None\n zone_end = None\n\n # Split the recording by the zones\n split_recordings = []\n for idx, zone in enumerate(silence_zones):\n if idx == 0:\n start = 0\n else:\n start = silence_zones[idx - 1][1]\n\n end = zone[0]\n split_recordings.append(audio[start:end])\n\n return split_recordings", "def separate_all_data(data_arrays_list):\n separated_data_list = []\n for i in range(0, len(data_arrays_list)):\n separated_data_list.append(separate_data(data_arrays_list[i]))\n return separated_data_list", "def signalAll(self, signal, startswithname=None):\n for name in self.processes.keys():\n if startswithname is None or name.startswith(startswithname):\n self.signalProcess(signal, name)", "def transform_to_one_chanel_data(train_data_lst, test_data_lst, data_anots):\r\n num_chanles = train_data_lst[0].shape[2]\r\n train_data_chanels = []\r\n test_data_chanels = []\r\n new_anots = [] \r\n for data_idx in range(len(train_data_lst)):\r\n signals = train_data_lst[data_idx]\r\n test_signals = test_data_lst[data_idx]\r\n \r\n for ch_idx in 
range(num_chanles):\r\n train_one_chanel = select_one_chanel(signals, ch_idx)\r\n train_data_chanels.append(train_one_chanel)\r\n \r\n test_data_one_chanel = select_one_chanel(test_signals, ch_idx)\r\n test_data_chanels.append(test_data_one_chanel)\r\n new_anots.append(data_anots[data_idx]+'_'+str(ch_idx)) \r\n return train_data_chanels, test_data_chanels, new_anots", "def calculate_signals(self, event):\n if event.type == 'MARKET':\n for s in self.symbol_list:\n bars = self.bars.get_latest_bars_values(s, \"adj_close\", N=self.long_window)\n bar_date = self.bars.get_latest_bar_datetime(s)\n if len(bars)+1 <= self.short_window:\n short_sma = np.mean(bars)\n long_sma = np.mean(bars)\n elif self.short_window < len(bars)+1 and len(bars)+1 <= self.long_window:\n short_sma = np.mean(bars[-self.short_window-1:-1])\n long_sma = np.mean(bars)\n else:\n short_sma = np.mean(bars[-self.short_window-1:-1])\n long_sma = np.mean(bars[-self.long_window-1:-1])\n\n symbol = s\n cur_date = dt.utcnow()\n sig_dir = \"\"\n\n if short_sma > long_sma and self.bought[s] == \"OUT\":\n print(\"LONG: %s\" % bar_date)\n sig_dir = 'LONG'\n signal = SignalEvent(strategy_id=1, symbol=symbol, datetime=bar_date, signal_type=sig_dir, strength=1.0)\n self.events.put(signal)\n self.bought[s] = 'LONG'\n elif short_sma < long_sma and self.bought[s] == \"LONG\":\n print(\"SHORT: %s\" % bar_date)\n sig_dir = 'EXIT'\n signal = SignalEvent(strategy_id=1, symbol=symbol, datetime=bar_date, signal_type=sig_dir, strength=1.0)\n self.events.put(signal)\n self.bought[s] = 'OUT'", "def array(self):\n return list(self.sequence)", "def transform_basis(self, values):\n block_len = len(values)/self.base\n blocks = [values[i*block_len:(i+1)*block_len] for i in range(self.base)]\n return blocks", "def transform_basis(self, values):\n block_len = len(values)/self.base\n blocks = [values[i*block_len:(i+1)*block_len] for i in range(self.base)]\n return blocks", "def _get_pulse_shaping_waveform(self):\n self.pulse_shaping_list = []\n # Make the rise time be 3.3333% if the dot time.\n rise_time_in_msec = 0.03333333333333 * self.dot_time_in_msec\n # Limit the rise time to 2 milliseconds.\n if rise_time_in_msec > 0.002:\n rise_time_in_msec = 0.002\n rising_falling_count = int(rise_time_in_msec * self.sample_rate)\n step = math.pi / rising_falling_count\n # The first value is zero, so skip that value.\n # The last value is 1.0, so skip that value too.\n for i in range(1, rising_falling_count - 1):\n gain = 0.5 * (1.0 - math.cos(step * i))\n self.pulse_shaping_list.append(gain)", "def _splitPoints(self, points, split):\n # validate split\n if not split:\n return [points]\n\n # complete split with adding start and end frames\n if split[0] != 0:\n split.insert(0, 0)\n\n if split[-1] != len(points):\n split.append(len(points))\n\n # make sure split is sorted and doesn't contain any duplicates\n split = list(set(split))\n split.sort()\n\n # split range for looping\n splitA = split[:-1]\n splitB = split[1:]\n\n # get lists\n return [points[a:b + 1] for a, b in zip(splitA, splitB)]", "def split(f):\n n = len(f)\n f0 = [f[2 * i + 0] for i in range(n // 2)]\n f1 = [f[2 * i + 1] for i in range(n // 2)]\n return [f0, f1]", "def split_list(list_in,number_of_pieces):\n output_length = len(list_in) / number_of_pieces\n output = []\n piece = []\n counter = 0\n for list_item in list_in:\n counter += 1\n piece.append(list_item)\n if counter >= output_length:\n output.append(piece)\n counter = 0\n piece = []\n # Make sure nothing is missed\n if len(piece) > 0:\n 
output.append(piece)\n return output", "def forward(self, x):\n out = [x]\n for freq in self.freq_bands:\n for func in self.funcs:\n out += [func(freq*x)]\n\n return torch.cat(out, -1)", "def SI(As):\n return [A for A in As if A.is_SI()]", "def Chunks(l):\n return_list = [[]]\n counter = 0\n index = 0\n for i in l:\n # Size is split in half due to the max size being a sum of src and dst.\n if counter > (self._ADDRESS_LENGTH_LIMIT/2):\n counter = 0\n index += 1\n return_list.append([])\n if i.version == 6:\n counter += self._IPV6_SIZE\n else:\n counter += 1\n return_list[index].append(i)\n return return_list", "def list_by_list(list_to_be_splited, list_with_intervals):\n intervals = []\n for x, val in enumerate(list_to_be_splited):\n for y in list_with_intervals:\n if y == val:\n intervals.append((x, val))\n return intervals", "def split(base_list):\n list_mid_pointer=len(base_list)//2\n return base_list[:list_mid_pointer],base_list[list_mid_pointer:]", "def ancillary_spectra(self):\n return []", "def split_list(l, ratio=0.75):\n i = int(ratio * len(l))\n return l[:i], l[i:]", "def make_intervals(self):\n if not self:\n return []\n intervals = []\n self.sort()\n index_of_the_first = 0\n for i in range(len(self) - 1): # i: indexes from zero to len(self)\n if self[i] + 1 == self[i+1] or self[i] == self[i+1]:\n continue\n # elif self[i] == self[i+1]:\n # not_uniq.append( (self.count(self[i]), self[i]) )\n else:\n intervals.append((self[index_of_the_first], self[i]))\n index_of_the_first = i + 1\n # And now the last element:\n last_index = len(self) - 1\n intervals.append((self[index_of_the_first], self[last_index]))\n return intervals", "def _to_list(series: Union[TimeSeries, Sequence[TimeSeries]]) -> Sequence[TimeSeries]:\n\n return [series] if not isinstance(series, Sequence) else series", "def _merge_and_reduce(self, signals):\n\n if self.s_filter:\n\n signals = clean(signals,\n standardize=self.standardize,\n low_pass=self.low_pass,\n high_pass=self.high_pass,\n t_r=self.tr)\n \n return signals", "def _split_sample(sample):\n\n inputs, targets = sample\n return inputs, targets", "def split( self, rSilenceTresholdPercent = 0.1, rSilenceMinDuration = 0.3, nExtractJustFirsts = -1 ):\n nLimit = int( self.getSampleMaxValue() * rSilenceTresholdPercent / 100 ) \n print( \"INF: sound.Wav.split: splitting a sound of %5.3fs, using silence limits at %d for %5.3fs\" % (self.rDuration, nLimit, rSilenceMinDuration) ) \n aSplitted = []\n \n precalcWavIsNotSilence = np.abs(self.data)>nLimit\n\n #~ print self\n \n nCurrentPos = 0 # in data index (not sample)\n nSilenceMinLenData = rSilenceMinDuration * self.nAvgBytesPerSec * 8 / self.nNbrBitsPerSample\n while( nCurrentPos < len(self.data) ):\n \n # first find the beginning of a sound \n nFirstNonSilenceIndex = findFirstTrueValue( precalcWavIsNotSilence[nCurrentPos:] )\n #~ print( \"nFirstNonSilenceIndex (brut): %d\" % nFirstNonSilenceIndex )\n if( nFirstNonSilenceIndex == -1 ):\n # all remaining sound are silence!\n break\n nFirstNonSilenceIndex += nCurrentPos\n nNumFirstSample = nFirstNonSilenceIndex/self.nNbrChannel\n print( \"INF: sound.Wav.split: found a sound at sample %d\" % nNumFirstSample )\n nCurrentPos = nFirstNonSilenceIndex # so at the end, we're stopping\n \n # then find end\n nEndOfSilence = nNumFirstSample*self.nNbrChannel # init of the loop\n while( nEndOfSilence < len(self.data) ):\n #nFirstSilenceIndex = np.argmax( np.abs(self.data[nEndOfSilence:])<=nLimit )\n nFirstSilenceIndex = findFirstFalseValue( 
precalcWavIsNotSilence[nEndOfSilence:] ) \n #~ print( \"nFirstSilenceIndex (brut): %d (from %d)\" % (nFirstSilenceIndex, nEndOfSilence) )\n if( nFirstSilenceIndex == -1 ):\n break\n nFirstSilenceIndex += nEndOfSilence\n # ensure there's enough silence\n nEndOfSilence = findFirstTrueValue( precalcWavIsNotSilence[nFirstSilenceIndex:] )\n #~ print( \"nEndOfSilence (brut): %d (data: %d) (offset: %d)\" % (nEndOfSilence, self.data[nFirstSilenceIndex+nEndOfSilence],nEndOfSilence + nFirstSilenceIndex) )\n # positionnate onto the end of the silence for next time\n if( nEndOfSilence == -1 ):\n nCurrentPos = len(self.data)\n else:\n nCurrentPos = nEndOfSilence + nFirstSilenceIndex\n \n if( nEndOfSilence > nSilenceMinLenData or nEndOfSilence == -1 ):\n break\n nEndOfSilence += nFirstSilenceIndex\n # while - end\n \n # each time we're out, we've got a silence or we're at the end => new split\n if( nFirstSilenceIndex == -1 ):\n break\n nNumLastSample = nFirstSilenceIndex/self.nNbrChannel\n print( \"INF: sound.Wav.split: found the end of that sound at sample %d\" % nNumLastSample )\n if( nNumLastSample - nNumFirstSample > 4000 ):\n w = Wav()\n w.copyHeader( self )\n w.data = np.copy(self.data[nNumFirstSample*self.nNbrChannel:nNumLastSample*self.nNbrChannel])\n nPeakMax = max( max( w.data ), -min( w.data ) )\n if( nPeakMax > self.getSampleMaxValue() / 8 ): # remove glitch sound\n w.updateHeaderSizeFromDataLength()\n print( \"INF: sound.Wav.split: new split of %5.2fs\" % w.rDuration )\n aSplitted.append( w )\n #~ print( \"nCurLocalVs: %s\" % nCurLocalVs )\n if( nExtractJustFirsts != -1 and nExtractJustFirsts == len(aSplitted) ):\n print( \"WRN: sound.Wav.split: got enough split (%d), leaving...\" % len(aSplitted) )\n break\n # while - end\n print( \"INF: sound.Wav.split: created %d wav(s)\" % len( aSplitted ) )\n return aSplitted", "def split_list(a_list):\n half = len(a_list)/2\n return a_list[:half], a_list[half:]", "def as_list(self):\n return self._flattened_inputs", "def build_extracted_list(input_list, subinterval):\n out = []\n wait = subinterval\n for i in input_list:\n if wait == subinterval:\n out.append(i)\n wait = 0\n else:\n wait += 1\n return out", "def _send_signals(self, svc_names: List[str], sig: str):\n pass", "def send(signal, *args, **kw):\n result = []\n if signal in REGISTRY:\n result.extend(REGISTRY[signal].send(*args, **kw))\n return result" ]
[ "0.67559826", "0.637552", "0.6127554", "0.58000535", "0.5581344", "0.5568421", "0.5567797", "0.5507483", "0.54910624", "0.546959", "0.5445433", "0.539266", "0.53553843", "0.53532606", "0.5294758", "0.52855957", "0.52851367", "0.5276394", "0.52736336", "0.5243967", "0.5243068", "0.52381736", "0.5236668", "0.5222938", "0.5212559", "0.51642776", "0.5141297", "0.5127562", "0.51082045", "0.5094643", "0.5086553", "0.50823104", "0.5065536", "0.5056648", "0.5052488", "0.5046275", "0.5034341", "0.503427", "0.5030614", "0.5028401", "0.5026578", "0.5016797", "0.50095767", "0.49807674", "0.49772713", "0.4971016", "0.49653095", "0.49523607", "0.49423945", "0.49368742", "0.49261916", "0.49245206", "0.48972678", "0.48872706", "0.4879884", "0.48765272", "0.4869632", "0.4867751", "0.48557138", "0.4854763", "0.4851729", "0.48457068", "0.48405656", "0.48390466", "0.48343664", "0.48252827", "0.48104718", "0.47954395", "0.47945574", "0.47938615", "0.4789442", "0.4785172", "0.47700614", "0.4768055", "0.47670743", "0.476452", "0.47475487", "0.47454622", "0.47454622", "0.4745349", "0.47451288", "0.47339776", "0.4731815", "0.47306255", "0.4728862", "0.47275394", "0.4722343", "0.47137746", "0.4709613", "0.47055504", "0.47053778", "0.46984056", "0.46982333", "0.46929234", "0.46896532", "0.46827367", "0.46819752", "0.46814921", "0.46768177", "0.46721607" ]
0.68666124
0
List of signals is split into lists of single signals
def test_process_signal_list_of_one(self): input_signals = [Signal({"hello": "n.io"})] blk = SignalListSplitter() self.configure_block(blk, {}) blk.start() # one list of signals is processed blk.process_signals(input_signals) blk.stop() self.assert_num_signals_notified(1) # and one list of one signal has been notified for each signal self.assertEqual(len(self.notified_signals[DEFAULT_TERMINAL]), 1) for signal_list in self.notified_signals[DEFAULT_TERMINAL]: self.assertEqual(len(signal_list), 1) self.assertEqual(signal_list[0].to_dict(), { "hello": "n.io", })
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_process_signal_list(self):\n input_signals = [Signal({\"hello\": \"n.io\"}),\n Signal({\"hello\": \"n.io\"}),\n Signal({\"hello\": \"n.io\"})]\n blk = SignalListSplitter()\n self.configure_block(blk, {})\n blk.start()\n # one list of signals is processed\n blk.process_signals(input_signals)\n blk.stop()\n self.assert_num_signals_notified(3)\n # and one list of one signal has been notified for each signal\n self.assertEqual(len(self.notified_signals[DEFAULT_TERMINAL]), 3)\n for signal_list in self.notified_signals[DEFAULT_TERMINAL]:\n self.assertEqual(len(signal_list), 1)\n self.assertEqual(signal_list[0].to_dict(), {\n \"hello\": \"n.io\",\n })", "def add(self, signal_list):\n result = []\n for signals in signal_list:\n result.append(\n signals * signal.blackmanharris(\n len(signals),\n sym=False\n )\n )\n return result", "def signal(self) -> list:\n raise NotImplementedError(\"You must implement signal\")", "def read_all_signals(self):\n return [pio.sample(signal_idx)\n for signal_idx in self._signals_idx]", "def _signals(cls, idx, m, n):\n import numpy as np\n signal = []\n\n # Generating all the frequencies from a time series of length n\n fs = np.fft.fftfreq(n)\n\n # Loop through the frequencies in idx\n for i in idx:\n freq = fs[i]\n\n # Computing the sinusoids for the ith frequency\n signal.append(np.cos(2 * np.pi * m * freq) + complex(0, np.sin(2 * np.pi * m * freq)))\n return np.array(signal)", "def _remove_redundant_signals(self, signals: List[Signal]):\n open_positions = self._broker.get_positions()\n tickers_with_open_positions = set(\n self._contract_to_ticker(position.contract()) for position in open_positions\n )\n\n signals_with_suggested_exposure_out = [signal for signal in signals if\n signal.suggested_exposure == Exposure.OUT]\n redundant_signals = [signal for signal in signals_with_suggested_exposure_out\n if signal.ticker not in tickers_with_open_positions]\n\n for signal in redundant_signals:\n signals.remove(signal)", "def test_add_signals():\n x = np.linspace(390, 410, 200)\n doublet = [(399, 1), (401, 1)]\n y = add_signals(x, doublet, 1)\n X = np.array([x for x, _ in ADD_SIGNALS_DATASET])\n Y = np.array([y / 2 for _, y in ADD_SIGNALS_DATASET]) # scale to match\n print(y)\n print(Y)\n assert np.array_equal(x, X)\n assert np.array_equal(y, Y)", "def _read_signals(edf_file, header):\n signals = OrderedDict([(label, []) for label in header['label']])\n\n while True:\n try:\n record = _read_record(edf_file, header)\n except EOFError:\n break\n\n for label, signal in record.items():\n signals[label].append(signal)\n\n for label, signal in signals.items():\n signals[label] = np.concatenate(signal)\n\n return signals", "def signals(self):\n if self._current_inputs is None:\n raise RuntimeError(\n 'Internal Error: The current inputs have not been properly '\n 'generated. 
First call features_and_labels, then call signals.')\n signals = self._current_inputs['signals']\n self._current_inputs = None\n return signals", "def _sampleSignals(self):\n mSig_ = np.hstack((self.mSig, np.nan*np.zeros((self.nSigs,1)))) # Add nan to the end of signals\n\n # Generate matrix with patterns where NaN is changed to index of NaN in mSig_\n mPattsRep_ = self.mPattsRep.copy()\n (_, nSamps) = self.mSig.shape # Get the number of samples in the signals\n mPattsRep_[np.isnan(self.mPattsRep)] = nSamps # Change nan into pointer to nan\n \n self.mObSig = (mSig_[np.arange(self.nSigs), mPattsRep_.T.astype(int)]).T # Generate correct observation signals\n return", "def getEDFsignals(edf):\n n = edf.signals_in_file\n samples = edf.getNSamples()[0]\n signals = np.zeros((n, samples))\n for i in range(n):\n try:\n signals[i,:] = edf.readSignal(i)\n except:\n pass\n return signals", "def Signals(sigtype, num_sigs):\n assert isinstance(sigtype, (bool, intbv))\n sigs = [Signal(sigtype) for _ in range(num_sigs)]\n return sigs", "def get_output_bands(self):\n dlist=self.dest_list.children()\n out_list=[]\n for item in dlist:\n out_list.append((self.output_bands[item][0],\n self.output_bands[item][1]))\n return out_list", "def getSplitDetectorSignal(self):\r\n\t\treturn self.splitData", "def get_signal_info(self, signal_names):\n result = []\n for name in signal_names:\n description = self._pio.signal_description(name)\n domain_type = self._pio.signal_domain_type(name)\n aggregation, format_type, behavior = self._pio.signal_info(name)\n result.append((name, description, domain_type, aggregation, format_type, behavior))\n return result", "def split_registrations(list_of_registrations):\n list_of_registrations.sort(key=lambda registration: registration.service)\n\n sub_list = []\n main_list = []\n previous = list_of_registrations[0]\n\n for registration in list_of_registrations:\n if previous.service == registration.service:\n sub_list.append(registration)\n else:\n main_list.append(sub_list)\n sub_list = [registration]\n previous = registration\n\n main_list.append(sub_list)\n return main_list", "def GetSignals(cls):\n return []", "def pick_signals(processor, source = 'input'):\n\n if source == 'input':\n bin_edges = processor.input_parameters['bin_edges']\n raw_signal = processor.input_signal\n elif source == 'output':\n bin_edges = processor.output_parameters['bin_edges']\n raw_signal = processor.output_signal\n else:\n raise ValueError('Unknown value for the data source')\n t = np.zeros(len(raw_signal)*4)\n bins = np.zeros(len(raw_signal)*4)\n signal = np.zeros(len(raw_signal)*4)\n value = 1.\n\n for i, edges in enumerate(bin_edges):\n t[4*i] = edges[0]\n t[4*i+1] = edges[0]\n t[4*i+2] = edges[1]\n t[4*i+3] = edges[1]\n bins[4*i] = 0.\n bins[4*i+1] = value\n bins[4*i+2] = value\n bins[4*i+3] = 0.\n signal[4*i] = 0.\n signal[4*i+1] = raw_signal[i]\n signal[4*i+2] = raw_signal[i]\n signal[4*i+3] = 0.\n value *= -1\n\n z = t * c\n return (t, z, bins, signal)", "def _process_group(self, signals, group, input_id, signals_to_notify):\n signals_to_notify = []\n for signal in signals:\n state_change = self._process_state(signal, group)\n if state_change is not None:\n # If we are excluding existing fields we want to add\n # the states and previous states to an empty signal\n if self.exclude():\n signal = Signal()\n setattr(signal,\n 'prev_{}'.format(self.state_name()), state_change[0])\n setattr(signal, '{}'.format(\n self.state_name()), state_change[1])\n setattr(signal, 'group', group)\n 
signals_to_notify.append(signal)\n return signals_to_notify", "def split_list(self):\n wanted_parts = self.args.ncore\n alist = glob.glob(self.args.input + '*.root')\n length = len(alist)\n return [alist[i * length // wanted_parts: (i + 1) * length // wanted_parts]\n for i in range(wanted_parts)]", "def get_signals(self, symbols):\n buy = []\n sell = []\n hold = []\n for ticker in symbols:\n print(ticker)\n params = {\n 'symbol' : ticker,\n 'interval' : 'daily',\n 'series_type' : 'close',\n 'fastperiod': self.fastperiod,\n 'slowperiod':self.slowperiod,\n 'signalperiod':self.signalperiod\n }\n macdjson = av_call('MACD', params)\n\n if ticker == 'AMGN':\n print(macdjson)\n\n macd = get_df_from_av_json(macdjson)\n if macd['MACD'][-1] > macd['MACD_Signal'][-1]:\n buy.append(ticker)\n elif macd['MACD'][-1] > macd['MACD_Signal'][-1]:\n sell.append(ticker)\n else:\n hold.append(ticker)\n \n return {\n 'buy': buy,\n 'sell': sell,\n 'hold': hold\n }", "def removeDuplicates(self,covariateList,bands):\n\t\t\n\t\treturn [elem for elem in covariateList if elem not in bands]", "def prepare_signals_for_rendering(multiple_beams):\n logging.debug('running the prepare_signals_for_rendering function')\n multiple_envelopes = []\n\n for i, single_beam in enumerate(multiple_beams):\n logging.debug('Running through single beam signal number %d', i)\n rectified_signal = abs(single_beam)\n window_size = determine_window_size(rectified_signal)\n logging.debug('window_size determined to to be %d', window_size)\n single_envelope = envelope_detect(rectified_signal, window_size)\n multiple_envelopes.append(single_envelope)\n\n multiple_envelopes = log_compress(multiple_envelopes)\n multiple_envelopes = account_for_distance(multiple_envelopes)\n multiple_envelopes = account_for_harmonics(multiple_envelopes)\n\n return multiple_envelopes", "def averaging(signal_list):\n #check if their length match\n N = len(signal_list)-1\n len_0 = len(singal_list[0])\n i = 1\n while (i <= N):\n if (len_0 == len(signal_list[i])):\n i = i+1\n else:\n break\n same_length = (i > N)\n\n #loop of averaging frame by frame\n if same_length:\n avg_arr = np.zeros(signal_list[0].shape, dtype=float)\n for i in range(len(signal_list[0])):\n sum = 0\n for j in range(len(signal_list)):\n sum = sum + signal_list[j][i]\n avg_arr[i] = sum/len(signal_list)\n else:\n print(\"The signals should be uniform in length\")\n\n return avg_arr", "def split(self, X):", "def convert(filename, signal=sampleArray(), \n centerEta = DEFCENTERETA, centerPhi = DEFCENTERPHI): \n numEvents = 0\n listOfSignals = []\n # First we open the file\n with open(filename, \"r\") as f:\n content = f.readlines()\n numEvents = len(content)\n for i in range(0, numEvents):\n if i % 1000 == 0:\n print(i)\n thisEvent = content[i]\n try:\n reducedEvent = convertEvent(\n thisEvent, centerEta, centerPhi, signal.shape[0], signal.shape[1]\n ) \n listOfSignals.append(reducedEvent)\n except indexError:\n 0\n print(\"Converted\", len(listOfSignals), \"out of\", numEvents, \"events\")\n return listOfSignals", "def caculate_signals(self):\n\t\traise NotImplementedError(\"Should implement calculate_signals()\")", "def split(self, x):\r\n new_beams = np.array([])\r\n for bar in self.bar_elements:\r\n new_beams = np.concatenate((new_beams, bar.split(x)))\r\n return BeamElements(new_beams)", "def read_list(f, nb_freqs):\n alist = []\n while len(alist) < nb_freqs:\n line = f.readline()\n splitted = line.split()\n well_splitted = True\n for entry in splitted:\n well_splitted = well_splitted and 
entry.count('.') <= 1\n if well_splitted:\n entries = splitted\n else:\n if line.count('-') > 0:\n # Probably coming from an SDSS spectrum.\n entries = [line[i:i+12] for i in range(0, len(line) - 1, 12)]\n else:\n entries = [line[i:i+8] for i in range(0, len(line) - 1, 8)]\n for entry in entries:\n try:\n alist.append(float(entry))\n except ValueError:\n # If conversion to float fails, put 0 instead.\n alist.append(0)\n return numpy.array(alist)", "def calculate_signals(self):\n raise NotImplementedError(\"Should implement calculate_signals()\")", "def simulation_to_lines(data: List(Float))->List(Tuple(Int, Float)):\n result = []\n counter = 0\n for payoff in data:\n result = result + [(counter, payoff)]\n counter+=1\n return result\n\n #print(str(result))", "def waveforms(self):\n return list(self._waveforms)", "def filter_signals(self, df_phys):\n if not df_phys.empty and len(self.signals):\n df_phys = df_phys[df_phys[\"Signal\"].isin(self.signals)]\n\n return df_phys", "def build_wires(self) -> List[Segment]:\n segments = [Segment(self.ORIGIN, self.ORIGIN.move_to(self._diagram[0]))]\n for step in self._diagram[1:]:\n segments.append(segments[-1].wire_to(step))\n\n return segments", "def split_train_eval(full_list):\n tr_list = []\n ev_list = []\n random.shuffle(full_list)\n tot = len(full_list)\n tot80 = int(0.8 * tot)\n for rg in range(tot):\n if rg < tot80:\n tr_list.append(full_list[rg])\n else:\n ev_list.append(full_list[rg])\n return [tr_list, ev_list]", "def generate_signals(self):\n signals = {}\n\n # Create the set of short and long simple moving averages over the \n # respective periods\n signals['short_mavg'] = self.bars.rolling(window=self.short_window).mean()\n signals['long_mavg'] = self.bars.rolling(window=self.long_window).mean()\n\n\n return signals", "def reconstruct_signal(_X):\n width = _X.shape[1]\n N = _X.shape[0]\n n = N // 2\n\n head = _X[:n, 0]\n tail = _X[n:, width - 1]\n body = np.array([_X[n:, i] + _X[:n, i + 1] for i in range(width - 1)]).reshape(n * (width - 1))\n\n return np.append(head, np.append(body, tail))", "def signal_to_training( # pylint: disable=too-many-locals\n self,\n signal: Union[Dict, List[Dict]]\n ) -> Tuple[np.ndarray, Tuple[np.ndarray, ...], np.ndarray, Dict[str, Any]]:\n dict_list = list(signal) if isinstance(signal, list) else list((signal, ))\n\n # Initialize the return values\n time_length = len(dict_list[0]['signal']['time']['data']) # type: ignore\n length = int(time_length / 2)\n signals = np.zeros((0, time_length))\n result_r = np.zeros((0, length))\n result_b = np.zeros((0, length))\n result_h = np.zeros((0, length))\n result_m = np.zeros((0, length))\n result_p = np.zeros((0, length))\n answer = np.zeros((0, length))\n config = {\n 'SNR': [],\n 'count': [],\n 'frequencies': [],\n 'amplitudes': [],\n 'minamplitude': [],\n 'mindist': []\n } # type: Dict[str, Any]\n\n # Calculate window functions\n window_bartlett = np.bartlett(time_length)\n window_hanning = np.hanning(time_length)\n window_meyer = self._meyer_wavelet(time_length)\n window_poisson = exponential(time_length, sym=True, tau=(time_length/2)*(8.69/60.0))\n\n # Loop all data entries\n for data in dict_list:\n time = np.asarray(data['signal']['time']['data'])\n signals = np.concatenate((signals, np.reshape(time, (1,) + time.shape)))\n config['SNR'].append(data['signal']['SNR'])\n\n # Assemble the FFTs\n fft = np.fft.fft(time)[:length] / time_length\n result_r = np.concatenate((result_r, np.reshape(fft, (1,) + fft.shape)))\n fft = np.fft.fft(time * 
window_bartlett)[:length] / time_length\n result_b = np.concatenate((result_b, np.reshape(fft, (1,) + fft.shape)))\n fft = np.fft.fft(time * window_hanning)[:length] / time_length\n result_h = np.concatenate((result_h, np.reshape(fft, (1,) + fft.shape)))\n fft = np.fft.fft(time * window_meyer)[:length] / time_length\n result_m = np.concatenate((result_m, np.reshape(fft, (1,) + fft.shape)))\n fft = np.fft.fft(time * window_poisson)[:length] / time_length\n result_p = np.concatenate((result_p, np.reshape(fft, (1,) + fft.shape)))\n\n # Assemble all the frequencies and amplitudes\n count = 0\n freqs = []\n ampls = []\n counting = np.zeros((1, length))\n for subsig in data['signal']['parts']:\n if subsig['signal']['type'] == 'SingleOscillation':\n count += 1\n freq = subsig['signal']['frequency']\n counting[0, int(max(0, min(length - 1, round(freq))))] += 1\n freqs.append(freq)\n ampls.append(subsig['signal']['amplitude'])\n config['count'].append(count)\n\n # Sort frequencies and amplitudes by frequency\n np_freqs = np.asarray(freqs)\n sorting = np.unravel_index(np.argsort(np_freqs), np_freqs.shape)\n np_freqs = np_freqs[sorting]\n np_ampls = np.asarray(ampls)[sorting]\n\n # Assemble some statistics\n config['mindist'].append(999999. if len(np_freqs) < 2 else np.min(np.diff(np_freqs)))\n config['minamplitude'].append(np.min(np_ampls) if len(np_ampls) > 0 else 999999.)\n config['frequencies'].append(np_freqs)\n config['amplitudes'].append(np_ampls)\n answer = np.concatenate((answer, counting))\n\n # Assemble results\n ffts = (result_r, result_b, result_h, result_m, result_p)\n return signals, ffts, answer, config", "def signal_filter(signal_processor):\n while True:\n synapse, data = (yield)\n try:\n signal = Signal.deserialize(data)\n except ProtocolError, reason:\n # low-level protocol error (e.g transmission of '[1,2' )\n #logs.logger.debug(\"filter error: %s,%s\" % (synapse, reason))\n err_response = Signal('error', (Signal.ProtocolError, str(reason)))\n synapse.transmit(err_response)\n synapse.disconnect()\n else:\n signal_processor.send((synapse, signal))", "def test_read_multiple_specified_signals(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n test_dir = os.path.join(cwd, 'test_files/')\n signals = read_signals(test_dir, ['test2', 'test3'])\n self.assertEquals(len(signals), 2)", "def split(self, x):\r\n if x >= self.n2.x or x <= self.n1.x: return [self]\r\n n_intermediate = Node.MiddleNode(x=x)\r\n bar1 = BeamElement(nodes=[self.n1, n_intermediate], section=self.section, material=self.material)\r\n bar2 = BeamElement(nodes=[n_intermediate, self.n2], section=self.section, material=self.material)\r\n return [bar1, bar2]", "def calculate_signals(self, event: MarketEvent):\n for symbol, bars in event.symbol_data.items():\n if not self.bought[symbol]:\n signal = SignalEvent(bars[-1].symbol, bars[-1].time, 'LONG')\n self.events.add_event(signal)\n self.bought[symbol] = True", "def visit_list(self, sylist):\n for value in sylist:\n child = type(value)(value.container_type)\n self.current.append(child)\n value.visit(SpineCopyVisitor(child))", "def binn_fft(self):\n bin_res = []\n for fft_bin in BINS:\n bin_res.append(self.bin_spec_y(fft_bin[0], fft_bin[1]))\n return bin_res", "def input_slice(self, inputs):\n result = []\n for i in range(int(len(inputs) / self.window_size)):\n result.append(inputs[i * self.window_size:(i + 1) * self.window_size])\n return result", "def get_signals(self):\n return QFDataFrame(data=self._signals, index=self._signals_dates)", "def build_symptoms(self, 
signals):\n\n symptoms = []\n for signal in signals:\n new_symptom = symptom.Symptom(self.symptom_tag, self.action_msg, signal)\n symptoms.append(new_symptom)\n\n return symptoms", "def extract_onset_events(bin_path, chanList, chunk_size=4000):\n meta = readMeta(bin_path)\n sRate = SampRate(meta)\n\n n_samples = int(float(meta['fileTimeSecs']) * sRate)\n n_chunks = sp.floor(n_samples / chunk_size).astype('int32')\n print(\"leftover samples: %i\" % (n_samples % n_chunks))\n\n rawData = makeMemMapRaw(bin_path, meta)\n\n events = []\n for ch in chanList:\n inds = []\n\n # get digital data for the selected lines\n for i in tqdm(range(n_chunks)):\n start = i * chunk_size\n stop = start + chunk_size\n\n digArray = ExtractDigital(rawData, start, stop, 0, range(8), meta)\n trig_data = digArray[ch,:]\n\n ix = sp.where(sp.diff(trig_data) == 1)[0]\n inds.append(ix+start)\n # if len(ix) > 0:\n # print(len(ix))\n\n inds = sp.concatenate(inds)\n times = inds / sRate\n events.append([inds,times])\n\n return events", "def bin_the_data(neuron_spikes, first, last, bin_size):\n neuron_activity = []\n timebins = range(first, int(last) + int(last) % bin_size, bin_size)\n for spike in neuron_spikes:\n activity = []\n spike_time = spike[0]\n i = 0\n for bin_size in timebins:\n k = 0\n while spike_time < bin_size:\n i += 1\n if i >= np.size(spike):\n break\n spike_time = spike[i]\n k += 1\n activity.append(k)\n neuron_activity.append(activity)\n return neuron_activity, timebins", "def generate_sequential_events_list(generator_spec_list):\n\n data = []\n for spec in generator_spec_list:\n generator = spec[tg.GENERATOR]\n data += tg.generate_round_robin_data_stream(generator.models,\n spec[tg.NUM_EVENTS])\n return data", "def _convert_to_multi_segment(self):\n\n self.header['nb_segment'] = [self.info['n_episodes']]\n\n # drop repeated signal headers\n self.header['signal_channels'] = \\\n self.header['signal_channels'].reshape(\n self.info['n_episodes'], -1)[0]\n\n # reshape signal memmap list\n new_sig_memmaps = []\n n_channels = len(self.header['signal_channels'])\n sig_memmaps = self._raw_signals[0]\n for first_index in np.arange(0, len(sig_memmaps), n_channels):\n new_sig_memmaps.append(\n sig_memmaps[first_index:first_index + n_channels])\n self._raw_signals = new_sig_memmaps\n\n self.logger.debug('New number of segments: {}'.format(\n self.info['n_episodes']))\n\n return", "def split_list_by(lst, sepfunc, includesep):\n\tblocks = []\n\tblock = []\n\tfor elem in lst:\n\t\tif sepfunc(elem):\n\t\t\tif includesep:\n\t\t\t\tblock.append(elem)\n\t\t\tblocks.append(block)\n\t\t\tblock = []\n\t\telse:\n\t\t\tblock.append(elem)\n\tif len(block):\n\t\tblocks.append(block)\n\treturn blocks", "def gen_list(self, x_list, z, s, nsamp):\n x_list = self.transform_xin_list(x_list)\n pred_list = self.sample_gp_pred(nsamp, x_list)\n pred_list = [self.dt.inv_transform_y_data(pr) for pr in pred_list]\n return pred_list", "def merge_one_sensor(slist):\n r = strip_file(slist[0],leave_header=True)\n for s in slist[1:]:\n r += strip_file(s,leave_header=False)\n return r", "def _split_inputs_outputs(self, data):\n\n\t\tinputs = []\n\t\toutputs = []\n\n\t\tfor point in data:\n\t\t\tinputs.append(point[0])\n\t\t\toutputs.append(point[1])\n\n\t\treturn np.array(inputs), np.array(outputs)", "def convert_to_list(self): \n self.reads = list(self.reads)\n self.sampling = False", "def _process_egocentric(self, signal: egocentric.EgocentricSignal):\n output_signals = []\n output_signals += self._process_egocentric_direction(\n 
self._get_hparam('egocentric_direction_mode'),\n signal.xz_direction,\n signal.yz_direction)\n output_signals += self._process_egocentric_distance(\n self._get_hparam('egocentric_distance_mode'),\n signal.distance)\n return output_signals", "def synthesize1(amps, freqs, ts):\n components = [thinkdsp.ComplexSinusoid(freq, amp)\n for amp, freq in zip(amps, freqs)]\n signal = thinkdsp.SumSignal(*components)\n\n ys = signal.evaluate(ts)\n return ys", "def get_samples_per_signal(self):\n return np.array([self.samples_in_file(chn) for chn in range(self.signals_in_file)])", "def calculate_signals(self):\n\t\traise NotImplementedError(\n\t\t\t\"Should implement calculate_signals()\\n\" + \\\n\t\t\t\"By calling this method to calculate 'Signal' Events\"\n\t\t)", "def base_to_signal_mapping(grp):\n\n position_in_signal = [0 for _ in range(5)]\n for i in range(1, len(grp)):\n position_in_signal += [i for _ in range(grp[i][5])]\n # position_in_signal += [grp[i][0] for _ in range(grp[i][5])]\n\n # print(position_in_signal)\n return position_in_signal", "def generate_signals(self):\n signals = {}\n \n\n # Create the set of short and long exponential moving averages over the \n # respective periods\n signals['short'] = self.bars.ewm(span = self.short_window , min_periods=self.long_window-1).mean()\n signals['long'] = self.bars.ewm(span = self.long_window , min_periods=self.long_window-1).mean()\n signals['MACD'] = signals['short'] - signals['long']\n signals['MACDsign'] = signals['MACD'].ewm(span = self.signal_window , min_periods=self.long_window-1).mean()\n signals['MACDdiff'] = signals['MACD'] - signals['MACDsign']\n\n \n return signals", "def smart_split(strokes):\n\n splited = []\n for stroke in strokes:\n splited += stroke.split_non_differentiable_points()\n return splited", "def postgen_list(self, x_list, s, nsamp):\n x_list = self.transform_xin_list(x_list)\n pred_list = self.sample_gp_post_pred(\n nsamp, x_list, full_cov=True, nloop=np.min([50, nsamp])\n )\n pred_list = [self.dt.inv_transform_y_data(pr) for pr in pred_list]\n return pred_list", "def rawSignals(obars, window=21, nbands=3, inc=0.5, save=True):\n bars = obars.copy() # avoid warnings\n bars['OHLC'] = np.nan # typical price\n bars.OHLC.values[:] = np.mean(bars.values[:,0:4], axis=1) # 1000x faster\n price = bars.OHLC.values\n for i in range(nbands):\n upband, sma, lwband = ta.BBANDS(price, window*inc)\n if save: # for plotting stuff\n bars['bandlw'+str(i)] = lwband\n bars['bandup'+str(i)] = upband\n bars['bandsg'+str(i)] = 0 # signal for this band\n signals = fastbollingerSignal(price, upband, lwband)\n bars.loc[:, 'bandsg'+str(i)] = signals.astype(int) # signal for this band\n inc += 0.5\n bars.dropna(inplace=True)\n return bars", "def stream2inputs(stream):\r\n inputs = list(stream.sorted.flat.getElementsByClass([\"Note\", \"Chord\", \"Rest\"]))\r\n return [MidiLSTM._to_midi_values(x) for x in inputs]", "def reformatList( listOfPaths):\n newList = []\n first = True\n for seg in listOfPaths: \n newList += seg.asSVGCommand(first)\n first = False\n return newList", "def make_lists(sv):\r\n \r\n mark_delayed(sv) # identify delayed objects\r\n make_pin_list(sv) # detect and initialize inputs (to false) \r\n make_old_list(sv) # create a list of used old/old \r", "def InSlotsGet(self):\n ## Make Header\n hex_rep = self.NanonisTCP.make_header('Signals.InSlotsGet', body_size=0)\n \n self.NanonisTCP.send_command(hex_rep)\n \n response = self.NanonisTCP.receive_response()\n \n # signals_names_size = 
self.NanonisTCP.hex_to_int32(response[0:4])\n signals_names_num = self.NanonisTCP.hex_to_int32(response[4:8])\n \n idx = 8\n signal_names = []\n for n in range(signals_names_num):\n size = self.NanonisTCP.hex_to_int32(response[idx:idx+4])\n idx += 4\n signal_name = response[idx:idx+size].decode()\n idx += size\n signal_names.append(signal_name)\n \n signal_indexes = []\n signal_indexes_size = self.NanonisTCP.hex_to_int32(response[idx:idx+4])\n for n in range(signal_indexes_size):\n idx += 4\n signal_index = self.NanonisTCP.hex_to_int32(response[idx:idx+4])\n signal_indexes.append(signal_index)\n \n return [signal_names,signal_indexes]", "def test_read_multiple_signals(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n test_dir = os.path.join(cwd, 'test_files/')\n signals = read_signals(test_dir)\n self.assertEqual(len(signals), 3)\n self.assertTrue(\n any(signal for signal in signals if signal.name == \"test1\"))\n self.assertTrue(\n any(signal for signal in signals if signal.name == \"test2\"))\n self.assertTrue(\n any(signal for signal in signals if signal.name == \"test3\"))", "def signal_to_frames(signal, frame_len, frame_step, win_func=None):\n assert signal.ndim == 1\n\n signal_len = len(signal)\n frame_len = int(round(frame_len))\n frame_step = int(round(frame_step))\n num_frames = number_frames(signal_len, frame_len, frame_step)\n\n indices = indices_grid(frame_len, frame_step, num_frames)\n framed_signal = signal[indices]\n\n if win_func is not None:\n framed_signal = win_func(framed_signal)\n\n remain_signal = []\n # Add plus one to get first index\n # that is not in framed_signal\n max_idx = np.max(indices) + 1\n if max_idx <= signal_len - 1:\n remain_signal = np.r_[remain_signal, signal[max_idx:]]\n\n return framed_signal, remain_signal", "def split_multiple_recordings(audio, min_silence_duration=0.25, noise_threshold=150, sample_rate_hz=8e3):\n # A list of tuples (start, stop)\n min_silence_frame = sample_rate_hz * min_silence_duration\n silence_zones = []\n\n zone_start = None\n zone_end = None\n\n for idx, point in enumerate(audio):\n if abs(point) < noise_threshold and zone_start is None:\n zone_start = idx\n\n if abs(point) > noise_threshold and zone_start is not None:\n zone_end = idx\n\n # If we are in a silent zone and we come to the end point\n if zone_start is not None and zone_end and abs(point) > noise_threshold:\n if (zone_end - zone_start) > min_silence_frame:\n silence_zones.append((zone_start, zone_end))\n\n zone_start = None\n zone_end = None\n\n # Split the recording by the zones\n split_recordings = []\n for idx, zone in enumerate(silence_zones):\n if idx == 0:\n start = 0\n else:\n start = silence_zones[idx - 1][1]\n\n end = zone[0]\n split_recordings.append(audio[start:end])\n\n return split_recordings", "def separate_all_data(data_arrays_list):\n separated_data_list = []\n for i in range(0, len(data_arrays_list)):\n separated_data_list.append(separate_data(data_arrays_list[i]))\n return separated_data_list", "def signalAll(self, signal, startswithname=None):\n for name in self.processes.keys():\n if startswithname is None or name.startswith(startswithname):\n self.signalProcess(signal, name)", "def transform_to_one_chanel_data(train_data_lst, test_data_lst, data_anots):\r\n num_chanles = train_data_lst[0].shape[2]\r\n train_data_chanels = []\r\n test_data_chanels = []\r\n new_anots = [] \r\n for data_idx in range(len(train_data_lst)):\r\n signals = train_data_lst[data_idx]\r\n test_signals = test_data_lst[data_idx]\r\n \r\n for ch_idx in 
range(num_chanles):\r\n train_one_chanel = select_one_chanel(signals, ch_idx)\r\n train_data_chanels.append(train_one_chanel)\r\n \r\n test_data_one_chanel = select_one_chanel(test_signals, ch_idx)\r\n test_data_chanels.append(test_data_one_chanel)\r\n new_anots.append(data_anots[data_idx]+'_'+str(ch_idx)) \r\n return train_data_chanels, test_data_chanels, new_anots", "def calculate_signals(self, event):\n if event.type == 'MARKET':\n for s in self.symbol_list:\n bars = self.bars.get_latest_bars_values(s, \"adj_close\", N=self.long_window)\n bar_date = self.bars.get_latest_bar_datetime(s)\n if len(bars)+1 <= self.short_window:\n short_sma = np.mean(bars)\n long_sma = np.mean(bars)\n elif self.short_window < len(bars)+1 and len(bars)+1 <= self.long_window:\n short_sma = np.mean(bars[-self.short_window-1:-1])\n long_sma = np.mean(bars)\n else:\n short_sma = np.mean(bars[-self.short_window-1:-1])\n long_sma = np.mean(bars[-self.long_window-1:-1])\n\n symbol = s\n cur_date = dt.utcnow()\n sig_dir = \"\"\n\n if short_sma > long_sma and self.bought[s] == \"OUT\":\n print(\"LONG: %s\" % bar_date)\n sig_dir = 'LONG'\n signal = SignalEvent(strategy_id=1, symbol=symbol, datetime=bar_date, signal_type=sig_dir, strength=1.0)\n self.events.put(signal)\n self.bought[s] = 'LONG'\n elif short_sma < long_sma and self.bought[s] == \"LONG\":\n print(\"SHORT: %s\" % bar_date)\n sig_dir = 'EXIT'\n signal = SignalEvent(strategy_id=1, symbol=symbol, datetime=bar_date, signal_type=sig_dir, strength=1.0)\n self.events.put(signal)\n self.bought[s] = 'OUT'", "def array(self):\n return list(self.sequence)", "def transform_basis(self, values):\n block_len = len(values)/self.base\n blocks = [values[i*block_len:(i+1)*block_len] for i in range(self.base)]\n return blocks", "def transform_basis(self, values):\n block_len = len(values)/self.base\n blocks = [values[i*block_len:(i+1)*block_len] for i in range(self.base)]\n return blocks", "def _get_pulse_shaping_waveform(self):\n self.pulse_shaping_list = []\n # Make the rise time be 3.3333% if the dot time.\n rise_time_in_msec = 0.03333333333333 * self.dot_time_in_msec\n # Limit the rise time to 2 milliseconds.\n if rise_time_in_msec > 0.002:\n rise_time_in_msec = 0.002\n rising_falling_count = int(rise_time_in_msec * self.sample_rate)\n step = math.pi / rising_falling_count\n # The first value is zero, so skip that value.\n # The last value is 1.0, so skip that value too.\n for i in range(1, rising_falling_count - 1):\n gain = 0.5 * (1.0 - math.cos(step * i))\n self.pulse_shaping_list.append(gain)", "def _splitPoints(self, points, split):\n # validate split\n if not split:\n return [points]\n\n # complete split with adding start and end frames\n if split[0] != 0:\n split.insert(0, 0)\n\n if split[-1] != len(points):\n split.append(len(points))\n\n # make sure split is sorted and doesn't contain any duplicates\n split = list(set(split))\n split.sort()\n\n # split range for looping\n splitA = split[:-1]\n splitB = split[1:]\n\n # get lists\n return [points[a:b + 1] for a, b in zip(splitA, splitB)]", "def split(f):\n n = len(f)\n f0 = [f[2 * i + 0] for i in range(n // 2)]\n f1 = [f[2 * i + 1] for i in range(n // 2)]\n return [f0, f1]", "def split_list(list_in,number_of_pieces):\n output_length = len(list_in) / number_of_pieces\n output = []\n piece = []\n counter = 0\n for list_item in list_in:\n counter += 1\n piece.append(list_item)\n if counter >= output_length:\n output.append(piece)\n counter = 0\n piece = []\n # Make sure nothing is missed\n if len(piece) > 0:\n 
output.append(piece)\n return output", "def forward(self, x):\n out = [x]\n for freq in self.freq_bands:\n for func in self.funcs:\n out += [func(freq*x)]\n\n return torch.cat(out, -1)", "def SI(As):\n return [A for A in As if A.is_SI()]", "def Chunks(l):\n return_list = [[]]\n counter = 0\n index = 0\n for i in l:\n # Size is split in half due to the max size being a sum of src and dst.\n if counter > (self._ADDRESS_LENGTH_LIMIT/2):\n counter = 0\n index += 1\n return_list.append([])\n if i.version == 6:\n counter += self._IPV6_SIZE\n else:\n counter += 1\n return_list[index].append(i)\n return return_list", "def list_by_list(list_to_be_splited, list_with_intervals):\n intervals = []\n for x, val in enumerate(list_to_be_splited):\n for y in list_with_intervals:\n if y == val:\n intervals.append((x, val))\n return intervals", "def split(base_list):\n list_mid_pointer=len(base_list)//2\n return base_list[:list_mid_pointer],base_list[list_mid_pointer:]", "def ancillary_spectra(self):\n return []", "def split_list(l, ratio=0.75):\n i = int(ratio * len(l))\n return l[:i], l[i:]", "def make_intervals(self):\n if not self:\n return []\n intervals = []\n self.sort()\n index_of_the_first = 0\n for i in range(len(self) - 1): # i: indexes from zero to len(self)\n if self[i] + 1 == self[i+1] or self[i] == self[i+1]:\n continue\n # elif self[i] == self[i+1]:\n # not_uniq.append( (self.count(self[i]), self[i]) )\n else:\n intervals.append((self[index_of_the_first], self[i]))\n index_of_the_first = i + 1\n # And now the last element:\n last_index = len(self) - 1\n intervals.append((self[index_of_the_first], self[last_index]))\n return intervals", "def _to_list(series: Union[TimeSeries, Sequence[TimeSeries]]) -> Sequence[TimeSeries]:\n\n return [series] if not isinstance(series, Sequence) else series", "def _merge_and_reduce(self, signals):\n\n if self.s_filter:\n\n signals = clean(signals,\n standardize=self.standardize,\n low_pass=self.low_pass,\n high_pass=self.high_pass,\n t_r=self.tr)\n \n return signals", "def _split_sample(sample):\n\n inputs, targets = sample\n return inputs, targets", "def split( self, rSilenceTresholdPercent = 0.1, rSilenceMinDuration = 0.3, nExtractJustFirsts = -1 ):\n nLimit = int( self.getSampleMaxValue() * rSilenceTresholdPercent / 100 ) \n print( \"INF: sound.Wav.split: splitting a sound of %5.3fs, using silence limits at %d for %5.3fs\" % (self.rDuration, nLimit, rSilenceMinDuration) ) \n aSplitted = []\n \n precalcWavIsNotSilence = np.abs(self.data)>nLimit\n\n #~ print self\n \n nCurrentPos = 0 # in data index (not sample)\n nSilenceMinLenData = rSilenceMinDuration * self.nAvgBytesPerSec * 8 / self.nNbrBitsPerSample\n while( nCurrentPos < len(self.data) ):\n \n # first find the beginning of a sound \n nFirstNonSilenceIndex = findFirstTrueValue( precalcWavIsNotSilence[nCurrentPos:] )\n #~ print( \"nFirstNonSilenceIndex (brut): %d\" % nFirstNonSilenceIndex )\n if( nFirstNonSilenceIndex == -1 ):\n # all remaining sound are silence!\n break\n nFirstNonSilenceIndex += nCurrentPos\n nNumFirstSample = nFirstNonSilenceIndex/self.nNbrChannel\n print( \"INF: sound.Wav.split: found a sound at sample %d\" % nNumFirstSample )\n nCurrentPos = nFirstNonSilenceIndex # so at the end, we're stopping\n \n # then find end\n nEndOfSilence = nNumFirstSample*self.nNbrChannel # init of the loop\n while( nEndOfSilence < len(self.data) ):\n #nFirstSilenceIndex = np.argmax( np.abs(self.data[nEndOfSilence:])<=nLimit )\n nFirstSilenceIndex = findFirstFalseValue( 
precalcWavIsNotSilence[nEndOfSilence:] ) \n #~ print( \"nFirstSilenceIndex (brut): %d (from %d)\" % (nFirstSilenceIndex, nEndOfSilence) )\n if( nFirstSilenceIndex == -1 ):\n break\n nFirstSilenceIndex += nEndOfSilence\n # ensure there's enough silence\n nEndOfSilence = findFirstTrueValue( precalcWavIsNotSilence[nFirstSilenceIndex:] )\n #~ print( \"nEndOfSilence (brut): %d (data: %d) (offset: %d)\" % (nEndOfSilence, self.data[nFirstSilenceIndex+nEndOfSilence],nEndOfSilence + nFirstSilenceIndex) )\n # positionnate onto the end of the silence for next time\n if( nEndOfSilence == -1 ):\n nCurrentPos = len(self.data)\n else:\n nCurrentPos = nEndOfSilence + nFirstSilenceIndex\n \n if( nEndOfSilence > nSilenceMinLenData or nEndOfSilence == -1 ):\n break\n nEndOfSilence += nFirstSilenceIndex\n # while - end\n \n # each time we're out, we've got a silence or we're at the end => new split\n if( nFirstSilenceIndex == -1 ):\n break\n nNumLastSample = nFirstSilenceIndex/self.nNbrChannel\n print( \"INF: sound.Wav.split: found the end of that sound at sample %d\" % nNumLastSample )\n if( nNumLastSample - nNumFirstSample > 4000 ):\n w = Wav()\n w.copyHeader( self )\n w.data = np.copy(self.data[nNumFirstSample*self.nNbrChannel:nNumLastSample*self.nNbrChannel])\n nPeakMax = max( max( w.data ), -min( w.data ) )\n if( nPeakMax > self.getSampleMaxValue() / 8 ): # remove glitch sound\n w.updateHeaderSizeFromDataLength()\n print( \"INF: sound.Wav.split: new split of %5.2fs\" % w.rDuration )\n aSplitted.append( w )\n #~ print( \"nCurLocalVs: %s\" % nCurLocalVs )\n if( nExtractJustFirsts != -1 and nExtractJustFirsts == len(aSplitted) ):\n print( \"WRN: sound.Wav.split: got enough split (%d), leaving...\" % len(aSplitted) )\n break\n # while - end\n print( \"INF: sound.Wav.split: created %d wav(s)\" % len( aSplitted ) )\n return aSplitted", "def split_list(a_list):\n half = len(a_list)/2\n return a_list[:half], a_list[half:]", "def as_list(self):\n return self._flattened_inputs", "def build_extracted_list(input_list, subinterval):\n out = []\n wait = subinterval\n for i in input_list:\n if wait == subinterval:\n out.append(i)\n wait = 0\n else:\n wait += 1\n return out", "def _send_signals(self, svc_names: List[str], sig: str):\n pass", "def send(signal, *args, **kw):\n result = []\n if signal in REGISTRY:\n result.extend(REGISTRY[signal].send(*args, **kw))\n return result" ]
[ "0.68666124", "0.637552", "0.6127554", "0.58000535", "0.5581344", "0.5568421", "0.5567797", "0.5507483", "0.54910624", "0.546959", "0.5445433", "0.539266", "0.53553843", "0.53532606", "0.5294758", "0.52855957", "0.52851367", "0.5276394", "0.52736336", "0.5243967", "0.5243068", "0.52381736", "0.5236668", "0.5222938", "0.5212559", "0.51642776", "0.5141297", "0.5127562", "0.51082045", "0.5094643", "0.5086553", "0.50823104", "0.5065536", "0.5056648", "0.5052488", "0.5046275", "0.5034341", "0.503427", "0.5030614", "0.5028401", "0.5026578", "0.5016797", "0.50095767", "0.49807674", "0.49772713", "0.4971016", "0.49653095", "0.49523607", "0.49423945", "0.49368742", "0.49261916", "0.49245206", "0.48972678", "0.48872706", "0.4879884", "0.48765272", "0.4869632", "0.4867751", "0.48557138", "0.4854763", "0.4851729", "0.48457068", "0.48405656", "0.48390466", "0.48343664", "0.48252827", "0.48104718", "0.47954395", "0.47945574", "0.47938615", "0.4789442", "0.4785172", "0.47700614", "0.4768055", "0.47670743", "0.476452", "0.47475487", "0.47454622", "0.47454622", "0.4745349", "0.47451288", "0.47339776", "0.4731815", "0.47306255", "0.4728862", "0.47275394", "0.4722343", "0.47137746", "0.4709613", "0.47055504", "0.47053778", "0.46984056", "0.46982333", "0.46929234", "0.46896532", "0.46827367", "0.46819752", "0.46814921", "0.46768177", "0.46721607" ]
0.67559826
1
v_1 w_1 + ... + v_n w_n
def dot(v,w): return sum(v_i * w_i for v_i, w_i in zip(v, w))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def vector_add(v, w):\n return [v_i + w_i for v_i, w_i in zip(v,w)]", "def vector_add(v, w):\n return [v_i + w_i for v_i, w_i in zip(v, w)]", "def vector_add(v, w):\n\treturn [v_i + w_i for v_i, w_i in zip(v, w)]", "def add(v: Vector, w: Vector) -> Vector:\n assert len(v) == len(w), 'both vectors must have the same length'\n\n return [v_item + w_item for v_item, w_item in zip(v, w)]", "def dot_product(v, w):\n return sum(v_i * w_i for v_i, w_i in zip(v, w))", "def dot(v,w):\n return sum(v_i * w_i for v_i,w_i in zip(v,w))", "def dot(self,v, w):\n return sum(v_i * w_i for v_i, w_i in zip(v, w))", "def dot_product(v,w):\n return v[0] * w[0] + v[1] * w[1]", "def dot(v, w):\n return sum(v_i * w_i for v_i, w_i in zip(v, w))", "def dot(v, w):\n return sum(v_i * w_i for v_i, w_i in zip(v, w))", "def dot(v, w):\n return sum(v_i * w_i for v_i, w_i in zip(v, w))", "def dot(v, w):\n return sum(v_i * w_i for v_i, w_i in zip(v, w))", "def dot(v, w):\n return sum(v_i * w_i for v_i, w_i in zip(v, w))", "def dot(v, w):\n\treturn sum(v_i * w_i for v_i, w_i in zip(v, w))", "def dot(v,w):\n return sum(v_i * w_i\n for v_i, w_i in zip(v,w))", "def dot(v, w):\n l = list(zip(v, w))\n return sum(v_i * w_i for v_i, w_i in l)", "def dot(v, w):\n return sum(v_i * w_i\n for v_i, w_i in zip(v, w))", "def project(v, w):\n coefficient = dot(v, w)\n return scalar_multiply(coefficient, w)", "def v(w,s):\n return w", "def add_vectors(v, u):\n return (v[0] + u[0], v[1] + u[1])", "def dot(v,w):\n return sum(v_i * w_i for v_i, w_i in zip(v,w)\n\ndef sum_of_squares(v):\n return dot(v, v)\n\nimport math", "def cg_apply_ws(A, B, w0, w1, w2, w3, pars):\n # We manually loop over one of the legs to lower the memory cost.\n vects = []\n # Generate the vectors to sum over in this manual loop,\n # for non-symmetric tensors:\n if A.qhape is None:\n dim = A.shape[5]\n for j in range(dim):\n vect = type(A).zeros([dim])\n vect[j] = 1.\n vects.append(vect)\n # and for symmetric tensors:\n else:\n qim = A.qhape[5]\n dim = A.shape[5]\n direction = A.dirs[5]\n for i, q in enumerate(qim):\n qdim = dim[i]\n for j in range(qdim):\n vect = type(A).zeros([dim], qhape=[qim], dirs=[-direction],\n charge=-direction*q, invar=True)\n vect[(q,)][j] = 1.\n vects.append(vect)\n # Compute the networks with the middle leg replaced with\n # vect \\otimes vect, and sum them all up.\n result = None\n for vect in vects:\n Ared = ncon((A, vect), ([-1,-2,-3,-4,-5,6], [6]))\n Bred = ncon((B, vect.conjugate()), ([-1,-2,-3,-4,5,-6], [5]))\n term = ncon((Ared, Bred,\n w0, w1, w2, w3),\n ([1,2,11,12,-5], [13,14,3,4,-6],\n [-1,1,13], [-2,2,14], [-3,11,3], [-4,12,4]))\n if result is None:\n result = term\n else:\n result += term\n return result", "def operations(h, w):\r\n A=np.random.random([h,w])\r\n B=np.random.random([h,w])\r\n s=A+B\r\n return A,B,s\r\n raise NotImplementedError", "def init_weight(w):\n shape = w.shape\n if len(shape) == 4:\n i, o, u, v = shape\n k = np.sqrt(6 / (i * u * v + o * u * v))\n w.data.uniform_(-k, k)\n elif len(shape) == 2:\n k = np.sqrt(6 / sum(shape))\n w.data.uniform_(-k, k)\n elif len(shape) == 1:\n w.data.zero_()", "def _func(w):\r\n W = _adj(w)\r\n loss, G_loss = _loss(W)\r\n h, G_h = _h(W)\r\n obj = loss + 0.5 * rho * h * h + alpha * h + lambda1 * w.sum()\r\n G_smooth = G_loss + (rho * h + alpha) * G_h\r\n g_obj = np.concatenate((G_smooth + lambda1, - G_smooth + lambda1), axis=None)\r\n return obj, g_obj", "def _eval(self, v):\n return super(weighted_sum_squares, self)._eval(self.weight * v)", "def weighted_sum(W, X):\n\n if 
len(W) != len(X):\n print(\"Dimension of weight vector should be same as input vector.\")\n return\n\n else:\n H = 0\n\n for i in range(len(W)):\n H += (W[i] * X[i])\n \n return H", "def nn(x, w):\n return np.dot(x, w)", "def SumM(v:'value', e:'error', w:'weight'=None):\n\n v = np.array(v)\n e = np.array(e)\n\n n = len(v)\n assert len(v) == len(e) \n if w is None:\n w = np.array([1.]*len(v))\n else:\n assert len(w) == len(v)\n w = np.array(w) / e**2\n wt = np.sum(w)\n w2t = np.sum(w**2)\n wti = 1/np.sum(w)\n yw = np.sum(w * v) * wti\n Qw = np.sum(w * (v - yw) ** 2)\n d2 = max(0, (Qw - (n-1)) / (wt - w2t*wti))\n wx = 1 / (e**2 + d2)\n wxti = 1 / np.sum(wx)\n a = np.sum(wx * v) * wxti\n e2 = wxti\n return a, np.sqrt(e2)", "def adj_se3(w, v):\n A = np.zeros((6, 6))\n A[0:3, 0:3] = hat3(w)\n A[4:6, 4:6] = hat3(w)\n A[4:6, 0:3] = hat3(v)\n return A", "def computeW(self):\n E = np.where(self.v > 0, 1, -1)\n # theshold the connections to only -1,1\n binary_weights = np.where(self.c > 0, 1, self.c)\n binary_weights = np.where(binary_weights < 0, -1, binary_weights)\n W = np.sum(binary_weights * np.dot(E.reshape(-1,1), E.reshape(1,-1))) # W = C * E * E\n self.W = W\n if np.sum(binary_weights) != 0:\n self.W = self.W / np.sum(binary_weights) # W / W*\n return self.W", "def vector_subtract(v, w):\n return [v_i - w_i for v_i, w_i in zip(v,w)]", "def vector_subtract(v, w):\n return [v_i - w_i for v_i, w_i in zip(v,w)]", "def vector_subtract(v, w):\n return [v_i - w_i for v_i, w_i in zip(v,w)]", "def vector_dot(v, w):\n return np.dot(v, w)", "def vector_subtract(v, w):\n return [v_i - w_i for v_i, w_i in zip(v, w)]", "def vector_and(v, w):\n return [v_i and w_i for v_i, w_i in zip(v, w)]", "def RSS(X,Y,w):\n v = Y[:,0]- (np.dot(X,w[1:]) + w[0])\n return np.dot(v,v)", "def apply_weights(self):\n w0_array = np.ones(self.N)*self.w0\n return w0_array + self.X.dot(self.w)", "def apply(self, v):\n u = np.zeros(self.Dimension, dtype=complex)\n for me in self.Elements:\n for index in range(v.Elements.size):\n if index == me.j:\n u[me.i] += me.val * v.Elements[index]\n u = Vector(u) \n return u", "def protrudes((u,v)):\r\n return ((u,v,W), (u,v,S), (u,v-1,W), (u-1,v,S))", "def _adj(w):\r\n return (w[:d * d] - w[d * d:]).reshape([d, d])", "def extforce (u, v):\r\n\r\n for i in range (height):\r\n for j in range (width):\r\n u[i,j], v[i,j] = np.stack((u[i,j], v[i,j])) + dt * extacc\r\n\r\n return u, v", "def add_vectors(u, v): #11.22.5\r\n new_vector = []\r\n \"\"\"Because they have same length so we\r\n should take advantage from this one\"\"\"\r\n for i in range(len(u)):\r\n m = u[i] + v[i] # Get their value of i index at the same time!\r\n new_vector.append(m)\r\n return new_vector", "def project(v, w):\n projection_length = dot(v, w)\n return scalar_multiply(projection_length, w)", "def vector_proj(v, w):\n w_hat = vector_hat(w)\n return vector_dot(v, w_hat) * w_hat", "def compress_weights(W, l):\n\n # numpy doesn't seem to have a fast truncated SVD algorithm...\n # this could be faster\n U, s, V = np.linalg.svd(W, full_matrices=False)\n\n Ul = U[:, :l]\n sl = s[:l]\n Vl = V[:l, :]\n\n L = np.dot(np.diag(sl), Vl)\n return Ul, L", "def distance(v, w):\n\treturn magnitude(vector_subtract(v, w))", "def optimise(w, w_delta):\n return w.assign(w - w_delta)", "def vector_or(v, w):\n return [v_i or w_i for v_i, w_i in zip(v, w)]", "def advect (u, v):\r\n # NOTICE: memory usage might be too high, could optimize\r\n\r\n # Store the values from timestep n\r\n un = u\r\n vn = v\r\n\r\n for i in range (height):\r\n for j 
in range (width):\r\n oldpos = coord (i,j) - dt * np.stack((u[i,j], v[i,j]))\r\n u[i,j], v[i,j] = interpolate (un, vn, oldpos)\r\n\r\n\r\n # Return values for timestep n+1\r\n return u, v", "def setParamsFromVector(self, params):\n #starting point of w_ih weights in vectorised params\n w_ih_start_pos = 0\n #end point of w_ih weights in vectorised params\n w_ih_end_pos = self.hiddenLayerSize * self.inputLayerSize\n\n self.w_ih = np.reshape( params[ w_ih_start_pos : w_ih_end_pos ], \\\n ( self.inputLayerSize, self.hiddenLayerSize ) )\n\n #end point of w_ho weights in vectorised params\n w_ho_end_pos = w_ih_end_pos + self.hiddenLayerSize * self.outputLayerSize\n\n self.w_ho = np.reshape( params[ w_ih_end_pos : w_ho_end_pos ], \\\n ( self.hiddenLayerSize, self.outputLayerSize))\n\n #end point of b_h biases in vectorised params\n b_h_end_pos = w_ho_end_pos + self.hiddenLayerSize\n \n self.b_h = params[ w_ho_end_pos : b_h_end_pos ]\n \n #end point of b_o biases in vectorised params\n b_o_end_pos = b_h_end_pos + self.outputLayerSize\n \n self.b_o = params[ b_h_end_pos : b_o_end_pos ]", "def stabilizer_vector(v, g, n):\n vg = v.copy()\n w = v.copy()\n for i in range(1, n):\n vg *= g \n w += vg\n assert v == vg * g\n if (w['B'] == 0).all():\n return None\n return w", "def _derW(self, w, x, y, z):\n raise NotImplementedError()", "def vector_weighted_average(vf, weights):\n weights_sum = weights.sum()\n y_average = (vf[:,:,0] * weights).sum() / weights_sum\n x_average = (vf[:,:,1] * weights).sum() / weights_sum\n return np.array([y_average, x_average])", "def wedge_distance(u, v):\n n_it = np.size(u)\n sum = 0\n for i in range(1, n_it):\n for j in range(i):\n sum += np.abs(u[i] * v[j] - u[j] * v[i]) ** 2\n return sum", "def scalar_proj(v, w):\n return vector_dot(v, vector_hat(w))", "def dot(v: Vector, w: Vector) -> float:\n assert len(v) == len(w), \"vectors must be same length\"\n\n return sum(v_i * w_i for v_i, w_i in zip(v, w))", "def weight_update(u_ff, u_wc, alpha, beta, w, fan_all):\r\n mult_wc = np.matmul(np.reshape(hard_sigmoid_array(u_wc), (fan_all, 1)),\r\n np.reshape(hard_sigmoid_array(u_wc), (1, fan_all)))\r\n mult_ff = np.matmul(np.reshape(hard_sigmoid_array(u_ff), (fan_all, 1)),\r\n np.reshape(hard_sigmoid_array(u_ff), (1, fan_all)))\r\n delta_w = alpha * (1 / beta) * (mult_wc - mult_ff)\r\n delta_w[np.diag_indices(fan_all)] = 0\r\n w = w + delta_w\r\n return w", "def vecvari1(array,W,B=None,sqrt=False,BB=False,BS=False,verbose=False,sizz=1,\r\n KCD=False,mulb=False,mul2=False,v3=0,**kwargs):\r\n \r\n arrs=array.shape\r\n #array=np.expand_dims(array,len(array.shape)//2)\r\n ashp=W.shape\r\n dstp=arrs[0]-1 if not((arrs[0]-1)==0) else 1\r\n if verbose:\r\n print(\"VECVARI1:: B? 
{},SQRT {}, BB {}, BS {}, SIZZ {}, KCD {}, MULB {}, MUL2 {}\".format(\r\n not(B is None),bool(sqrt),bool(BB),bool(BS),sizz,bool(KCD),bool(mulb),bool(mul2)))\r\n print('arrayshape',arrs)\r\n if verbose==2:\r\n print('Wsample',W[:,:,-1,-1])\r\n else:\r\n print('Wsample',W[:,:,-1,-1])\r\n if not(B is None):\r\n print(\"Bsamp\",B)\r\n print('wshape',ashp)\r\n if B is None:\r\n B=np.zeros((1,1,1,1),dtype=np.float32)#channel\r\n bt=len(B.shape)==2\r\n xi=(-2,-1)#xi=(-1,-2)\r\n x2=(-3,-2,-1)\r\n if len(ashp)==5 :#not all data and all weights == 3d data\r\n xi=(-3,-2,-1)\r\n x2=(-4,-3,-2,-1)\r\n if v3:\r\n if mulb:#probably a bad idea\r\n mul=array+B\r\n else:\r\n mul=array\r\n else:\r\n if mulb:#probably a bad idea\r\n B=np.reshape(B,(*B.shape,*[1 for _ in range(len(ashp)-len(B.shape))]))\r\n mul=(array*W)+B\r\n else:\r\n mul=array*W\r\n size=np.sum(W,axis=xi,keepdims=True)#shape=(outputs, channel)\r\n\r\n if BB :\r\n B=np.reshape(B,(*B.shape,*[1 for _ in range(len(ashp)-len(B.shape))]))\r\n if verbose:\r\n if verbose==2:\r\n print('mulsamp',mul[:,-1,-1,::dstp],'arrsamp',array[-1,-1,:])\r\n else:\r\n print('mulsamp',mul[-1,-1,-1],'arrsamp',array[-1,-1,-1])\r\n print('sizsamp',size)\r\n print('bbb',B.shape)\r\n print(\"size\",size.shape)\r\n if sizz==1:#not a good idea\r\n mean=np.sum((mul),axis=xi,keepdims=True)/size\r\n else:\r\n mean=np.sum((mul),axis=xi,keepdims=True)/np.broadcast_to([ashp[-2]*ashp[-1]],(ashp[1],1,1))\r\n if verbose:\r\n if verbose==2:\r\n print(\"meanshape\",mean.shape)\r\n print(\"meansamp\",mean[:,:,:,::dstp,-1,-1,-1])\r\n else:\r\n print(\"meansamp\",mean[-1,:,:,-1,-1,-1,-1])\r\n print(\"etst\",mean.shape)\r\n if verbose==2:\r\n print(\"ameanshp\",(mul-mean).shape)\r\n print(\"amean\",(mul-mean)[:,:,:,::dstp,-1,-1])\r\n else:\r\n print(\"amean\",(mul-mean)[-1,-1,-1])\r\n if mul2:\r\n if mulb:#probably a bad idea\r\n mul=((array-mean)*W)+B\r\n else:\r\n mul=((array-mean)*W)\r\n i=(np.square(mul))/size\r\n else:\r\n if v3==1:\r\n if BB:\r\n i=(np.square(((array-mean)*W)+B)/size)#B could be included\r\n else:\r\n i=(np.square(((array-mean)*W))/size)#B could be included\r\n if v3==2:#not a good idea\r\n if BB:\r\n i=((np.square(array-mean)*W)+B)/size#B could be included\r\n else:\r\n i=((np.square(array-mean)*W))/size#B could be included\r\n if v3==3:\r\n if BB:\r\n i=((np.square(array-mean)/size)*W)+B#B could be included\r\n else:\r\n i=((np.square(array-mean)/size)*W)#B could be included\r\n else:\r\n if BB:\r\n i=(np.square((mul)-mean)+B)/size\r\n else:\r\n i=(np.square((mul)-mean))/size\r\n if KCD:\r\n out=np.sum(i,axis=xi)\r\n else:\r\n out=np.rollaxis(np.sum(i,axis=x2),-1,1)\r\n if verbose:\r\n print(i.shape)\r\n if verbose==2:\r\n print('ishp',i.shape)\r\n print('isample',i[:,-1,-1,::dstp],i.dtype)\r\n else:\r\n print('isample',i[-1,-1,-1],i.dtype)\r\n if sqrt:\r\n out=np.sqrt(out)\r\n if verbose:\r\n if verbose==2:\r\n print('oushp',out.shape)\r\n print(\"outsample\",out[:,::dstp,-1,-1])\r\n else:\r\n print(\"outsample\",out[-1,-1,-1])\r\n print(\"out\",out.shape,(arrs[0],ashp[0],arrs[1],arrs[2]))\r\n if KCD:\r\n out=np.reshape(out,(arrs[0],ashp[0]*arrs[-3],arrs[1],arrs[2]))\r\n else:\r\n assert out.shape==(arrs[0],ashp[0],arrs[1],arrs[2])\r\n if not(BB)and BS:\r\n B=np.reshape(B,(*B.shape,*[1 for _ in range(len(ashp)-len(B.shape))]))\r\n return(out+B[:,0])\r\n else:\r\n return(out)", "def test_amp_sums_can_be_simplified(free_alg):\n dr = free_alg\n v = dr.names.v\n n, i, j = symbols('n i j')\n x = IndexedBase('x')\n r = Range('D', 0, n)\n\n tensor = dr.sum((i, r), (j, 
r), i ** 2 * x[j] * v[j])\n res = tensor.simplify_sums()\n assert res == dr.sum((j, r), (\n n ** 3 / 3 - n ** 2 / 2 + n / 6\n ) * x[j] * v[j])", "def __call__(self, w):\n l1_term = self.alpha * np.linalg.norm(w, 1)\n l2_term = self.alpha * 0.5 * np.linalg.norm(w, 2)\n\n return self.r * l1_term + (1 - self.r) * l2_term", "def distance(v, w):\n return magnitude_of_vector(vector_subtract(v, w))", "def f(t, x, n, v):\n total = 0\n for i in range(n+1):\n for j in range(n+1):\n for k in range(v):\n total = t[i][j] * x[i][j][k]", "def update_params(self, v_0, h_0, v_k, h_k):\n pos = np.dot(np.transpose(v_0), h_0)\n pos_vb = np.sum(v_0, axis=0)\n pos_hb = np.sum(h_0, axis=0)\n neg = np.dot(np.transpose(v_k), h_k)\n neg_vb = np.sum(v_k, axis=0)\n neg_hb = np.sum(h_k, axis=0)\n self.delta_bias_v = self.momentum*self.delta_bias_v + (self.learning_rate/self.batch_size)*(pos_vb - neg_vb)\n self.bias_v += self.delta_bias_v\n self.delta_bias_h = self.momentum*self.delta_bias_h + (self.learning_rate/self.batch_size)*(pos_hb-neg_hb) \n self.bias_h += self.delta_bias_h\n self.delta_weight_vh = self.momentum*self.delta_weight_vh + self.learning_rate*((pos - neg)/self.batch_size - self.decay*self.weight_vh)\n self.weight_vh += self.delta_weight_vh \n return", "def dot(v: Vector, w: Vector) -> float:\n assert len(v) == len(w), 'vectors must be the same length'\n\n return sum(v_item * w_item for v_item, w_item in zip(v, w))", "def w_update(u, H, gamma, D, C):\n w_next = [proj(H[i].dot(u), gamma[i], D[i], C[i]) for i in range(len(H))]\n return w_next", "def getParamsToVector(self):\n #vectorise and concat weights arrays\n weights = np.concatenate( ( self.w_ih.flatten(), self.w_ho.flatten() ) )\n # concat biases vectors\n biases = np.concatenate( ( self.b_h, self.b_o ) )\n # concat weights and biases into params\n params = np.concatenate( ( weights, biases ) )\n return params", "def feature_energy24(wv):\n return np.sqrt(np.sum(wv[2:22, :, :] ** 2, axis=0)).T", "def _optimize(self, v):\n v0, prob_h_v0, vk, prob_h_vk = self._gibbs_sampling(v)\n W_grad, a_grad, b_grad = self._compute_gradients(v0, prob_h_v0, vk, prob_h_vk)\n para_update = [tf.assign(self.W, tf.add(self.W, self.learning_rate*W_grad)),\n tf.assign(self.a, tf.add(self.a, self.learning_rate*a_grad)),\n tf.assign(self.b, tf.add(self.b, self.learning_rate*b_grad))]\n error = tf.metrics.mean_squared_error(v0, vk)[1]\n return para_update, error", "def squared_distance(v, w):\n\treturn sum_squares(vector_subtract(v, w))", "def sumouter(us,vs,lo=-1.0,hi=1.0,out=None):\n result = zeros((len(us[0]),len(vs[0])))\n for u,v in zip(us,vs):\n result += outer(clip(u,lo,hi),v)\n return result", "def weight_expr(self, t, w_plus, z, value):\n pass", "def quad(v1, v2, v3, v4):\n return [[v3, v2, v1], [v4, v3, v2]]", "def test_numbers_can_substitute_vectors(free_alg, full_balance):\n\n dr = free_alg\n p = dr.names\n\n x = IndexedBase('x')\n y = IndexedBase('y')\n r = p.R\n i, j, k, l = symbols('i j k l')\n v = p.v\n w = Vec('w')\n\n orig = dr.sum((i, r), (j, r), x[i, j] * v[i] * w[j] + y[i, j] * v[i] * v[j])\n\n res = orig.subst(v[k], 0, full_balance=full_balance).simplify()\n assert res == 0\n res = orig.subst(v[i], 1, full_balance=full_balance).simplify()\n assert res == dr.sum((i, r), (j, r), x[j, i] * w[i] + y[i, j])", "def vector_dist(v, w):\n if isinstance(v, list):\n v = np.asarray(v)\n return vector_mag(v - w)", "def V_particle_ablation(s_n, g_n, s_others, n_h1=64, n_h2=64):\n concated = tf.concat( [s_n, g_n, s_others], axis=1 )\n with 
tf.variable_scope(\"stage-2\"):\n h1 = tf.layers.dense(inputs=concated, units=n_h1, activation=tf.nn.relu, use_bias=True, name='V_h1')\n h2 = tf.layers.dense(inputs=h1, units=n_h2, activation=tf.nn.relu, use_bias=True, name='V_h2')\n out = tf.layers.dense(inputs=h2, units=1, activation=None, use_bias=False, name='V_out')\n return out", "def weights(self):\n \n n = self.n\n lambda_ = self.alpha**2 * (n +self.kappa) - n\n \n c = .5 / (n + lambda_)\n Wc = np.full(2*n + 1, c)\n Wm = np.full(2*n + 1, c)\n Wc[0] = lambda_ / (n + lambda_) + (1 - self.alpha**2 + self.beta)\n Wm[0] = lambda_ / (n + lambda_)\n \n return Wm, Wc", "def plotWeights(w):\n w = w[:,:,0,:]\n # rescale w to 0.0 - 1.0\n mincode = np.amin(w)\n maxcode = np.amax(w)\n w = (w - mincode) / (maxcode - mincode)\n\n out = np.zeros((15, 15))\n for x in range(0,4):\n for y in range(0,4):\n c = x*4+y\n out[x*4:x*4+3, y*4:y*4+3] = w[:,:,c]\n return out", "def weno_nn(\n self,\n v: types.FlowFieldVal,\n dim: str,\n ) -> Tuple[types.FlowFieldVal, types.FlowFieldVal]:\n delta_neg, delta_pos = self._calculate_weno_nn_delta_layer(v, dim)\n weno_wt_neg, weno_wt_pos = self._calculate_weno_nn_weights(\n delta_neg, delta_pos,\n )\n vr_neg, vr_pos = interpolation._reconstruct_weno_face_values( # pylint: disable=protected-access\n v, self._kernel_op, dim=dim, k=self._k\n )\n v_neg, v_pos = interpolation._interpolate_with_weno_weights( # pylint: disable=protected-access\n v, weno_wt_neg, weno_wt_pos, vr_neg, vr_pos, dim=dim, k=self._k\n )\n return v_neg, v_pos", "def dot(u, v, w, a, b):\n u_1, u_2 = u\n v_1, v_2 = v\n return (w*u_1 + b*u_2)*(w*v_1 + b*v_2) + abs(a)*u_1*v_1", "def expandW(w, n_hidden_units):\n i1 = 784 * n_hidden_units\n i2 = i1 + n_hidden_units\n i3 = i2 + n_hidden_units * 10\n i4 = i3 + 10\n assert i4 == w.size, str(i4) + ' ' + str(w.size)\n W1 = w[0:i1].reshape((n_hidden_units, 784))\n b1 = w[i1:i2]\n W2 = w[i2:i3].reshape((10, n_hidden_units))\n b2 = w[i3:i4]\n return W1, b1, W2, b2", "def constraint_sum(w):\n return sum(w) - 1", "def wsum_rvs(mu: np.ndarray, cov: np.ndarray, w: np.ndarray\n ) -> (np.ndarray, np.ndarray):\n mu1 = mu * w # type: np.ndarray\n ndim = mu1.ndim\n # not using axis=-1, to make it work with DataFrame and Series\n mu1 = mu1.sum(axis=ndim - 1)\n cov1 = (cov * (w[..., None] * w[..., None, :])\n ).sum(axis=ndim).sum(axis=ndim - 1)\n return mu1, cov1", "def _update_w(self, idx):\n self.w = ((self._w - 0.4) * (self._generations - idx)) /\\\n (self._generations + 0.4)", "def _derW(self, w, x, y, z):\n if _isscalar(w):\n w_pos = max(min(self.wSearchFunc(self.w_list, w), self.w_n - 1), 1)\n x_pos = max(min(self.xSearchFunc(self.x_list, x), self.x_n - 1), 1)\n y_pos = max(min(self.ySearchFunc(self.y_list, y), self.y_n - 1), 1)\n z_pos = max(min(self.zSearchFunc(self.z_list, z), self.z_n - 1), 1)\n else:\n w_pos = self.wSearchFunc(self.w_list, w)\n w_pos[w_pos < 1] = 1\n w_pos[w_pos > self.w_n - 1] = self.w_n - 1\n x_pos = self.xSearchFunc(self.x_list, x)\n x_pos[x_pos < 1] = 1\n x_pos[x_pos > self.x_n - 1] = self.x_n - 1\n y_pos = self.ySearchFunc(self.y_list, y)\n y_pos[y_pos < 1] = 1\n y_pos[y_pos > self.y_n - 1] = self.y_n - 1\n z_pos = self.zSearchFunc(self.z_list, z)\n z_pos[z_pos < 1] = 1\n z_pos[z_pos > self.z_n - 1] = self.z_n - 1\n i = w_pos # for convenience\n j = x_pos\n k = y_pos\n l = z_pos\n beta = (x - self.x_list[j - 1]) / (self.x_list[j] - self.x_list[j - 1])\n gamma = (y - self.y_list[k - 1]) / (self.y_list[k] - self.y_list[k - 1])\n delta = (z - self.z_list[l - 1]) / (self.z_list[l] - 
self.z_list[l - 1])\n dfdw = (\n (\n (1 - beta)\n * (1 - gamma)\n * (1 - delta)\n * self.f_values[i, j - 1, k - 1, l - 1]\n + (1 - beta) * (1 - gamma) * delta * self.f_values[i, j - 1, k - 1, l]\n + (1 - beta) * gamma * (1 - delta) * self.f_values[i, j - 1, k, l - 1]\n + (1 - beta) * gamma * delta * self.f_values[i, j - 1, k, l]\n + beta * (1 - gamma) * (1 - delta) * self.f_values[i, j, k - 1, l - 1]\n + beta * (1 - gamma) * delta * self.f_values[i, j, k - 1, l]\n + beta * gamma * (1 - delta) * self.f_values[i, j, k, l - 1]\n + beta * gamma * delta * self.f_values[i, j, k, l]\n )\n - (\n (1 - beta)\n * (1 - gamma)\n * (1 - delta)\n * self.f_values[i - 1, j - 1, k - 1, l - 1]\n + (1 - beta)\n * (1 - gamma)\n * delta\n * self.f_values[i - 1, j - 1, k - 1, l]\n + (1 - beta)\n * gamma\n * (1 - delta)\n * self.f_values[i - 1, j - 1, k, l - 1]\n + (1 - beta) * gamma * delta * self.f_values[i - 1, j - 1, k, l]\n + beta\n * (1 - gamma)\n * (1 - delta)\n * self.f_values[i - 1, j, k - 1, l - 1]\n + beta * (1 - gamma) * delta * self.f_values[i - 1, j, k - 1, l]\n + beta * gamma * (1 - delta) * self.f_values[i - 1, j, k, l - 1]\n + beta * gamma * delta * self.f_values[i - 1, j, k, l]\n )\n ) / (self.w_list[i] - self.w_list[i - 1])\n return dfdw", "def subtract(v: Vector, w: Vector) -> Vector:\n assert len(v) == len(w), 'both vectors must have the same length'\n\n return [v_item - w_item for v_item, w_item in zip(v, w)]", "def u_update(eta_0, eta, eta_lin, w_0, w, w_lin, eta_T_H_L_stacked, premultiplied_lhs = None, nnls_max_iter=50): \n # PREMULTIPLIED LHS IS AN EXTRA ARGUMENT! Set it to None and add solver! \n \"\"\"In the following +[[]] and [:-1] are added to keep thing 1dim array of objects and still multiply it elemtwisely\"\"\" \n# #B.append([]) #THIS IS WRONG, CHANGES THE LIST \n# B_concat = np.concatenate((1/np.sqrt(2*eta))*np.array(B+[[]])[:-1], axis = 0) \n# A_ls = np.concatenate([(1/np.sqrt(2*eta0))*A, B_concat], axis = 0) \n# #print(np.array(B).shape) \n# #print(w[0].shape) \n# #print(w, eta) \n# #w.append([]) THIS IS WRONG, CHANGES THE LIST \n# w_concat = np.concatenate((1/np.sqrt(2*eta))*np.array(w+[[]])[:-1], axis = 0) #[:-1] Added as a hack to keep it one-dim array of objects \n# eta_w = np.expand_dims(1/np.sqrt(2*eta),1)*np.array(w) \n# print(eta_w.shape) \n# b_ls = np.concatenate([(1/np.sqrt(2*eta_0))*w_0, eta_w.flatten()], axis = 0) \n #Use correct broadcasting?\n w_concat = np.concatenate((1/np.sqrt(2*eta))*np.array(w+[[]])[:-1], axis = 0) #[:-1] Added as a hack to keep it one-dim array of objects \n b_ls = np.concatenate([(1/np.sqrt(2*eta_0))*w_0, w_concat, (1/np.sqrt(2*eta_lin))*w_lin], axis = 0) \n# print(np.sum(eta_w.flatten() != w_concat)) \n# premultiplied_time_start = time.time() \n# premultiplied_lhs = eta_T_H_stacked.T.dot(eta_T_H_stacked).toarray() \n# premultiplied_time_end = time.time() \n# print('premultiplying took {}'.format(premultiplied_time_end - premultiplied_time_start)) \n# premultiplied_rhs = eta_T_H_stacked.T.dot(b_ls) \n# u_next = nnls_predotted(premultiplied_lhs, premultiplied_rhs, tol=1e-5) \n# print(eta_T_H_stacked.shape, b_ls.shape) \n# A_ls_t_b = eta_T_H_stacked.T.dot(b_ls) \n# w =scipy.sparse.linalg.spsolve_triangular(RT, A_ls_t_b, lower = True) \n# x = scipy.sparse.linalg.spsolve_triangular(R, w, lower = False) \n# u_next = x \n u_next = scipy.optimize.lsq_linear(eta_T_H_L_stacked, b_ls, bounds = (0, np.inf), tol=1e-3, lsmr_tol=1e-3, max_iter=nnls_max_iter, verbose=1).x \n# u = scipy.optimize.lsq_linear(premultiplied_lhs, premultiplied_rhs, bounds = 
(0, np.inf), tol=1e-5).x \n return u_next", "def lca(self, v, w):", "def squared_distance(v, w):\n return sum_of_squares(vector_subtract(v, w))", "def u_weights(self):\n for i in range(self.n_inputs):\n self._q_neuron.cx(self._weights[i], self.inputs[i])", "def __add__(self, v):\n return vector(self.x + v.x, self.y + v.y, self.z + v.z)", "def resmlp(self, x, w3, w2, b2, w, b):\r\n return tf.matmul(tf.nn.tanh(tf.matmul(x, w2) + b2), w) + tf.matmul(x,\r\n w3) + b", "def squared_distance(v, w):\n return sum_of_squares(vector_subtraction(v, w))", "def _create_weight_update_ops(self):\n with tf.name_scope(\"Weight_Update_Operators\"):\n self.weight_vars_assign_ops = []\n for weight_matrix, grad in zip(self._train_vars, self.step_direction_variables):\n self.weight_vars_assign_ops.append(\n tf.assign_add(weight_matrix, self._step_on_line_plh * -grad / self.norm_of_gradient_var).op)", "def forward(self, w_value, x1_value, x2_value, b_value):\n self.inputs = [w_value, x1_value, x2_value, b_value]\n\n x_input = np.asarray([x1_value, x2_value]).T\n # return np.matmul(x_value, w_value) + b_value # [Note] Matmul Order\n return x_input.dot(w_value) + b_value # [Note] Matmul Order", "def _assemble_W(self):\n L = torch.tril(self.L, diagonal=-1) + torch.diag(torch.ones(self.dim))\n U = torch.triu(self.U, diagonal=1)\n W = self.P @ L @ (U + torch.diag(self.S))\n return W", "def objective(V, W, h):\n return np.linalg.norm(v - w @ h, ord = 'fro')", "def objective(self,w):\n diffs = self.get_y_times_diffs(self.get_split_weights(w))\n #print diffs, sigmoid(diffs)\n obj = -np.sum(np.log(sigmoid(diffs))) #negative, since minimising\n # regularisation\n obj += 0.5 * self.alpha * np.dot(w[:self.interp_index[0]], w[:self.interp_index[0]])\n return obj", "def new_w(w, d):\n\n if w.sum() > 0:\n next_w = w.copy()\n next_w[next_w > 0] -= 1\n return next_w\n else:\n if d[0] == 1:\n return np.array([51,0,0])\n elif d[1] == 1:\n return np.array([0,51,0])\n else:\n return np.array([0,0,51])" ]
[ "0.7097757", "0.70765364", "0.7075953", "0.65045404", "0.64145964", "0.6388513", "0.63860494", "0.6356628", "0.6353366", "0.6353366", "0.6353366", "0.6353366", "0.6353366", "0.6330879", "0.63247854", "0.6277021", "0.62067485", "0.6178578", "0.6085259", "0.60823065", "0.6049024", "0.6004038", "0.59812975", "0.5978067", "0.5976902", "0.59527993", "0.59234506", "0.58657324", "0.58530927", "0.58495235", "0.5841433", "0.583452", "0.583452", "0.583452", "0.5821268", "0.58203", "0.5808307", "0.580772", "0.5801171", "0.57886845", "0.57785535", "0.5769955", "0.5769335", "0.576857", "0.5756145", "0.57295406", "0.56760025", "0.5668172", "0.5663344", "0.56599915", "0.5653355", "0.56422144", "0.56418437", "0.563582", "0.5635543", "0.56039715", "0.5600997", "0.5592601", "0.5588224", "0.5588071", "0.55811715", "0.5575528", "0.55494845", "0.5542775", "0.5539912", "0.55373394", "0.55345476", "0.55281776", "0.55182743", "0.55161214", "0.5510504", "0.55059546", "0.54999113", "0.54918694", "0.5485845", "0.548236", "0.54803187", "0.5467404", "0.54448754", "0.54433215", "0.5433214", "0.5430972", "0.5426337", "0.5412096", "0.5410501", "0.54085934", "0.5405172", "0.53991395", "0.53971297", "0.539626", "0.539561", "0.5393468", "0.5388043", "0.53739864", "0.53694165", "0.5367563", "0.536638", "0.5347662", "0.53467935", "0.5344862" ]
0.6338228
13
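The record above pairs the query "v_1 w_1 + ... + v_n w_n" with the one-line `dot` document. As a quick illustrative sketch (the length assertion and the sample vectors are assumptions added here, not part of the dataset entry), the same definition can be exercised like this:

def dot(v, w):
    """v_1 w_1 + ... + v_n w_n"""
    assert len(v) == len(w), "vectors must be the same length"
    return sum(v_i * w_i for v_i, w_i in zip(v, w))

# Illustrative usage with made-up inputs:
print(dot([1, 2, 3], [4, 5, 6]))  # 1*4 + 2*5 + 3*6 = 32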
v_1 v_1 + ... + v_n v_n
def sum_of_squares(x): return dot(x, x)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_vectors(v, u):\n return (v[0] + u[0], v[1] + u[1])", "def vector_add(v1, v2):\n return v1[0] + v2[0], v1[1] + v2[1]", "def __add__(self, _v):\n\t\tif len(self) == len(_v):\n\t\t\tans = copy.deepcopy(self)\n\t\t\tfor i in range(0, self.n):\n\t\t\t\tans[i] += _v[i]\n\t\t\treturn ans", "def vector_add(v, w):\n\treturn [v_i + w_i for v_i, w_i in zip(v, w)]", "def sum(a,v):\n return a+v", "def vector_add(v, w):\n return [v_i + w_i for v_i, w_i in zip(v, w)]", "def vector_add(v, w):\n return [v_i + w_i for v_i, w_i in zip(v,w)]", "def add_vectors(u, v): #11.22.5\r\n new_vector = []\r\n \"\"\"Because they have same length so we\r\n should take advantage from this one\"\"\"\r\n for i in range(len(u)):\r\n m = u[i] + v[i] # Get their value of i index at the same time!\r\n new_vector.append(m)\r\n return new_vector", "def __add__(self, v):\n return vector(self.x + v.x, self.y + v.y, self.z + v.z)", "def vector_sum(a, b):\n return a[0] + b[0], a[1] + b[1]", "def ADD (self, n1, n2):", "def advect (u, v):\r\n # NOTICE: memory usage might be too high, could optimize\r\n\r\n # Store the values from timestep n\r\n un = u\r\n vn = v\r\n\r\n for i in range (height):\r\n for j in range (width):\r\n oldpos = coord (i,j) - dt * np.stack((u[i,j], v[i,j]))\r\n u[i,j], v[i,j] = interpolate (un, vn, oldpos)\r\n\r\n\r\n # Return values for timestep n+1\r\n return u, v", "def quad(v1, v2, v3, v4):\n return [[v3, v2, v1], [v4, v3, v2]]", "def sum_series(n,v1=0,v2=1):\n\tL1=v2\n\tL2=v1\n\tif n<0:\n\t\tprint(\"please enter positive int value\")\n\n\telif n==0:\n\t\treturn v1\n\n\telif n==1:\n\t\treturn v2\n\n\telse:\n\t\tfor i in range(n-1):\n\t\t\tC=L1+L2\n\t\t\tL2=L1\n\t\t\tL1=C\n\t\treturn C", "def vector_sum(vectors):\n results = vectors[0]\n for vector in vectors[1:]:\n results = vector_add(results, vector)\n return results", "def f(t, x, n, v):\n total = 0\n for i in range(n+1):\n for j in range(n+1):\n for k in range(v):\n total = t[i][j] * x[i][j][k]", "def multi_1(cur,p,n):\n\tr=p\n\tfor k in range(0,n-1):\n\t\tr=sum(cur,r,p)\n\treturn r", "def sum_series(i, v1=0, v2=1):\n if i == 1:\n v = v1\n elif i == 2: \n v = v2\n else: \n v = sum_series(i - 2, v1, v2) + sum_series(i - 1, v1, v2)\n return v", "def vector_sum(vectors):\n\tresult = vectors[0]\n\tfor vector in vectors:\n\t\tresult = vector_add(result, vector)\n\treturn result", "def evs_do_stuff(e, v, s):\n\n vish = e + v + s + v\n vish = vish + 1\n return vish", "def testSum(self):\n v1 = Vector(1, 2, 3)\n v2 = Vector(4, 5, 6)\n v1 += v2\n assert(len(v1) == 3)\n assert v1[0] == 5\n assert v1[1] == 7\n assert v1[2] == 9\n\n v1 = Vector(9, 8, 7)\n v2 = Vector(3, 2, 1)\n v1 -= v2\n assert len(v1) == 3\n assert v1[0] == 6\n assert v1[1] == 6\n assert v1[2] == 6", "def test_add_different_sizes():\n Vector(1.0) + Vector(2.0, 3.0)", "def vectorAdd(a, b):\n return [a[i] + b[i] for i, j in enumerate(a)]", "def add(value_m, value_n):\n return value_m - value_n", "def test__vector_addition__given_two_vector__return_correct_vector():\n assert Vector((0, 1, 2)) + Vector((3, 4, 5)) == Vector((3, 5, 7))", "def test_add_to_vx(self, cpu):\n for x in range(0x0, 0xF):\n for v in range(0x0, 0xFF):\n for kk in range(0x0, 0xFF):\n cpu.V_register[x] = v\n cpu.opcode = 0x7000 | (x << 8) | kk\n cpu.add_to_vx()\n assert(cpu.V_register[x] == (v + kk) & 0xFF)", "def test_add(self):\n\n for i in range(1, 200 + 1):\n\n for j in range(1, 200 + 1):\n\n for k in range(1, 200 + 1):\n\n value = i + j + k\n assert value == add(i, j, k)", "def add4(a,b):\n return 
[a[0]+b[0],a[1]+b[1],a[2]+b[2],a[3]+b[3]]", "def ADD_I_Vx(self, x):\n\t\tself.I += self.V[x]", "def __add__(self, v2):\n\t\treturn Vect2D(self._vec+v2._vec)", "def sum(cls, vectors):\n result = cls.null()\n for vector in vectors:\n result += vector\n return result", "def stabilizer_vector(v, g, n):\n vg = v.copy()\n w = v.copy()\n for i in range(1, n):\n vg *= g \n w += vg\n assert v == vg * g\n if (w['B'] == 0).all():\n return None\n return w", "def __iadd__(self,value):\n if isinstance(value,LiveStat):\n raise Exception(\"Cannot sum statistics\")\n if value.vcount < 1 or self.vcount < 1:\n raise Exception(\"Cannot sum empty statistics\")\n else:\n # sum of two considered pairwise: z_i = stat(x_i + y_i)\n #\n # data have different weights due to number of samples.. TODO\n self.vmin += value.vmin \n self.vmax += value.vmax\n self.vmean += value.vmean\n self.vsum += value.vsum\n # variance is sum of variance?\n self.vm2 += value.vm2\n # TODO vm3 vm4\n self.vcount = min(value.vcount,self.vcount)\n self.vcountsq = self.vcount**2\n self.dirty = True\n print (\"add Missing: M3 and M4\")\n else:\n # constant bias\n if self.vmin is not None:\n self.vmin += value\n self.vmax += value\n self.vmean += value\n self.vsum += self.vcount*value\n print (\"add Missing: M3 and M4\")\n self.dirty = True\n return self", "def add(self,v2): \n n = len(self.a)\n m = len(v2.a)\n c = []\n if n != m:\n print(\"Incompatible Types\")\n return\n\n for i in range(n):\n c.append(self.a[i]+v2.a[i])\n\n return c", "def combine(k1, k2, k3, k4):\n\n return k1 + (k2 * 2.0) + (k3 * 2.0) + k4", "def test_compose_vectors_with_attrs(self):\n oe = expression.OperationalExpression\n v1, v2 = map(expression.Variable, [\"v1\", \"v2\"])\n exp = 1 + V(v1.x, v2.y)\n expected_exp = oe('+', 1, V(v1.x, v2.y))\n self.assert_equal(exp, expected_exp)", "def sum_value(self, lv, rv):", "def test_numbers_can_substitute_vectors(free_alg, full_balance):\n\n dr = free_alg\n p = dr.names\n\n x = IndexedBase('x')\n y = IndexedBase('y')\n r = p.R\n i, j, k, l = symbols('i j k l')\n v = p.v\n w = Vec('w')\n\n orig = dr.sum((i, r), (j, r), x[i, j] * v[i] * w[j] + y[i, j] * v[i] * v[j])\n\n res = orig.subst(v[k], 0, full_balance=full_balance).simplify()\n assert res == 0\n res = orig.subst(v[i], 1, full_balance=full_balance).simplify()\n assert res == dr.sum((i, r), (j, r), x[j, i] * w[i] + y[i, j])", "def apply(self, v):\n u = np.zeros(self.Dimension, dtype=complex)\n for me in self.Elements:\n for index in range(v.Elements.size):\n if index == me.j:\n u[me.i] += me.val * v.Elements[index]\n u = Vector(u) \n return u", "def dot_product(v1, v2):\n #print(v1, v2)\n sum = 0\n\n for i in range(len(v1)):\n #print(v1[i], v2[i])\n sum += v1[i] * v2[i]\n return sum", "def vector_add(a, b):\n assert(len(a) == len(b))\n\n from operator import add\n return tuple(map(add, a, b))", "def add(v: Vector, w: Vector) -> Vector:\n assert len(v) == len(w), 'both vectors must have the same length'\n\n return [v_item + w_item for v_item, w_item in zip(v, w)]", "def add_three(v1, v2, v3):\n p = v1 + v2 + v3\n print(\"The sum of the three numbers is: {}\".format(p))\n return p", "def lcombine( v1, v2, k1, k2 ):\n return [ x*k1 + y*k2 for (x,y) in izip(v1,v2) ]", "def __add__(self, other):\n n = len(self)\n\n if n != len(other):\n raise(VetorError, \"Vetor dimensions are not equal\")\n\n v = zeros_como(self)\n\n for i in range(n):\n v[i] = self[i] + other[i]\n\n return v", "def sum_vectors(vector_1, vector_2):\n new_coordinates = []\n index = 0\n while index < 
vector_1.dimension:\n new_value = vector_1.coordinates[index] + vector_2.coordinates[index]\n new_coordinates.append(new_value)\n index += 1\n new_vector = Vector(new_coordinates)\n return new_vector", "def __init__(self, v1, v2):\n self.v1 = v1\n self.v2 = v2\n self.total = len(v1) + len(v2)\n self.cur = 0\n self.i = 0\n self.j = 0", "def add(a,b):\n return [a[0]+b[0],a[1]+b[1],a[2]+b[2],1.0]", "def _addVectors(X1,X2):\n _checkSize(X1,X2)\n return [ X1[i] + X2[i] for i in range(len(X1))]", "def _add_vtarg_and_adv(seg, gamma, lam):\n new = np.append(seg[\"new\"], 0)\n vpred = np.append(seg[\"vpred\"], seg[\"nextvpred\"])\n T = len(seg[\"rew\"])\n seg[\"adv\"] = gaelam = np.empty(T, 'float32')\n rew = seg[\"rew\"]\n lastgaelam = 0\n for t in reversed(range(T)):\n nonterminal = 1 - new[t + 1]\n delta = rew[t] + gamma * vpred[t + 1] * nonterminal - vpred[t]\n gaelam[t] = lastgaelam = delta + gamma * lam * nonterminal * lastgaelam\n seg[\"tdlamret\"] = seg[\"adv\"] + seg[\"vpred\"]\n del seg[\"nextvpred\"]", "def gauss_vect_mult(v):\n Jv = T.Rop(output, params, v)\n HJv = T.Rop(T.grad(opt_cost,output), output, Jv)\n JHJv = T.Lop(output, params, HJv)\n if not isinstance(JHJv,list):\n JHJv = [JHJv]\n JHJv = [a+ridge*b for a,b in zip(JHJv,v)]\n return JHJv", "def update_V(self, Vs, Vs_next, reward, alpha, gamma):\r\n return Vs + alpha * (reward + gamma * Vs_next - Vs)", "def proVec(*args):\r\n resultado = []\r\n i,j,k = (args[0][1] * args[1][2]) - (args[0][2] * args[1][1]) , ((args[0][0] * args[1][2]) - (args[0][2] * args[1][0])) * (-1) , (args[0][0] * args[1][1]) - (args[0][1] * args[1][0])\r\n resultado.append(i)\r\n resultado.append(j)\r\n resultado.append(k)\r\n return resultado", "def prod_value(self, lv, rv):", "def Afunc(n, Q, V, P, As=None):\n if As is None or type(As) is not dict:\n As = dict()\n if 0 not in As:\n A = P*V*Ufunc(0,Q,V,P)\n As[0] = A\n else: A = As[0]\n\n for j in range(1, n):\n if j not in As:\n A_new = P*V*Ufunc(j,Q,V,P)\n As[j] = A_new\n else:\n A_new = As[j]\n A += A_new\n return A, As", "def extforce (u, v):\r\n\r\n for i in range (height):\r\n for j in range (width):\r\n u[i,j], v[i,j] = np.stack((u[i,j], v[i,j])) + dt * extacc\r\n\r\n return u, v", "def incrementSparseVector(v1, scale, v2):\n for index in v2:\n v1[index] += v2[index] * scale", "def V(I, dT, a, b, c, d, e, f):\n x1 = I # I\n x2 = dT # dT\n m = (a * x1 ** 2 + b * x1 + c)\n b = (d * x1 ** 2 + e * x1 + f)\n return m * x2 + b", "def V_particle_ablation(s_n, g_n, s_others, n_h1=64, n_h2=64):\n concated = tf.concat( [s_n, g_n, s_others], axis=1 )\n with tf.variable_scope(\"stage-2\"):\n h1 = tf.layers.dense(inputs=concated, units=n_h1, activation=tf.nn.relu, use_bias=True, name='V_h1')\n h2 = tf.layers.dense(inputs=h1, units=n_h2, activation=tf.nn.relu, use_bias=True, name='V_h2')\n out = tf.layers.dense(inputs=h2, units=1, activation=None, use_bias=False, name='V_out')\n return out", "def add_vectorlist(vectors):\n x, y, z = zip(*vectors)\n return sum(x), sum(y), sum(z)", "def funcv(x):\n f0 = x[0] ** 3.0 + x[1] + 3.0\n f1 = x[1] - 4.0 * x[0]\n return f0, f1", "def incrementSparseVector(v1, scale, v2):\n # BEGIN_YOUR_ANSWER (our solution is 2 lines of code, but don't worry if you deviate from this)\n for v2_key in v2:\n v1[v2_key] += scale * v2[v2_key]\n return v1\n # END_YOUR_ANSWER", "def v(x):\n return x*x", "def mult(v):\n\treturn sum([v[a][1] for a in range(len(v))])", "def add_vectors(v1,v2):\n \n #iterates through second dictionnary\n for key in v2:\n #if key is in v1 and v2 then we would add the 
values\n if key in v1:\n v1[key] = v1[key] +v2[key]\n #checks if the value at current key is 0\n if v1[key] == 0:\n # if value is 0 then we delete the key \n del v1[key]\n #if the key is not in v1 then we create a new key with the same value in v2\n elif key not in v1:\n v1[key] = v2[key]\n #checks if the value at current key is 0\n if v1[key] == 0:\n # if value is 0 then we delete the key \n del v1[key]", "def __iadd__(self, m):\n if self.__mm_type(m):\n ls=len(self)\n for i in self.desc():\n for j in range(ls):\n self.g_val(self.val(i,j)+m.val(i,j),i,j)\n return self", "def addInvariants(invar1, invar2):\n invar_sum= {}\n for key in invar1.keys():\n invar_sum[key] = np.array(np.add(invar1[key], invar2[key]))\n \n return(invar_sum)", "def scalar_vector_ext(alpha, v, a, b):\n return [alpha * v[0],\n alpha * v[0] * a + b]", "def heisen3_sum(v1: Vec3, v2: Vec3) -> Vec3:\n assert v1.shape == v2.shape and v1.shape == (3,)\n # first, do normal sum to get (x + x', y + y', z + z')\n out = v1 + v2\n # then add the xy' part to z + z'\n out[2] += v1[0] * v2[1]\n return out", "def add(n1, n2):\n return n1 + n2", "def EmitVSumReduce(self, reduce_type, elem_count, reduce_count, destinations,\n sources):\n if reduce_type is not 'u32':\n raise ArgumentError('Unsupported reduce: %s' % reduce_type)\n\n if (elem_count + 3) / 4 > len(destinations):\n raise ArgumentError('To few destinations: %d (%d needed)' %\n (len(destinations), (elem_count + 3) / 4))\n\n if elem_count * reduce_count > len(sources) * 4:\n raise ArgumentError('To few sources: %d' % len(sources))\n\n if reduce_count <= 1:\n raise ArgumentError('Unsupported reduce_count: %d' % reduce_count)\n\n sources = [_Cast(128, source) for source in sources]\n destinations = [_Cast(128, destination) for destination in destinations]\n\n while reduce_count > 1:\n if len(sources) % 2 == 1:\n sources.append(sources[-1])\n\n if reduce_count == 2:\n for i in range(len(destinations)):\n self.EmitVPadd(reduce_type, destinations[i], sources[2 * i],\n sources[2 * i + 1])\n return\n else:\n sources_2 = []\n for i in range(len(sources) / 2):\n self.EmitVPadd(reduce_type, sources[2 * i], sources[2 * i],\n sources[2 * i + 1])\n sources_2.append(sources[2 * i])\n reduce_count /= 2\n sources = sources_2", "def _append_value(self, v_values, next_value, v_idx=None, n_vals=1):\n for _ in range(n_vals):\n if v_idx:\n try:\n v_i = next(v_idx)\n except StopIteration:\n # Repeating commas are null-statements and can be ignored\n # Otherwise, we warn the user that this is a bad namelist\n if next_value is not None:\n warnings.warn(\n 'f90nml: warning: Value {v} is not assigned to '\n 'any variable and has been removed.'\n ''.format(v=next_value)\n )\n\n # There are more values than indices, so we stop here\n break\n\n v_s = [self.default_start_index if idx is None else idx\n for idx in v_idx.first]\n\n if not self.row_major:\n v_i = v_i[::-1]\n v_s = v_s[::-1]\n\n # Multidimensional arrays\n if not self.sparse_arrays:\n pad_array(v_values, list(zip(v_i, v_s)))\n\n # We iterate inside the v_values and inspect successively\n # deeper lists within the list tree. 
If the requested index is\n # missing, we re-size that particular entry.\n # (NOTE: This is unnecessary when sparse_arrays is disabled.)\n\n v_subval = v_values\n for (i_v, i_s) in zip(v_i[:-1], v_s[:-1]):\n try:\n v_subval = v_subval[i_v - i_s]\n except IndexError:\n size = len(v_subval)\n v_subval.extend([] for _ in range(size, i_v - i_s + 1))\n v_subval = v_subval[i_v - i_s]\n\n # On the deepest level, we explicitly assign the value\n i_v, i_s = v_i[-1], v_s[-1]\n try:\n v_subval[i_v - i_s] = next_value\n except IndexError:\n size = len(v_subval)\n v_subval.extend(None for _ in range(size, i_v - i_s + 1))\n v_subval[i_v - i_s] = next_value\n else:\n v_values.append(next_value)", "def dot_product(u, v):\n sum_of_products = 0\n if u!= None:\n if v!= None:\n for combo in zip(u, v):\n sum_of_products += (combo[0] * combo[1])\n return sum_of_products", "def test_amp_sums_can_be_simplified(free_alg):\n dr = free_alg\n v = dr.names.v\n n, i, j = symbols('n i j')\n x = IndexedBase('x')\n r = Range('D', 0, n)\n\n tensor = dr.sum((i, r), (j, r), i ** 2 * x[j] * v[j])\n res = tensor.simplify_sums()\n assert res == dr.sum((j, r), (\n n ** 3 / 3 - n ** 2 / 2 + n / 6\n ) * x[j] * v[j])", "def __add__(self, other):\n return Vec2d(self.v[0] + other[0], self.v[1] + other[1])", "def incrementSparseVector(v1, scale, v2):\n # BEGIN_YOUR_CODE (our solution is 2 lines of code, but don't worry if you deviate from this)\n for k in v2.keys():\n v1[k] += scale*v2[k]\n\n return v1\n # END_YOUR_CODE", "def __add__(self,other):\n self._obj['u'] += other._obj['u']\n self._obj['v'] += other._obj['v']\n return self._obj", "def obj(k_next) : \n \n if method==1 :\n value_vec = -1 * (log(A*k_current**(alpha) - k_next) + beta*linear_interp(k_grid,v_update,k_next))\n elif method==2 :\n value_vec = -1 * (log(A*k_current**(alpha) - k_next) + beta*quad_interp(k_grid,v_update,k_next))\n elif method==3 :\n value_vec = -1 * (log(A*k_current**(alpha) - k_next) + beta*cubic_interp(k_grid,v_update,k_next))\n \n return value_vec", "def triple_step_simplified(n):\n\ta = 0\n\tb = 0\n\tc = 1\n\tfor i in range(n):\n\t\ttemp = a + b + c\n\t\ta, b, c = b, c, temp\n\treturn temp", "def __add__(self,that):\n return self.__opExpand2(that,np.add)", "def sum_squares(v):\n\treturn dot(v, v)", "def multi_2(cur,p,n):\n\tr=p\n\twhile n>1:\n\t\tr=sum(cur,r,p)\n\t\tn-=1\n\treturn r", "def dotproduct(v1, v2):\n\treturn sum(imap(operator.mul, v1, v2))", "def boundary_op_n(v):\r\n h = list(v.dic.keys())[0]\r\n p = len(h) - 1\r\n s = P_chains([],[])\r\n if (p != 0) and (isinstance(h, str) != True) and (isinstance(h, frozenset) != True) and (isinstance(h, ImmutableMatrix) != True):\r\n if (is_int(list(v.dic.keys())) == True):\r\n for u in v.dic.keys():\r\n c = 0\r\n for i in u: \r\n w = list(u)[:]\r\n w.remove(i)\r\n if (orientation_function(tuple(tuple_sorted(tuple(w))),tuple(w),p) == True):\r\n s1 = P_chains([tuple(tuple_sorted(tuple(w)))],[abs(v.dic[u])])\r\n if (np.sign((v.dic[u])*(-1)**c) < 0):\r\n s = s - s1\r\n else:\r\n s = s + s1\r\n c = c+1\r\n else:\r\n s1 = P_chains([tuple(tuple_sorted(tuple(w)))],[abs(v.dic[u])])\r\n if (np.sign((v.dic[u])*(-1)**(c+1)) < 0):\r\n s = s - s1\r\n else:\r\n s = s + s1\r\n c = c+1\r\n return s\r\n else:\r\n aux = P_chains([],[])\r\n D = {}\r\n ct = 0\r\n st = []\r\n for u in v.dic.keys():\r\n for x in u:\r\n if x not in st:\r\n st.append(x)\r\n for i in st:\r\n D[tuple([ct])] = i\r\n ct = ct + 1\r\n for u in v.dic.keys():\r\n w2 = []\r\n for x in u:\r\n for y in list(D.keys()):\r\n if (x == D[y]):\r\n 
w2.append(y)\r\n aux = aux + P_chains([tuple(w2)],[v.dic[u]]) \r\n v = aux\r\n for u in v.dic.keys():\r\n c = 0\r\n for i in u: \r\n w = list(u)[:]\r\n w.remove(i)\r\n if (orientation_function(tuple(tuple_sorted(tuple(w))),tuple(w),p) == True):\r\n s1 = P_chains([tuple(tuple_sorted(tuple(w)))],[abs(v.dic[u])])\r\n if (np.sign((v.dic[u])*(-1)**c) < 0):\r\n s = s - s1\r\n else:\r\n s = s + s1\r\n c = c+1\r\n else:\r\n s1 = P_chains([tuple(tuple_sorted(tuple(w)))],[abs(v.dic[u])])\r\n if (np.sign((v.dic[u])*(-1)**(c+1)) < 0):\r\n s = s - s1\r\n else:\r\n s = s + s1\r\n c = c+1\r\n s2 = P_chains([],[])\r\n for u in s.dic.keys():\r\n w2=[]\r\n for i in u:\r\n w2.append(D[i])\r\n s2 = s2 + P_chains([tuple(w2)],[s.dic[u]])\r\n \r\n return s2\r\n else:\r\n return s", "def f_v(self, v):\n\n return self.f(v[:, 0], v[:, 1], v[:, 2])", "def __iadd__( self, vector3 ):\n return self.add( vector3 )", "def compute(self, node, input_vals):\r\n assert len(input_vals) == 2\r\n return input_vals[0] + input_vals[1]\r\n #print(input_vals[0])\r\n #print(input_vals[1])\r\n #print(input_vals[0]+input_vals[1])\r", "def map_from_n_v_to_eta(n, v):\n return np.hstack((0.5 * n, -0.5 * np.ravel(np.linalg.inv(v))))", "def add_vectors(coord, vector):\n return tuple(c1+c2 for c1,c2 in zip(coord, vector))", "def sum(self, vector):\n\n # return (self.from_list([x+vector.vector[self.vector.index(x)]\n # for x in self.vector]))\n return Vector(self.x + vector.x, self.y + vector.y, self.z + vector.z)", "def test_tensors_can_substitute_vectors_simultaneously(\n free_alg, full_balance, full_simplify\n):\n\n dr = free_alg\n p = dr.names\n\n x = IndexedBase('x')\n i, j = p.i, p.j\n v = p.v\n\n orig = dr.einst(x[i, j] * v[i] * v[j])\n v_def = dr.sum(2 * v[i])\n\n dr.full_simplify = full_simplify\n res = orig.subst(v[i], v_def, full_balance=full_balance).simplify()\n dr.full_simplify = True\n\n expected = dr.einst(4 * x[i, j] * v[i] * v[j]).simplify()\n assert res == expected", "def append(self,x):\n if self.empty:\n self.vcount = 1\n self.vcountsq = 1\n self.vmin = x\n self.vmax = x\n self.vsum = x\n self.vmean = x\n self.vm2 = 0\n self.vm3 = 0\n self.vm4 = 0\n self.dirty = True\n else:\n nA = self.vcount\n nAA = self.vcountsq\n nX = nA+1\n nXX = nX**2\n nXXX = nX**3\n self.vcount = nX\n self.vcountsq = nXX\n\n if x < self.vmin:\n self.vmin = x\n if x > self.vmax:\n self.vmax = x\n\n delta = x-self.vmean\n delta2 = delta**2\n delta3 = delta**3\n delta4 = delta**4\n self.vmean += delta/nX # incremental mean (good for vectorial)\n self.vm3 += delta3*(nA*(nA-1))/nXX - 3*delta*self.vm2/nX\n self.vm4 += delta4*(nA*(nAA-nA+1))/nXXX + 6*delta2*(self.vm2)/nXX - 4*delta*self.vm3/nX\n # note is done at end\n self.vm2 += delta2*nA/nX # incremental quadratic for variance (good for vectorial)\n self.vsum += x\n\n self.dirty = True", "def add_n():\n pass", "def main():\n\ta = SparseVector(10)\n\tb = SparseVector(10)\n\ta.put(3, 0.50)\n\ta.put(9, 0.75)\n\ta.put(6, 0.11)\n\ta.put(6, 0.00)\n\tb.put(3, 0.60)\n\tb.put(4, 0.90)\n\tprint(\"a = {}\".format(a))\n\tprint(\"b = {}\".format(b))\n\tprint(\"a dot b = {}\".format(a.dot(b)))\n\tprint(\"a + b = {}\".format(a.plus(b)))", "def __add__(self, vs):\n ret = self.__elements\n for v in map(tuple, vs):\n if v not in map(tuple, ret):\n ret.append(np.array(v))\n return ret", "def add(n1, n2):\n return n1 + n2", "def dot_prod(u,v):\n each_product = []\n for i in range(len(u)):\n each_product.append(u[i] * v[i])\n return sum(each_product)", "def __add__(self,other):\n return 
Vector(self.x+other.x,self.y+other.y,self.z+other.z)", "def Values(self) -> _n_1_t_4:", "def concat(V, s):\n\n X = []\n for k in range(s):\n x = []\n for j in V:\n x.append(j[k])\n X.append(vertcat(*x))\n return X", "def v_o(A,vd):\n return A*vd" ]
[ "0.6679955", "0.6620289", "0.6549366", "0.6544268", "0.6518459", "0.64456105", "0.64288396", "0.6398406", "0.62491995", "0.6110092", "0.6052938", "0.60382", "0.6037448", "0.602034", "0.5956096", "0.5947813", "0.5876077", "0.58669853", "0.585338", "0.58445466", "0.58286864", "0.58242565", "0.5813848", "0.5803564", "0.57805663", "0.57799673", "0.5762379", "0.57493675", "0.5738828", "0.5735856", "0.5734413", "0.57336646", "0.57315713", "0.5723155", "0.5676924", "0.5663687", "0.5659237", "0.5656973", "0.5652614", "0.5642748", "0.56287736", "0.56021893", "0.55855197", "0.5580397", "0.5576613", "0.55691844", "0.5568684", "0.5561054", "0.5542047", "0.554024", "0.55331045", "0.553162", "0.55193466", "0.55172414", "0.55142313", "0.550288", "0.5501769", "0.549296", "0.5491228", "0.5488779", "0.54874593", "0.54844284", "0.54789317", "0.54726774", "0.54622567", "0.5459249", "0.54470557", "0.5445233", "0.54429895", "0.54398763", "0.54376006", "0.54373294", "0.5436098", "0.5433956", "0.5414798", "0.5411522", "0.54076606", "0.5401322", "0.5399093", "0.5398348", "0.5397711", "0.5392309", "0.53879523", "0.53856504", "0.53838503", "0.5378561", "0.5363257", "0.5353552", "0.53519636", "0.5331811", "0.53297514", "0.53206813", "0.531619", "0.53156614", "0.5312674", "0.53095907", "0.5304894", "0.5303143", "0.52992415", "0.5295994", "0.5292215" ]
0.0
-1
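In this record the query "v_1 v_1 + ... + v_n v_n" maps to `sum_of_squares`, which simply reuses `dot` from the previous record. A minimal sketch, assuming the same `dot` helper and a made-up input list:

def dot(v, w):
    return sum(v_i * w_i for v_i, w_i in zip(v, w))

def sum_of_squares(x):
    """v_1 v_1 + ... + v_n v_n"""
    return dot(x, x)

# Illustrative check with a made-up input: 1*1 + 2*2 + 3*3 = 14
print(sum_of_squares([1, 2, 3]))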
translates x by subtracting its mean from every observation (so that the result has a mean = 0)
def dev_mean(x):
    x_bar = mean(x)
    return [x_i - x_bar for x_i in x]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def de_mean(x):\n x_bar = mean(x)\n return [ x_i - x_bar for x_i in x]", "def de_mean(x):\n x_bar = mean(x)\n return [x_i - x_bar for x_i in x]", "def de_mean(x):\n x_bar = mean(x)\n return [x_i - x_bar for x_i in x]", "def de_mean(x):\n x_bar = mean(x)\n return [x_i - x_bar for x_i in x]", "def normalize(x):\n MEAN_VALUES = np.array([104, 117, 123])\n means = theano.shared(MEAN_VALUES.astype(\"float32\"))\n return x[:, ::-1, :, :] - means[np.newaxis, :, np.newaxis, np.newaxis]", "def normalize(self, X):\n return X - X.mean()", "def de_mean(xs: List[float]) -> float:\n x_bar = mean(xs)\n return [x - x_bar for x in xs]", "def demeaned(self):\n return self.data - self.mean", "def sample_zero_mean(x):\n return x - np.mean(x, axis=1).reshape((x.shape[0], 1))", "def normalize_data(x):\n mvec = x.mean(0)\n stdvec = x.std(axis=0)\n \n return (x - mvec)/stdvec", "def normalize_data(x):\n mvec = x.mean(0)\n stdvec = x.std(axis=0)\n \n return (x - mvec)/stdvec", "def forward(self, x, subtract_mean=True, clip=False):\n is_numpy = isinstance(x, np.ndarray)\n x = self._convert_to_torch(x)\n assert x.shape[-1] == self.mean.shape[-1], \\\n f'got shape={x.shape} but expected: {self.mean.shape}'\n if subtract_mean:\n x_new = (x - self.mean) / (self.std + self.eps)\n else:\n x_new = x / (self.std + self.eps)\n if clip:\n x_new = torch.clamp(x_new, -self.bound, self.bound)\n x_new = x_new.numpy() if is_numpy else x_new\n\n return x_new", "def inverse_transform(self, X):\n X = X.copy() # type: pd.DataFrame\n if self.with_std:\n X.loc[:, self._feature_mask_] *= self.scale_\n if self.with_mean:\n X.loc[:, self._feature_mask_] += self.mean_\n return X", "def _remove_baseline(x, axis=None):\n x -= np.mean(x, axis=axis, keepdims=True)\n return x", "def unstandardize(\n x: torch.Tensor,\n stats: Dict[str, torch.Tensor]) -> torch.Tensor:\n x_scaled = x * stats['std'] + stats['mean']\n return x_scaled", "def normalize(X):\n\tX = X - np.mean(X,axis=1)[:,np.newaxis]\n\tX = X/np.std(X,axis=0)[np.newaxis,:];\n\tX = X - np.mean(X,axis=0)[np.newaxis,:]\n\treturn X", "def normalize(x):\n # TODO: Implement Function\n data_max = np.max(x)\n data_min = np.min(x)\n x = (x - data_min) / (data_max - data_min)\n return x", "def _de_transform(self, data):\r\n mean, variance = self._input_statistics.overall_feature_moments\r\n return data * variance + mean", "def feature_zero_mean(x, xtest):\n mu = np.mean(x, axis=0)\n return x - mu, xtest - mu", "def unstandardize(da: xr.DataArray, mean: xr.DataArray, std: xr.DataArray):\n return (std * da) + mean", "def standardize(x, axis=-1):\n stds_avg = np.std(x, axis=axis, keepdims=True)\n x -= np.mean(x, axis=axis, keepdims=True)\n x /= (stds_avg + 1e-8)\n return x", "def normalize(X):\n # z-score\n mean = np.mean(X, axis=(0, 1, 2, 3))\n std = np.std(X, axis=(0, 1, 2, 3))\n # avoid dividing zero by adding a very small number\n X = (X - mean) / (std + 1e-7)\n\n return X", "def transform ( self, X ):\n \n if not self.mean and not self.std:\n return X\n if self.mean:\n df_xf = X - self.df_means # Subtract means\n if self.std:\n is_zero = np.isclose ( self.df_std, 0 ) # If non-zero variance,\n with warnings.catch_warnings():\n warnings.simplefilter ( \"ignore\" )\n df_xf = np.where (\n is_zero, X, X / self.df_std\n ) # Ensure no divide by zero issues\n\n return df_xf\n # End transform()", "def inverse_transform(self, X):\n check_is_fitted(self)\n X = check_array(X, dtype=FLOAT_DTYPES)\n # From pls space to original space\n X_reconstructed = torch.matmul(X, self.x_loadings_.T)\n\n # 
Denormalize\n X_reconstructed *= self.x_std_\n X_reconstructed += self.x_mean_\n return X_reconstructed", "def _transform(self, X: Tensor) -> Tensor:\n if self.training and self.learn_bounds:\n if X.size(-1) != self.means.size(-1):\n raise BotorchTensorDimensionError(\n f\"Wrong input. dimension. Received {X.size(-1)}, \"\n f\"expected {self.means.size(-1)}\"\n )\n self.means = X.mean(dim=-2, keepdim=True)\n self.stds = X.std(dim=-2, keepdim=True)\n\n self.stds = torch.clamp(self.stds, min=self.min_std)\n if hasattr(self, \"indices\"):\n X_new = X.clone()\n X_new[..., self.indices] = (\n X_new[..., self.indices] - self.means[..., self.indices]\n ) / self.stds[..., self.indices]\n return X_new\n return (X - self.means) / self.stds", "def untruncatedMean(self, x):\n self.raiseAnError(NotImplementedError,'untruncatedMean not yet implemented for ' + self.type)", "def standardize(x, mean_x=None, std_x=None):\n if mean_x is None:\n mean_x = np.mean(x, axis=0)\n x = x - mean_x\n if std_x is None:\n std_x = np.std(x, axis=0)\n x[:, std_x > 0] = x[:, std_x > 0] / std_x[std_x > 0]\n\n tx = np.hstack((np.ones((x.shape[0], 1)), x))\n return tx, mean_x, std_x", "def transform(self, data):\n data -= self.mean\n if 0.0 in self.std:\n self.std = np.where(self.std == 0.0, 1.0, self.std)\n data /= self.std\n return data", "def _untransform(self, X: Tensor) -> Tensor:\n X_new = X.clone()\n X_new[..., self.indices] = 10.0 ** X_new[..., self.indices]\n return X_new", "def inverse_transform(self, X):\n X = X * self.mad_\n return X + self.med_", "def normalise(x):\n return (x - jnp.min(x)) / (jnp.max(x) - jnp.min(x))", "def inverse_transform(self, X):\n X = np.asarray(X, dtype=np.float64)\n X -= self.min_\n X /= self.scale_\n return X", "def normalize(x):\n\n return (x - x.values.min()) / (x.values.max() - x.values.min())", "def zero_mean(x):\n\treturn np.zeros((x.shape)[0])", "def inverse_transform(self, X):\n # No warning for y, since there's no y variable.\n # This correpsonds to function signature in scikit-learn's code base\n X = X.copy() # type: pd.DataFrame\n X.loc[:, self._feature_mask_] *= self.scale_\n X.loc[:, self._feature_mask_] += self.min_\n return X", "def _untransform(self, X: Tensor) -> Tensor:\n pass # pragma: no cover", "def unscale_data(self, data):\n return (data + self.mean)*self.std", "def normalize(x):\n return (x - math_ops.reduce_min(x)) / (math_ops.reduce_max(x) - math_ops.reduce_min(x))", "def denormalize(x, std, mean):\n out = x * std + mean\n return out.clamp(0, 1)", "def _transform(self, data):\r\n mean, variance = self._input_statistics.overall_feature_moments\r\n return (data - mean) / variance", "def standardise(x):\n mean_x = np.mean(x, axis=0)\n x = x - mean_x\n std_x = np.std(x, axis=0)\n x = x / std_x\n return x, mean_x, std_x", "def __call__(self, x: np.ndarray):\n out_fst = self.fst(x)\n out_snd = self.snd(x)\n diff_max = [np.max(np.abs(y_fst - y_snd))\n for y_fst, y_snd in zip(out_fst, out_snd)]\n self.max = np.concatenate([self.max, [diff_max]], axis=0)\n diff_mean = [np.mean(np.abs(y_fst - y_snd))\n for y_fst, y_snd in zip(out_fst, out_snd)]\n self.mean = np.concatenate([self.mean, [diff_mean]], axis=0)", "def _normalize(self, x):\n # TODO: imagenet normalization\n\n return x", "def adjust(data):\n mu = mean(data)\n return mu, map(lambda x: (x-mu), data)", "def normalization(x, x_min=-5.12, x_max=5.12):\n for i in range(len(x.vect)):\n x.vect[i] = x_min + x.vect[i]*(x_max-x_min)\n return x", "def inverse_transform(self, x):\n raise NotImplementedError()", "def 
standardize(\n x: torch.Tensor,\n stats: Dict[str, torch.Tensor]) -> torch.Tensor:\n\n x_scaled = (x - stats['mean']) / stats['std']\n return x_scaled", "def normalize(self, x):\n self.max = x.max()\n self.min = x.min()\n return (2 * (x - x.min()) / (x.max() - x.min()) - 1)", "def inverse_transform(self, X, copy=None):\n \n check_is_fitted(self, 'scale_')\n\n copy = copy if copy is not None else self.copy\n\n #X = check_array(X, copy=copy, warn_on_dtype=True,\n # estimator=self, dtype=FLOAT_DTYPES,\n # force_all_finite='allow-nan')\n\n if self.with_mean:\n X += self.mean_\n if self.with_std:\n X *= self.scale_\n return X", "def standardize(x, mean=None, std=None): \n \n mean = mean if mean is not None else x.mean(axis=0)\n std = std if std is not None else x.std(axis=0) \n \n return (x - mean) / std, mean, std", "def unexplained_variance(x, x_proj):\n res = poincare.distance(x, x_proj) ** 2\n return torch.mean(res).item()", "def transform(self, x):\n \n return x.apply(lambda x_i: self.d[x_i] if x_i in self.d else 0)", "def transform(self, x):\n \n return x.apply(lambda x_i: self.d[x_i] if x_i in self.d else 0)", "def _inverse_transform(self, x):\n if x.atleast_2d().shape[1] != self.w.size:\n raise ValueError(\"array to revert must have {:} \"\n \"features (columns).\".format(self.w.size))\n\n v = (x - self.b).atleast_2d()\n\n v[:, self.w != 0] /= self.w[self.w != 0] # avoids division by zero\n\n return v.ravel() if x.ndim <= 1 else v", "def _normalize(x):\n tol = 1e-10\n dims = x.shape\n\n x = x.flatten()\n inverse = (np.sum(x**2) + tol) ** -.5\n x = x * inverse\n x = np.reshape(x, dims)\n\n return x", "def _normalize_feature(self, feature):\n\n for ic in range(self.data_shape[0]):\n feature[ic] = (feature[ic] - self.feature_mean[ic]\n ) / self.feature_std[ic]\n return feature", "def unwhiten_back(self, sample):\n sample = sample*self.Y_std.unsqueeze(1) + self.Y_mean.unsqueeze(1)\n return sample", "def unwhiten_back(self, sample):\n sample = sample*self.Y_std.unsqueeze(1) + self.Y_mean.unsqueeze(1)\n return sample", "def inverse_transform(self, X, copy=True):\n check_is_fitted(self)\n\n X = check_array(X, copy=(copy and self.whiten), dtype=[np.float64, np.float32])\n X = np.dot(X, self.mixing_.T)\n if self.whiten:\n X += self.mean_\n\n return X", "def standardize(X):\n X_std = X\n mean = X.mean(axis=0)\n std = X.std(axis=0)\n for col in range(np.shape(X)[1]):\n if std[col]:\n X_std[:, col] = (X_std[:, col] - mean[col]) / std[col]\n # X_std = (X - X.mean(axis=0)) / X.std(axis=0)\n return X_std", "def standardize(X):\n mu = X.mean(axis=0, keepdims=True)\n s = X.std(axis=0, keepdims=True)\n return (X-mu)/s", "def substract_mean(data, mean_im):\n\n\tno_of_images = len(data)\n\tfor i in xrange(no_of_images):\n\t\tdata[i] = data[i] - mean_im\n\n\treturn data", "def standardize(x, mean_x=None, std_x=None):\n if mean_x is None:\n mean_x = np.mean(x,axis=0)\n x = x - mean_x\n if std_x is None:\n std_x = np.std(x,axis=0)\n x = x / std_x\n return x, mean_x, std_x", "def unnormalize(tensor, mean, std):\n for t, m, s in zip(tensor, mean, std):\n t.mul_(s).add_(m)\n return tensor", "def normalize(x):\n return (x + 1e-10) / (K.sqrt(K.mean(K.square(x))) + 1e-10)", "def inverse_transform(self, X):\n\n pass # pragma: no cover", "def _untransform(self, X: Tensor) -> Tensor:\n if len(self.batch_shape) > 0:\n if self.batch_shape != X.shape[-2 - len(self.batch_shape) : -2]:\n raise BotorchTensorDimensionError(\n \"The right most batch dims of X must match self.batch_shape: \"\n f\"({self.batch_shape}).\"\n )\n 
X_tf = X.clone()\n k = Kumaraswamy(\n concentration1=self.concentration1, concentration0=self.concentration0\n )\n # unnormalize from [eps, 1-eps] to [0,1]\n X_tf[..., self.indices] = (\n (k.icdf(X_tf[..., self.indices]) - self._X_min) / self._X_range\n ).clamp(0.0, 1.0)\n return X_tf", "def normalise(x):\n x = np.copy(x)\n n_cols = x.shape[1]\n for col_index in range(n_cols):\n col = x[:, col_index]\n factor = np.max(col)\n x[:, col_index] = col / factor\n\n return x", "def center(x):\n return x - x.mean()", "def inverse_transform(self, X):\n ...", "def inverse_transform(self, X):\n ...", "def inverse_transform(self, X):\n ...", "def inverse_transform(self, X):\n ...", "def inverse_transform(self, X):\n ...", "def normalize_scl(self,x):\n max_val = np.max(x['data'][0])\n last_val = x['data'][0][-1]\n return last_val/max_val", "def msub(trace):\n \n return(trace - np.mean(trace))", "def inverse_transform(self, data):\n transed = self.restoreDim(data)\n mean = torch.zeros(transed.size())\n std = torch.ones(transed.size())\n if args.cuda:\n mean = mean.cuda()\n std = std.cuda()\n mean[...,0] = self.mean0\n mean[...,1] = self.mean1\n std[...,0] = self.std0\n std[...,1] = self.std1\n transformed = torch.add(torch.mul(transed, std), mean)\n del mean, std\n return transformed.permute(1,0,3,2)", "def inverse_normal_transformation(x, c=3/8):\n r = scipy.stats.rankdata(x, \"average\")\n return scipy.stats.norm.ppf((r - c) / (len(x) - 2 * c + 1))", "def partial_flatten_and_normalize(x):\n x = np.reshape(x, (x.shape[0], -1))\n return (x - np.mean(x)) / np.std(x)", "def normalize(x, x_max, x_min):\n return (x - x_min) / (x_max - x_min)", "def subMeanAll(data=None):\n datamean = data.mean(axis = 0)\n data[:,3:] = data[:,3:] - datamean[3:]\n return data", "def normalize_features(X):\n std = X.std(axis=0)\n std = np.where(std == 0, 1, std) # to avoid division by zero\n x_normed = (X - X.mean(axis=0)) / std\n return x_normed", "def normalize(x, min_x, max_x):\n\treturn (x - min_x) / (max_x - min_x)", "def getTranslatedAndInverseScaledMean(self, x, y):\n if self.name == 1:\n return self.meanLandmark.scale(self.meanScale * 0.8).translate(x, y)\n\n return self.meanLandmark.scale(self.meanScale).translate(x, y)", "def substract_mean(image_array, mean):\n image_array = image_array.astype(np.float32)\n image_array[:, :, 0] -= mean[0]\n image_array[:, :, 1] -= mean[1]\n image_array[:, :, 2] -= mean[2]\n return image_array", "def mean_subtract(dataset):\n data = [dataset[i] for i in range(len(dataset))]\n data_numpy = [dataset[i].numpy() for i in range(len(dataset))]\n\n # mean\n mean = np.mean(data_numpy)\n\n # standard deviation\n std = np.std(data_numpy)\n\n # perform mean subtract\n new_dataset = []\n for i in range(len(dataset)):\n data[i] -= mean\n data[i] /= std\n new_dataset.append(data[i])\n return new_dataset, mean", "def inverse_transform ( x ):\n x_out = x*1.\n # Cab, posn 1\n x_out[1] = -100.*np.log ( x[1] )\n # Cab, posn 2\n x_out[2] = -100.*np.log ( x[2] )\n # Cw, posn 4\n x_out[4] = (-1./50.)*np.log ( x[4] )\n #Cm, posn 5\n x_out[5] = (-1./100.)*np.log ( x[5] )\n # LAI, posn 6\n x_out[6] = -2.*np.log ( x[6] )\n # ALA, posn 7\n x_out[7] = 90.*x[7]\n return x_out", "def __call__(self, x: float):\n out = self._n2 - x\n out = np.where(np.abs(out) > 1.0e-12, out, np.NaN)\n out = self._multiplicity / out\n\n return np.sum(out, axis=0) - self._normalization", "def __call__(self, x: float):\n out = self._n2 - x\n out = np.where(np.abs(out) > 1.0e-12, out, np.NaN)\n out = self._multiplicity / out\n\n 
return np.sum(out, axis=0) - self._normalization", "def reconstruct(self, x):\n return self.inverse_transform(self.transform(x))", "def mean_scale(self, x: torch.Tensor):\n\n return self._mean_scale(x)", "def normalize(self,x,xmin,xmax):\n return (x-xmin)/(xmax-xmin)", "def convertHermiteToNormal(self,x):\n return self.sigma*x+self.untruncatedMean()", "def transform(self, y):\n if isinstance(y, np.ndarray) and y.ndim == 2:\n T, n = y.shape\n x = self._E_mean([y - self.mean_])[0]\n x = x[0].reshape(T, self.n_factors)\n else:\n x = self._E_mean([yi - self.mean_ for yi in y])[0]\n x = [xi.reshape(yi.shape[0], self.n_factors) for xi, yi in zip(x, y)]\n return x", "def untransform(self, X: Tensor) -> Tensor:\n for tf in reversed(self.values()):\n X = tf.untransform(X)\n return X", "def normalize_mean(dataset):\n normalized_dataset = np.array(dataset)\n return normalized_dataset - np.mean(normalized_dataset)", "def inverse_transform(self, data):\n mean = torch.ones(data.size()) * self.mean0\n std = torch.ones(data.size()) * self.std0\n if args.cuda:\n mean = mean.cuda()\n std = std.cuda()\n transformed = torch.add(torch.mul(data, std), mean)\n del mean, std\n return transformed.permute(1,0,2)", "def remove_mean(self, axes=None):\n axes = self._get_axes_numbers(axes)\n out = self\n if 0 in axes:\n out = self - self.mean(0)\n if 1 in axes:\n out = (self.T - self.mean(1)).T\n return out", "def standardize(self, x):\n if not self.image_resample:\n x = to_shape(x, self.image_shape, constant_values=-1024)\n elif self.image_resample:\n x = resample(x, self.image_shape)\n\n if self.preprocessing_function:\n x = self.preprocessing_function(x)\n if self.voxelwise_normalization:\n if self.voxel_bounds is not None:\n x = voxelwise_normalize(x, self.voxel_bounds)\n if self.voxelwise_center:\n if self.voxel_mean is not None:\n x -= self.voxel_mean\n if self.voxelwise_std_normalization:\n x /= (self.voxelwise_std + 1e-7)\n if self.samplewise_center:\n x -= np.mean(x, axis=self.channel_axis, keepdims=True)\n if self.samplewise_std_normalization:\n x /= (np.std(x, axis=self.channel_axis, keepdims=True) + 1e-7)\n return x", "def _partial_flatten_and_normalize(x):\n x = np.reshape(x, (x.shape[0], -1))\n return (x - np.mean(x)) / np.std(x)" ]
[ "0.81177664", "0.80826604", "0.80826604", "0.80826604", "0.7419382", "0.71917254", "0.70956707", "0.6826369", "0.6820255", "0.6728887", "0.6728887", "0.6612345", "0.6566342", "0.6563226", "0.6550982", "0.6509516", "0.6455834", "0.63640994", "0.63588613", "0.6349324", "0.63412935", "0.6332519", "0.6321751", "0.6307867", "0.62869465", "0.6263456", "0.6255304", "0.62326247", "0.6226396", "0.62217027", "0.62019926", "0.61769426", "0.6175598", "0.61720186", "0.61698973", "0.6167003", "0.61660534", "0.6165153", "0.615281", "0.61246747", "0.61063313", "0.61020243", "0.6097644", "0.60976094", "0.6089992", "0.60855556", "0.60831463", "0.60778475", "0.60689014", "0.6064404", "0.60612524", "0.60416037", "0.60416037", "0.60407543", "0.6040152", "0.60344255", "0.60235256", "0.60235256", "0.6009611", "0.60055137", "0.5988032", "0.5981243", "0.59799564", "0.5969473", "0.59464914", "0.593758", "0.59310794", "0.5928835", "0.5924795", "0.59041864", "0.59041864", "0.59041864", "0.59041864", "0.59041864", "0.58969545", "0.5896149", "0.5890543", "0.58870673", "0.5885076", "0.5874957", "0.58692616", "0.5868604", "0.5864678", "0.5858672", "0.58558893", "0.5845146", "0.5843636", "0.5831357", "0.5831357", "0.58305305", "0.58254915", "0.58253217", "0.5816864", "0.5799742", "0.57931983", "0.57928145", "0.5792619", "0.57736415", "0.57716614", "0.5770878" ]
0.68667185
7
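For context, a minimal runnable sketch of the mean-centering and standardizing pattern shared by the de_mean and standardize negatives listed above; the helper names and the sample list are illustrative assumptions, not part of the dataset record.

def mean(xs):
    # arithmetic mean of a non-empty list
    return sum(xs) / len(xs)

def std(xs):
    # population standard deviation; illustrative helper, not taken from the record
    m = mean(xs)
    return (sum((x - m) ** 2 for x in xs) / len(xs)) ** 0.5

def standardize(xs):
    # subtract the mean, then divide by the standard deviation
    # (the pattern the standardize/normalize negatives above implement)
    m, s = mean(xs), std(xs)
    return [(x - m) / s for x in xs]

# usage: standardized values have mean ~0
zs = standardize([2.0, 4.0, 6.0, 8.0])
assert abs(mean(zs)) < 1e-9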
assumes x has at least two elements
def variance(x):
    n = len(x)
    deviations = de_mean(x)
    return sum_of_squares(deviations) / (n - 1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def intersect(x):\n if len(x) < 2:\n return x\n\n # Make sure everybody have the same shape\n first_shape = tuple(x[0].shape)\n for pixmap in x[1:]:\n if first_shape != tuple(pixmap.shape):\n return []\n\n return [(np.prod(np.array(x), axis=0) > 0).astype(int)]", "def _checkSize(X1,X2):\n \n if len(X1) != len(X2):\n raise ValueError, 'Lists are differnt lengths'", "def _check_support(X: np.ndarray, **kwargs) -> None:\n\n X_union = (X == 0) | (X == 1)\n for k in range(2, kwargs[\"n\"] + 1):\n X_union = X_union | (X == k)\n\n assert (\n X_union.all()\n ), f\"x should be equal to integer from 0 to {kwargs['n']} (inclusive).\"", "def isvect(x):\n return isinstance(x,list) and len(x) == 4 and isgoodnum(x[0]) and isgoodnum(x[1]) and isgoodnum(x[2]) and isgoodnum(x[3])", "def any_two(iterable):\n return (len([i for i in iterable if i]) > 1)", "def friend(x):\n return [f for f in x if len(f) == 4]", "def test_check_X_too_many_dims():\n with pytest.raises(ValueError):\n check_X(np.ones((5,4,3)))", "def _check_support(X: np.ndarray, **kwargs) -> None:\n\n X_union = (X == 0) | (X == 1)\n for k in range(2, kwargs[\"k\"]):\n X_union = X_union | (X == k)\n\n assert (\n X_union.all()\n ), f\"x should be equal to integer from 0 to {kwargs['k']} (exclusive).\"", "def how_many(e, x):\n return count(np.asarray(x) == e)", "def add_ignore_empty(x, y):\n\n def _ignore(t):\n return t is None or (isinstance(t, tuple) and len(t) == 0)\n\n if _ignore(y):\n return x\n elif _ignore(x):\n return y\n else:\n return x + y", "def ensure_length(x, length):\n x = nest.flatten(x)\n if len(x) == 1:\n x *= length\n\n return x", "def n_wise(x: List[Any], size: Optional[int] = 2) -> Iterable:\n\n iterator = iter(x)\n\n return iter(lambda: tuple(islice(iterator, size)), ())", "def _check_support(X: np.ndarray, **kwargs) -> None:\n\n assert ((X == 0) | (X == 1)).all(), \"x should be equal to 0 or 1.\"", "def countX(lst, x):\n return lst.count(x)", "def countX(lst, x):\n return lst.count(x)", "def obs_with_data(x):\n num_toks = np.sum(x,axis=1)\n has_data = num_toks > 0\n return has_data", "def __contains__(self, x):\n # if not isinstance(x, int) or not x % 2:\n if not (isinstance(x, int) and (x % 2)):\n return False\n return True", "def one(xs):\n ret = False\n for x in xs:\n if x:\n if ret:\n return False\n ret = True\n return ret", "def multiple_elements(self) -> bool:\n return self.max is None or self.max > 1", "def retrun_1(x):\n ret = np.ones(len(x))\n return ret", "def test_two_items() -> None:\n a: list[int] = [40, 29]\n assert only_evens(a) == [40]\n assert sub(a, 0, 1) == [40]\n assert concat(a, [2, 30, 17]) == [40, 29, 2, 30, 17]", "def check_consistent_X(self, X):\n # X must be ndarray-type\n if not isinstance(X, np.ndarray):\n X = np.array(X)\n\n return X", "def __contains__(self,x):\n return 0 <= x < len(self)", "def sift_rest(x):\n return True", "def contains(self, x):\n # need more to assure its a real SSP - ie on right torus\n return (len(x) == self._shape[0])", "def get_odd_elements(x, start):\r\n result = []\r\n while len(result) is not x:\r\n if start % 2 != 0:\r\n result.append(start)\r\n start += 1\r\n return result", "def unique(x):\n try:\n tmp = x.flatten()\n if tmp.size == 0:\n return tmp\n tmp.sort()\n idx = concatenate(([True],tmp[1:]!=tmp[:-1]))\n return tmp[idx]\n except AttributeError:\n items = list(set(x))\n items.sort()\n return asarray(items)", "def check_1d(x, name):\n\n x = asarray(x)\n if size(x) == 1:\n x = asarray([x])\n if x.ndim == 2:\n raise Exception(\"Property: %s must be 
one-dimensional\" % name)\n x = x.flatten()\n\n return x", "def _maybe_repeat(self, x):\n if isinstance(x, list):\n assert len(x) == self.n\n return x\n else:\n return [x] * self.n", "def ifidentity(x):\n for idx,val in enumerate(x):\n if idx+ 1 != val:\n return False\n return True", "def expected(x, y):", "def expected(x, y):", "def expected(x, y):", "def is_vec(x):\n return x.ndim == 1 or (x.ndim == 2 and \n (x.shape[0] == 1 or x.shape[1] == 1))", "def isscalar(x):\n arrayed_x = asarray(x)\n return asarray(x).ndim == 0 and arrayed_x.dtype != 'object'", "def test_check_x(self):\n r1 = Rectangle(10, 2)\n self.assertEqual(r1.x, 0)\n\n r2 = Rectangle(2, 10, 6)\n self.assertEqual(r2.x, 6)\n\n r3 = Rectangle(5, 2, 3, 9, 12)\n self.assertEqual(r3.x, 3)\n\n r4 = Rectangle(5, 2, 0, 3, 12)\n self.assertEqual(r4.x, 0)", "def filtered_xyz(self) -> tuple[int, int, int]:", "def contains(self, x):\n return (isinstance(x, int) and x >= 0 and x < self._dim)", "def biterr(x, y):\n # Error checking\n assert_ndarray(x)\n assert_ndarray(y)\n assert_one_dimension(x)\n assert_one_dimension(y)\n if len(x) != len(y):\n raise ValueError(\"x and y must have same length\")\n\n num_errors = 0\n for ii in np.arange(len(x)):\n if x[ii] != y[ii]:\n num_errors += 1\n\n return num_errors", "def test_ones(self):\n argument = [1,1,1]\n expected = [0,2,2]\n double_preceding(argument)\n self.assertEqual(expected, argument, \"The list contains one 3 item.\")", "def X(self,value: list)->None:", "def constraints(self, x):\n pass", "def _check_triple(arr):\n\n count_x = 0\n count_o = 0\n count_empty = 0\n for idx, el in enumerate(arr):\n if el == VALUES.X:\n count_x += 1\n elif el == VALUES.O:\n count_o += 1\n elif el == VALUES.EMPTY:\n count_empty += 1\n if count_x == 2 and count_o == 0 and count_empty == 1:\n return VALUES.X, arr.index(VALUES.EMPTY)\n elif count_o == 2 and count_x == 0 and count_empty == 1:\n return VALUES.O, arr.index(VALUES.EMPTY)\n else:\n return None, -1", "def has_x(self):\n return any(map(lambda s: s.is_x, self))", "def checkRow(self, x):\n used = []\n for y in range(len(self.board[0])):\n cur = self.board[x][y]\n if cur not in used:\n if cur !=0:\n used += [cur]\n else:\n return False\n return True", "def test__tuple_raise_dimension_error(N):\n dummy_kernel_size = None\n\n with pytest.raises(ValueError):\n utils._tuple(dummy_kernel_size, N)", "def only_evens(x: list[int]) -> list[int]:\n i: int = 0\n evens = list()\n while (i < len(x)):\n if x[i] % 2 == 0: \n evens.append(x[i])\n i += 1\n else:\n i += 1\n return evens", "def __check_2d_and_reshape(X):\n if len(X.shape) == 1:\n X = np.reshape(X, (-1, X.shape[0]))\n return X", "def __len__(self):\n return len(self.x)", "def compute_dims_from_values(self, x):\n return ((),)", "def hasADouble(x, aux):\n for y in aux:\n if y[:-1] == x[:-1] and y[-1] != x[-1]:\n return True\n return False", "def true_g(x):\n obj1 = x[0]**2\n return (obj1,)", "def _indexable(X, y):\n result = [_validate_X(X), _validate_y(y)]\n check_consistent_length(*result)\n return result", "def union(x):\n if len(x) < 2:\n return x\n\n # Make sure everybody have the same shape\n first_shape = tuple(x[0].shape)\n for pixmap in x[1:]:\n if first_shape != tuple(pixmap.shape):\n return []\n\n return [np.bitwise_or.reduce(np.array(x).astype(int))]", "def _check_input(self, X):\n symbols = np.concatenate(X)\n if len(symbols) == 1: # not enough data\n raise ValueError(\"expected at least 1 observation \"\n \"but none found.\")\n elif (symbols < 0).any(): # contains negative integers\n raise 
ValueError(\"expected non-negative features \"\n \"for each observation.\")\n elif X.shape[1] > 1: # contains to many features\n raise ValueError(\"expected only 1 feature but got {0} \"\n \"for each observation.\".format(X.shape[1]))\n else:\n return True", "def qualify_octave(x):\n\n if len(x) == 3:\n return x\n\n if len(x) == 2:\n return (x[0], x[1], 0)\n\n raise ValueError(\"qualify_octave accepts tuples of two or three values\")", "def _check_support(X: np.ndarray, **kwargs) -> None:\n\n assert (X > 0).all() & isinteger(X), \"x should be greater then 0 and integer.\"", "def _check_support(X: np.ndarray, **kwargs) -> None:\n\n assert (X >= 0).all() & isinteger(\n X\n ), \"x should be greater or equal to 0 and integer.\"", "def tuples_2_bool(tuples, x):\n if np.ndim(tuples) == 1:\n tuples = [tuples]\n\n out = np.zeros(x.size, dtype=bool)\n for l, u in tuples:\n out[(x > l) & (x < u)] = True\n return out", "def parseNout(self, x):\r\n list =[]\r\n for i in range(len(self.matr[x])):\r\n if self.matr[x][i] :\r\n list.append(i)\r\n return list", "def test_case_07_side_too_small(self):\n self.__assert_equals_test_case([(-2, 2, 3), (0, 2, 3)], 'InvalidInput')", "def is_row_vec(x):\n return x.ndim == 2 and x.shape[0] == 1", "def _ensure_iterable(x):\n if isinstance(x[0], Iterable):\n if len(x) > 1:\n raise TypeError(\"Either Iterable or variable argument list expected\")\n return x[0]\n else:\n return x", "def join (xl,xr,y=1):\n\n X = (xl,xr)\n Y = split(y)\n while not seq(X,Y):\n y *= 2\n if sle(Y,X): y += 1\n Y = split(y)\n return y", "def is_2dlist(x):\n if not isinstance(x, list):\n return False\n if len(x) == 0:\n return True\n\n return all(isinstance(item, list) for item in x)", "def only_even(mixed_list):", "def part_one():\n return len(numpy.where(grid > 1)[0])", "def is_constant(x):\n x = np.array(x)\n result = np.all(x == x[1])\n return result", "def _inner_preduce(x):\n if len(x) <= 2:\n return _sfn(x)\n paired_x = partition_all(2, x)\n new_x = tuple(pool.map(_sfn, paired_x))\n return _inner_preduce(new_x)", "def test_ndim_fail():\n lons = lats = np.array([0]).reshape(-1, 1, 1, 1)\n emsg = \"Require at most 3-D\"\n with pytest.raises(ValueError, match=emsg):\n _ = to_cartesian(lons, lats)", "def checkLists(self):\n self.x = self.checkList(self.x)\n self.y = self.checkList(self.y)\n return", "def test__chk_asarray(self):\r\n\r\n exp = (array([[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3]]), 0)\r\n obs = _chk_asarray([[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3]], 0)\r\n assert_almost_equal(obs[0], exp[0])\r\n self.assertEqual(obs[1], exp[1])", "def mean_if_many(x):\n return list(x)[0] if len(x) == 1 else np.mean(x)", "def get_shape(x):\n if isinstance(x, list) and len(x) > 0:\n shapes = [get_shape(subx) for subx in x]\n if any([s != shapes[0] for s in shapes[1:]]):\n raise ValueError('Parameter dimension not consistent: {}'.format(x))\n return (len(x), ) + shapes[0]\n else:\n if hasattr(x, '_shape_tuple'):\n return x._shape_tuple() # method to return the shape as a tuple\n elif hasattr(x, 'shape'):\n return tuple(x.shape)\n else:\n return ()", "def _check_input_timeseries(x: np.ndarray) -> np.ndarray:\n if not isinstance(x, np.ndarray):\n raise ValueError(\"The input time series must be a numpy array.\")\n if x.ndim <= 0 or x.ndim >= 4:\n raise ValueError(\n \"The input time series must have more than 0 dimensions and\"\n \"less than 4 dimensions.\"\n )\n if x.ndim == 3:\n return x[0]\n return x", "def getNumElements(self):\n return 1", "def is_vector(x):\r\n return len(x.shape) == 1", "def 
initial_conditions_2(x):\n u1 = 1\n return u1 if 1 <= x <= 2 else 0", "def add_elements(arr, k):\n\n return sum(elem for elem in arr[k] if len(str(elem)) <= 2)", "def _check_shape(self, X):\n return all([X.shape[i] == self.train_shape[i] for i in range(2)])", "def __contains__(self, x):\n indexes = self.get_indexes(x)\n return self.sketch[indexes] > 0", "def check_if_double(tile: list):\n return tile[0] == tile[1]", "def has_equal_values_vec(x):\n return jnp.all(x == x[0])", "def test_multi_same(nothing_list):\n result = multi_same_list(nothing_list)\n assert result[1][2] == 0\n assert result[0][2] == 0", "def contains(self, x):\n raise NotImplementedError", "def mystery_1a_flat(x: int, y: set[int]) -> str:\n if x > 1 and sum({n ** 2 for n in y}) >= 10:\n return 'Mario'\n else:\n return 'David'", "def ispoint(x):\n if isvect(x) and x[3] > 0.0:\n return True\n return False", "def test_one_element_input(self):\n res = merge_sort([1])\n self.assertEqual(res, [1])", "def _assert_non_empty(iterable):\n first_elem = six.next(iterable, None)\n assert first_elem is not None, first_elem\n return itertools.chain([first_elem], iterable)", "def is_empty(x):\n\n if not x:\n return None\n else:\n return x", "def is_3dlist(x):\n if not isinstance(x, list):\n return False\n if len(x) == 0:\n return True\n for sub_x in x:\n if not is_2dlist(sub_x):\n return False\n\n return True", "def count(cls):\n return lambda x,y: ((type(x)==int) and [x+1] or ((y==None) and [1] or [2]))[0]", "def check_all(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim):\n\tcheck_dimensions(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)\n\tcheck_coords(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)\n\tcheck_consistent(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)\n\tcheck_type(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)\n\t#check_transposed(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)", "def is_line_vec(x):\n return x.ndim == 1", "def identity_filter(element_tuple):\r\n\treturn element_tuple", "def test_02_this_step_will_fail(self):\n\n self.assertIn(5, arr)", "def softplus_list(x_):\n y_ = [np.log(1 + np.exp(-np.abs(x_[0]))) + np.maximum(x_[0], 0)]\n for i in range(1, len(x_)):\n if x_[i] is not []:\n y_ = y_ + [np.log(1 + np.exp(-np.abs(x_[i]))) + np.maximum(x_[i], 0)]\n return y_", "def test_list_size_one_no_even(self):\n argument = [1]\n with self.assertRaises(ValueError):\n find_an_even(argument)", "def _isscalar(x):\n return np.isscalar(x) or hasattr(x, \"shape\") and x.shape == ()", "def is_unique(x):\n return len(set(x)) == len(x)", "def check(x):\n rotation_a = rotation_b = 0\n for a, b in zip(A, B):\n if x != a and x != b:\n return -1\n elif x != a:\n # x != a and x == b\n rotation_a += 1\n elif x != b:\n # x == a and x != b\n rotation_b += 1\n # else\n # x == a and x == b\n # do nothing since no rotation\n\n # Minimum rotation to have all elements equals x in A or B\n return min(rotation_a, rotation_b)" ]
[ "0.59251857", "0.5914457", "0.58948374", "0.5855917", "0.58345103", "0.5804906", "0.5720883", "0.57127684", "0.56765395", "0.56101656", "0.5559036", "0.5556334", "0.55350626", "0.5518712", "0.5518712", "0.55062586", "0.5500494", "0.54991597", "0.54929", "0.54871774", "0.54791915", "0.54639775", "0.5451748", "0.54504937", "0.54479945", "0.5446888", "0.5415532", "0.5411754", "0.54107875", "0.5397021", "0.53744614", "0.53744614", "0.53744614", "0.535892", "0.5356629", "0.5354666", "0.5316603", "0.53082526", "0.5306892", "0.5302941", "0.53025204", "0.53017986", "0.5292622", "0.5285412", "0.52816063", "0.5270216", "0.5262286", "0.52578986", "0.5248968", "0.5245707", "0.52414995", "0.52337515", "0.5232083", "0.5230217", "0.5227575", "0.5215988", "0.5204702", "0.51800543", "0.516497", "0.51516694", "0.5149769", "0.51383007", "0.51271236", "0.5114233", "0.5110442", "0.51075774", "0.5100892", "0.5092254", "0.50885844", "0.50885123", "0.5088303", "0.5076657", "0.5070776", "0.50692385", "0.5064718", "0.50592285", "0.50571465", "0.5051669", "0.50507045", "0.50503224", "0.5046895", "0.5041962", "0.5041373", "0.5036838", "0.5035358", "0.50310636", "0.5026658", "0.5026424", "0.50233215", "0.5021376", "0.50206786", "0.5014719", "0.50131166", "0.50107455", "0.500721", "0.50060856", "0.50058156", "0.5005002", "0.50004125", "0.4997711", "0.4987244" ]
0.0
-1
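A self-contained sketch of the variance record above, filling in the helpers (mean, de_mean, sum_of_squares) that the snippet assumes but does not define; the usage check and its sample list are illustrative assumptions, not part of the dataset record.

def mean(xs):
    return sum(xs) / len(xs)

def de_mean(xs):
    # deviations from the mean, as in the de_mean snippets earlier in this file
    x_bar = mean(xs)
    return [x - x_bar for x in xs]

def sum_of_squares(xs):
    return sum(x * x for x in xs)

def variance(x):
    # assumes x has at least two elements (sample variance, n - 1 denominator)
    n = len(x)
    deviations = de_mean(x)
    return sum_of_squares(deviations) / (n - 1)

# usage: [2, 4, 6, 8] has deviations [-3, -1, 1, 3], so variance = 20 / 3
assert abs(variance([2.0, 4.0, 6.0, 8.0]) - 20.0 / 3.0) < 1e-9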
returns the num_columns x num_columns matrix whose (i,j)th entry is the correlation between columns i and j of data
def correlation_matrix(data):
    _, num_columns = shape(data)

    def matrix_entry(i, j):
        return correlation(get_column(data, i), get_column(data, j))

    return make_matrix(num_columns, num_columns, matrix_entry)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def correlation_matrix(data):\n\n _, num_columns = shape(data)\n\n def matrix_entry(i, j):\n return correlation(get_column(data, i), get_column(data, j))\n\n return make_matrix(num_columns, num_columns, matrix_entry)", "def correlation_matrix(data):\n\n _, num_columns = shape(data)\n\n def matrix_entry(i, j):\n return correlation(get_column(data, i), get_column(data, j))\n\n return make_matrix(num_columns, num_columns, matrix_entry)", "def correlation(data):\n return corrcoef(np.transpose(np.reshape(data, ((data.shape[0] * data.shape[1]), data.shape[2]))))", "def _calc_correlation_matrix(data, kind='correlation'):\n correlation_measure = ConnectivityMeasure(kind=kind)\n correlation_matrix = correlation_measure.fit_transform([data])[0]\n\n return correlation_matrix", "def correlation_matrix(self):\n correlation_matrix = self.model.covariance.copy()\n sigmaD = np.sqrt(np.diag(correlation_matrix))\n for ii in range(correlation_matrix.shape[0]):\n for jj in range(correlation_matrix.shape[1]):\n correlation_matrix[ii, jj] /= sigmaD[ii] * sigmaD[jj]\n return correlation_matrix", "def _listcorr(a):\n corrs = np.zeros((a[0].shape[1], len(a), len(a)))\n for i in range(len(a)):\n for j in range(len(a)):\n if j > i:\n corrs[:, i, j] = [np.nan_to_num(np.corrcoef(ai, aj)[0, 1])\n for (ai, aj) in zip(a[i].T, a[j].T)]\n return corrs", "def correlate_columns(matrix):\n return np.dot(matrix.T, matrix) / (la.norm(matrix) ** 2)", "def test_correlation_matrix(self):\r\n a = [2, 4, 6, 8]\r\n b = [1.5, 1.4, 1.2, 1.1]\r\n c = [15, 10, 5, 20]\r\n m = correlation_matrix([a, b, c])\r\n self.assertFloatEqual(m[0, 0], [1.0])\r\n self.assertFloatEqual([m[1, 0], m[1, 1]], [correlation(b, a)[0], 1.0])\r\n self.assertFloatEqual(\r\n m[2], [correlation(c, a)[0], correlation(c, b)[0],\r\n 1.0])", "def compute_correlation_matrix_with_incomplete_data(df, correlation_type):\n X = copy.deepcopy(pd.DataFrame(df)) # make sure we are using a dataframe to do computations. \n assert correlation_type in ['spearman', 'pearson', 'covariance']\n X = X.astype(np.float64) # if we do not do this for some reason it ignores some columns in computing the correlation matrix. \n # which ends up being the wrong shape. \n if correlation_type == 'covariance':\n C = X.cov() * (len(df) - 1) / len(df) # need correction factor so it's consistent with ddof = 0. Makes little difference. 
\n else:\n C = X.corr(correlation_type)\n C = np.array(C)\n assert C.shape[0] == C.shape[1]\n assert C.shape[0] == len(df.columns)\n\n \n for i in range(len(C)):\n for j in range(len(C)):\n if np.isnan(C[i][j]):\n print(\"Warning: entry of covariance matrix is nan; setting to 0.\")\n C[i][j] = 0\n non_missing_data_counts = (~pd.isnull(X)).sum(axis = 0)\n return C, non_missing_data_counts", "def FormCorrelationMatrix(mat):\n nVars = len(mat[0])\n N = len(mat)\n \n res = numpy.zeros((nVars,nVars),'d')\n for i in range(nVars):\n x = mat[:,i]\n sumX = sum(x)\n sumX2 = sum(x*x)\n for j in range(i,nVars):\n y = mat[:,j]\n sumY = sum(y)\n sumY2 = sum(y*y)\n numerator = N*sum(x*y) - sumX*sumY\n denom = numpy.sqrt((N*sumX2-sumX**2)*(N*sumY2-sumY**2))\n if denom != 0.0:\n res[i,j] = numerator/denom\n res[j,i] = numerator/denom\n else:\n res[i,j] = 0\n res[j,i] = 0\n return res", "def FormCorrelationMatrix(mat):\n nVars = len(mat[0])\n N = len(mat)\n\n res = numpy.zeros((nVars, nVars), 'd')\n for i in range(nVars):\n x = mat[:, i]\n sumX = sum(x)\n sumX2 = sum(x * x)\n for j in range(i, nVars):\n y = mat[:, j]\n sumY = sum(y)\n sumY2 = sum(y * y)\n numerator = N * sum(x * y) - sumX * sumY\n denom = numpy.sqrt((N * sumX2 - sumX**2) * (N * sumY2 - sumY**2))\n if denom != 0.0:\n res[i, j] = numerator / denom\n res[j, i] = numerator / denom\n else:\n res[i, j] = 0\n res[j, i] = 0\n return res", "def correlate_rows(matrix):\n return np.dot(matrix, matrix.T) / (la.norm(matrix) ** 2)", "def correlation_d(mat):\n\n print(\"DO NOT USE. BROKEN?\")\n\n if mat.ndim != 2:\n raise ValueError(\"mat must be a 2d matrix\")\n if np.any(mat > 1) or np.any(mat < 0):\n raise ValueError(\"mat must be binary\")\n\n N = mat.size\n g = np.diagonal(mat)\n # g = np.tril(mat, -1) # g is the sum over the heavside used in Grassberger\n # g = g[g.nonzero()]\n g = g.sum()\n\n return (2.0 / N * (N - 1)) * g", "def correlation(C):\n\n if type(C) is not np.ndarray:\n raise TypeError('C must be a numpy.ndarray')\n if len(C.shape) < 2 or C.shape[0] is not C.shape[1]:\n raise ValueError('C must be a 2D square matrix')\n return C / np.sqrt(np.outer(np.diagonal(C), np.diagonal(C)))", "def calculate_correlation_matrix(X, Y=None):\n\tif Y is None:\n\t\tY = X\n\tn_samples = np.shape(X)[0]\n\tcovariance = (1 / n_samples) * (X - X.mean(0)).T.dot(Y - Y.mean(0))\n\tstd_dev_X = np.expand_dims(calculate_std_dev(X), 1)\n\tstd_dev_Y = np.exapnd_dims(calculate_std_dev(Y), 1)\n\tcorrelation_matrix = np.divide(covariance, std_dev_X.dot(std_dev_Y.T))\n\treturn np.array(correlation_matrix, dtype=float)", "def correlation_matrix(series, as_rows=True):\r\n return corrcoef(series, rowvar=as_rows)\r\n # unused codes below\r\n if as_rows:\r\n return corrcoef(transpose(array(series)))\r\n else:\r\n return corrcoef(array(series))", "def correlation_matrix(self, layout={}, **kwargs):\n df = self._data.corr()\n kwargs.update({\n 'zmin': -1, 'zmax': 1,\n 'colors': 'rdbu', 'ncolors': 9,\n 'xgap': 3, 'ygap': 3, 'dtick': 1,\n 'colorbar': {'x': 1 - 0.22},\n })\n\n layout = recursive_update(\n layout, updater={\n 'xaxis': {'showgrid': False, 'zeroline': False},\n 'yaxis': {'showgrid': False, 'zeroline': False},\n })\n\n # square for 1920x1080 screens in awating for better plotly option\n layout = recursive_update(\n layout, updater={\n 'yaxis': {'domain': [0, 1]},\n 'xaxis': {'domain': [0.28215, 1 - 0.28215]},\n })\n\n return df.iplot.heatmap(layout=layout, **kwargs)", "def correlation(data, method, caption):\n columns = list(data)\n coefficients = 
data.astype(float).corr(method=method)\n results = []\n for i in range(len(columns)):\n for j in range(i + 1, len(columns)):\n coefficient = coefficients[columns[i]][columns[j]]\n results.append((\n abs(coefficient), coefficient,\n columns[i] + ' x ' + columns[j]))\n print('# ' + caption + ', ' + method)\n for result in reversed(sorted(results)):\n abs_coefficient, coefficient, columns_pair = result\n print (coefficient, columns_pair)", "def Corr(x,y):\n \n cocoeff1 = np.empty((y.shape[1],y.shape[2]))\n cocoeff2 = np.empty((y.shape[1],y.shape[2]))\n for i in xrange(y.shape[1]):\n for j in xrange(y.shape[2]):\n cocoeff1[i,j],cocoeff2[i,j] = sts.pearsonr(x[:,i,j],y[:,i,j])\n \n print 'Completed: Correlation calculations!'\n \n return cocoeff1, cocoeff2", "def _compute_correlations(self, data):\n mappings = self.mappings_\n n_channels, n_times = data.shape\n\n # get the predictions\n y_pred = data.T.dot(mappings.T)\n y_pred = y_pred.reshape((n_times, len(self.picks),\n self.n_resample), order='F')\n # pool them using median\n # XXX: weird that original implementation sorts and takes middle value.\n # Isn't really the median if n_resample even\n y_pred = np.median(y_pred, axis=-1)\n # compute correlation\n num = np.sum(data.T * y_pred, axis=0)\n denom = (np.sqrt(np.sum(data.T ** 2, axis=0)) *\n np.sqrt(np.sum(y_pred ** 2, axis=0)))\n\n corr = num / denom\n return corr", "def calc_ic(data):\n return scs.spearmanr(data[:, 0], data[:, 1]).correlation", "def determine_correlation(var1,var2):\n v1 = np.array(var1)\n v2 = np.array(var2)\n mat = np.c_[(v1,v2)]# np.vstack((v1,v2)) #\n corr = np.corrcoef(mat.T)\n return corr[0][1]", "def correlation(C):\n if not isinstance(C, np.ndarray):\n raise TypeError(\"C must be a numpy.ndarray\")\n shape = C.shape\n if (len(shape) != 2) or shape[0] != shape[1]:\n raise ValueError(\"C must be a 2D square matrix\")\n\n diagonal = np.diag(C)\n\n # standard deviation\n std = np.sqrt(np.expand_dims(diagonal, axis=0))\n\n correlation = C / np.matmul(std.T, std)\n\n return correlation", "def cov_to_corr(matrix):\n sqrtdiag = np.sqrt(np.diag(matrix))\n return matrix / np.outer(sqrtdiag, sqrtdiag)", "def matthews_corr(self):\n a, c, d, b = self.to_ccw()\n p1, q1 = a + b, c + d\n p2, q2 = a + c, b + d\n n = p1 + q1\n\n if n == 0:\n return np.nan\n elif a == n or d == n:\n # only one (diagonal) cell is non-zero\n return 0.5\n elif b == n or c == n:\n # only one (non-diagonal) cell is non-zero\n return -0.5\n elif p1 == n or p2 == n or q1 == n or q2 == n:\n # one row or column is zero, another non-zero\n return 0.0\n\n return _div(self.covar(), sqrt(p1 * q1 * p2 * q2))", "def corrcoef(self):\r\n return np.corrcoef(self.input.data)", "def calculate_correlation(self):\n self.network.index_nodes()\n self._calculate_dist()\n pearson_correlation, pearson_pvalue = scipy.stats.pearsonr(self.dist[:,0], self.dist[:,1])\n spearman_correlation, spearman_pvalue = scipy.stats.spearmanr(self.dist[:,0], self.dist[:,1])\n return pearson_correlation, pearson_pvalue, spearman_correlation, spearman_pvalue", "def get_corrmat(self, f):\n return self._get_corrmat(f)", "def calculate_correlations(input_data, index_col, cat_features, exclu_elements): \r\n try:\r\n # encode the categorical features\r\n encoded_data = pd.get_dummies(input_data,columns=cat_features,drop_first=True)\r\n\r\n pd_transposed_data = encoded_data.set_index('Style_display_code').T\r\n\r\n # get the number of items\r\n items_list = [str(a) for a in pd_transposed_data.columns]\r\n\r\n print(\"Number of items to correlate 
:{}_Timestamp:{}\".format(str(len(items_list)), \r\n format(str(datetime.now()))))\r\n \r\n\r\n #compute correlations and save the pickle file\r\n# matrix = pd_transposed_data.corr().values\r\n# pickle.dump(matrix, open(staging_dir+ '/corr_matrix_output_py3.p', 'wb'))\r\n \r\n # read from the saved pickle file - ONLY FOR CONSECUTIVE RUNS, TO SAVE TIME\r\n matrix = pickle.load(open(staging_dir+ '/corr_matrix_output_py3.p', \"rb\" ) )\r\n\r\n print(\"Corr Matrix size:{}_Timestamp:{}\".format(str(matrix.size),\r\n format(str(datetime.now()))))\r\n\r\n except Exception as e:\r\n print(\" Error !!\", e)\r\n \r\n # return the top correlated items\r\n return top_correlateditems(items_list,matrix, index_col, exclu_elements)", "def get_corr(self):\r\n cov = self.data.values\r\n with np.errstate(divide='ignore', invalid='ignore'):\r\n coeff = np.true_divide(1, self.get_std().values)\r\n coeff[~ np.isfinite(coeff)] = 0 # -inf inf NaN\r\n corr = np.multiply(np.multiply(cov, coeff).T, coeff)\r\n df = pd.DataFrame(\r\n corr,\r\n index=self.data.index,\r\n columns=self.data.columns,\r\n )\r\n return self.__class__(df)", "def fast_corr(df, col_name):\n\n if not isinstance(df, pd.DataFrame):\n raise TypeError(\"The type of the input data must be dataframe.\")\n\n if not isinstance(col_name, list):\n raise TypeError(\"The col_name must be list.\")\n\n if all(isinstance(item, str) for item in col_name) is False and all(\n isinstance(item, int) for item in col_name) is False:\n raise ValueError(\n \"The col_name must be a list of strings or a list of integers.\")\n\n if len(col_name) < 2:\n raise ValueError(\n \"At least two columns must be selected for correlation analysis.\")\n\n if all(isinstance(item, str) for item in col_name) is True and all(\n elem in df.columns.to_list() for elem in col_name) is False:\n raise ValueError(\"The column names were not found.\")\n\n if all(isinstance(item, int) for item in col_name) is True and max(\n col_name) > (df.shape[1] - 1):\n raise ValueError(\"The column indexes were out of range.\")\n\n if all(isinstance(item, str) for item in col_name):\n data = df.loc[:, col_name]\n else:\n data = df.iloc[:, col_name]\n\n data2 = data._get_numeric_data()\n rm_n = data.shape[1] - data2.shape[1]\n print(\"Removed\", rm_n, \"non-numberical columns from your selected columns\")\n\n sns.set(style=\"white\")\n corr = data2.corr()\n mask = np.triu(np.ones_like(corr, dtype=np.bool))\n f, ax = plt.subplots(figsize=(9, 11))\n ax.set_title('Correlation Matrix', size=20)\n ax.tick_params(axis='x', labelsize=15)\n ax.tick_params(axis='y', labelsize=15)\n\n cmap = sns.diverging_palette(220, 20, as_cmap=True)\n p = sns.heatmap(corr, mask=mask, cmap=cmap, vmin=-1, vmax=1, center=0,\n square=True, linewidths=.5, cbar_kws={\"shrink\": .5})\n p.set_yticklabels(p.get_yticklabels(), rotation=360)\n return p", "def correlation(result, reference):\n \n r = np.corrcoef(result, reference)[0,1]\n \n return r", "def einsum_correlation(X, Y_i, type=\"pearson\"):\n\n if type == \"pearson\":\n X -= X.mean(axis=1)[:, None]\n Y_i -= np.nanmean(Y_i)\n elif type == \"cosine\":\n X, Y_i = X, Y_i\n elif type == \"spearman\":\n # check this\n X = stats.rankdata(X, axis=1)\n Y_i = stats.rankdata(Y_i)\n elif type == \"kendalltau\":\n corr = np.array([stats.kendalltau(x, Y_i)[0] for x in X])\n return corr[None, :]\n\n X_norm, Y_norm = norm(X, axis=1), norm(Y_i)\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n if Y_norm == 0:\n corr = np.zeros(X_norm.shape[0])\n else:\n corr = np.einsum(\"ij, 
j\", X, Y_i) / (X_norm * Y_norm)[None, :]\n\n return corr", "def _compute_correlation(self, metric_array, corr_type):\n transposed_channels = np.transpose(metric_array)\n df = pd.DataFrame(transposed_channels)\n corr_matrix = df.corr().values\n upper_diag_idxs = np.triu_indices(self.n_channels, 1)\n upper_diag_elems = corr_matrix[upper_diag_idxs]\n\n correlation_dict = {}\n for i, j, corr in zip(*upper_diag_idxs, upper_diag_elems):\n corr_id = 'ch_' + corr_type + '_' + str(i) + '_' + str(j)\n correlation_dict.update(\n {corr_id: corr}\n )\n return correlation_dict", "def linear_rate_corr_matrix(R, which='corrcoef'):\n if which not in ('corrcoef', 'sim'):\n raise ValueError, 'invalid comparison type specified by which keyword'\n \n # Set us up the matrix\n npixels = 100\n M = numpy.empty((npixels,)*2, 'd')\n \n # Scan the diagonal from (0,0) to (100,100)\n if which is 'corrcoef':\n print 'Pearson correlation matrix...'\n for i in xrange(npixels):\n r_i = R[:,npixels-i-1, i]\n for j in xrange(npixels):\n r_j = R[:,npixels-j-1, j]\n r_corr = pearsonr(r_i, r_j)[0]\n if numpy.isnan(r_corr) or r_corr < 0:\n M[i, j] = M[j, i] = 0.0\n else:\n M[i, j] = M[j, i] = r_corr\n elif which is 'sim':\n print 'Cosine similarity matrix...'\n for i in xrange(npixels):\n r_i = R[:,npixels-i-1, i]\n r_i_norm = numpy.sqrt(numpy.dot(r_i, r_i))\n for j in xrange(npixels):\n r_j = R[:,npixels-j-1, j]\n r_j_norm = numpy.sqrt(numpy.dot(r_j, r_j))\n r_sim = numpy.dot(r_i, r_j) / (r_i_norm * r_j_norm)\n if numpy.isnan(r_sim):\n M[i, j] = M[j, i] = 0.0\n else:\n M[i, j] = M[j, i] = r_sim\n else:\n raise ValueError, 'invalid correlation measure specified: \\'%s\\''%which\n\n return M", "def calculate_correlation(data):\n pass", "def get_correlation(df):\n frame_correlation = df.corr()\n return frame_correlation", "def _c_correlation(cls, X, y):\n su = np.zeros(X.shape[1])\n for i in np.arange(X.shape[1]):\n su[i] = cls._symmetrical_uncertainty(X[:, i], y)\n return su", "def get_corr(self):\n return self.corr_matrix, self.corr_signature", "def _mn_cor_ ( self , size = -1 , root = False ) :\n #\n cov = self.cov ( size , root )\n #\n from math import sqrt\n #\n if isinstance ( cov , ROOT.TMatrix ) :\n\n size = cov.GetNrows()\n root = True\n \n else : size = cov.kRows\n\n ## use ROOT matrices \n if root : cor = ROOT.TMatrix ( size , size )\n else : cor = cov.__class__ () \n\n for i in range(0, size ) :\n \n d_i = cov ( i , i )\n cor [ i , i ] = 1 if 0 < d_i else 0\n \n for j in range ( i + 1 , size ) :\n \n d_j = cov ( j , j )\n \n if 0 != cov ( i , j ) and 0 < d_i and 0 < d_j :\n \n if root and _rv < 6 : cor [ i ] [ j ] = cov ( i , j ) / sqrt ( d_i * d_j )\n else : cor [ i , j ] = cov ( i , j ) / sqrt ( d_i * d_j )\n \n else :\n \n if root and _rv < 6 : cor [ i ] [ j ] = 0 \n else : cor [ i , j ] = 0\n\n return cor", "def cofactorMatrix(self):\n returnvalue = Matrix()\n for i in range(self._height):\n newRow = list()\n for j in range(self._width):\n newRow.append(self.cofactor(i, j))\n returnvalue.addRow(*newRow)\n return returnvalue", "def mcorr(x,y):\n return ((np.ma.dot(x,y) / (x.shape[0] - 1) / y.std(axis=0)) / x.std())", "def pairwise_corr(df1, df2):\n res = []\n for i in range(df2.shape[1]):\n res.append(df1.corrwith(df2.ix[:, i]))\n res = pd.concat(res, axis=1)\n res.columns = df2.columns\n return res", "def plot_corr_matrix(df):\n f = plt.figure(figsize=(19, 15))\n plt.matshow(df.corr().abs(), fignum=f.number)\n cb = plt.colorbar()\n cb.ax.tick_params(labelsize=14)\n plt.title('Correlation Matrix', fontsize=16)", 
"def correlation(G, variables = [], conditionants = []):\n \n cov = covariance(G, variables = variables, \n conditionants = conditionants)\n k = cov.shape[0]\n sds = sp.Matrix([1/sp.sqrt(cov[i, i]) for i \n in range(0, k)]*k).reshape(k, k)\n \n cor = cov.multiply_elementwise(sds).multiply_elementwise(sds.T)\n return cor.applyfunc(sp.simplify)", "def _mat_mat_corr_sparse(\n X: csr_matrix,\n Y: np.ndarray,\n) -> np.ndarray:\n n = X.shape[1]\n\n X_bar = np.reshape(np.array(X.mean(axis=1)), (-1, 1))\n X_std = np.reshape(\n np.sqrt(np.array(X.power(2).mean(axis=1)) - (X_bar ** 2)), (-1, 1)\n )\n\n y_bar = np.reshape(np.mean(Y, axis=0), (1, -1))\n y_std = np.reshape(np.std(Y, axis=0), (1, -1))\n\n with np.warnings.catch_warnings():\n np.warnings.filterwarnings(\n \"ignore\", r\"invalid value encountered in true_divide\"\n )\n return (X @ Y - (n * X_bar * y_bar)) / ((n - 1) * X_std * y_std)", "def correl_dist(corr):\n dist = ((1 - corr) / 2.0) ** 0.5 # distance matrix\n return dist", "def plot_correlations(data):\n\n from matplotlib import cm\n \n cols = data.columns.tolist()\n fig = plt.figure(figsize=(12,12))\n ax = fig.add_subplot(111)\n \n # Plot absolute value of pairwise correlations since we don't\n # particularly care about the direction of the relationship,\n # just the strength of it\n cax = ax.matshow(data.corr().abs(), cmap=cm.YlOrRd)\n \n fig.colorbar(cax)\n ax.set_xticks(np.arange(len(cols)))\n ax.set_yticks(np.arange(len(cols)))\n ax.set_xticklabels(cols)\n ax.set_yticklabels(cols)", "def correlation(x_items, y_items):\r\n return correlation_test(x_items, y_items, method='pearson', tails=None,\r\n permutations=0)[:2]", "def corr(self) -> 'DataFrame':\n if self._is_string():\n raise TypeError('DataFrame consists only of strings. Must have int, float, '\n 'or bool columns')\n\n x: ndarray = self._values_number()\n if x.dtype.kind == 'i':\n x0: ndarray = x[0]\n x_diff: ndarray = x - x0\n Exy: ndarray = (x_diff.T @ x_diff)\n Ex: ndarray = x_diff.sum(0)[np.newaxis, :]\n ExEy: ndarray = Ex.T @ Ex\n counts: Union[int, ndarray] = len(x)\n Ex2: ndarray = (x_diff ** 2).sum(0)\n\n else:\n x0 = _math.get_first_non_nan(x)\n x_diff = x - x0\n x_not_nan: ndarray = (~np.isnan(x)).astype(int)\n\n # get index of first non nan too and check for nan here\n x_diff_0: ndarray = np.nan_to_num(x_diff)\n counts = (x_not_nan.T @ x_not_nan)\n Exy = (x_diff_0.T @ x_diff_0)\n Ex = (x_diff_0.T @ x_not_nan)\n ExEy = Ex * Ex.T\n Ex2 = (x_diff_0.T ** 2 @ x_not_nan)\n\n with np.errstate(invalid='ignore'):\n cov: ndarray = (Exy - ExEy / counts) / (counts - 1)\n stdx: ndarray = (Ex2 - Ex ** 2 / counts) / (counts - 1)\n stdxy: ndarray = stdx * stdx.T\n corr: ndarray = cov / np.sqrt(stdxy)\n\n new_data: Dict[str, ndarray] = {'f': np.asfortranarray(corr)}\n new_column_info: ColInfoT = {'Column Name': utils.Column('S', 0, 0)}\n new_columns: ndarray = np.empty(x.shape[1] + 1, dtype='O')\n new_columns[0] = 'Column Name'\n\n i: int = 0\n for col, dtype, loc in self._col_info_iter(): # type: str, str, int\n if dtype not in 'ifb':\n continue\n new_column_info[col] = utils.Column('f', i, i + 1)\n new_columns[i + 1] = col\n i += 1\n new_data['S'] = np.asfortranarray(new_columns[1:])[:, np.newaxis]\n return self._construct_from_new(new_data, new_column_info,\n np.asarray(new_columns, dtype='O'))", "def correlation_matrix(X, cols=None, dropna=True, ax=None):\n try:\n import seaborn as sns\n except ImportError:\n raise ImportError(\"This function requires seaborn. 
\"\n \"You can install it via $ pip install seaborn.\")\n if not isinstance(X, pd.DataFrame):\n raise TypeError(f'\"X\" must be a Pandas DataFrame, not {type(X)}.')\n if cols is not None:\n X = X[cols]\n # Calculate column-wise correlation:\n corr = X.corr()\n if dropna:\n corr.dropna(how='all', axis=0, inplace=True)\n corr.dropna(how='all', axis=1, inplace=True)\n if ax is None:\n ax = plt.gca()\n sns.heatmap(corr, mask=np.triu(np.ones(corr.shape), k=1).astype(bool),\n vmin=-1, vmax=1, cmap='coolwarm', linewidths=1, ax=ax,\n cbar_kws={\"shrink\": .75})\n return ax", "def correlation_1D2D_datainput(df1, df2): # correlation function from Rémi (local.py)\n\n df2 = df2.transpose() # sigs_2D.transpose()\n cov = np.dot(df1 - df1.mean(), df2 - df2.mean(axis=0)) / (df2.shape[0] - 1)\n # ddof=1 necessary because covariance estimate is unbiased (divided by n-1)\n p_var = np.sqrt(np.var(df1, ddof=1) * np.var(df2, axis=0, ddof=1))\n r = cov / p_var\n return r", "def corrcoef(self):\n return self.cov / self.std / self.std[:, None]", "def computeCorrelationMatrix(allData,stocklist):\n \n correlation=allData[[i for i in stocklist]]\n \n correlation=pd.DataFrame({y:x for y,x in zip(stocklist,np.corrcoef(correlation,rowvar=False))})\n \n correlation[\"index\"]=pd.DataFrame(stocklist)\n correlation=correlation.set_index(\"index\")\n \n \"Create legends for the heatmap of the correlation matrix\"\n \n combinations=np.array([i[0]+i[1] for i in list(itertools.product(stocklist,repeat=2))])\n '''shape(9,9)'''\n \n \"Create a correlation matrix with the stock choises\"\n \n snb.heatmap(correlation,cmap='Spectral')\n \n return correlation", "def cov_to_corr(cy):\n \n N = len(cy)\n \n corr = np.zeros((N,N))\n \n sd = np.sqrt(np.diag(cy))\n \n sdinv = np.diag(1/sd)\n \n corr = np.dot(np.dot(sdinv,cy),sdinv)\n \n #print(np.shape(corr))\n return corr", "def _calculate_correlation(self, anomaly):\n if self.silence_level <= 1:\n print(\"Calculating partial correlation matrix at zero lag from \"\n \"anomaly values...\")\n\n # Calculate the correlation matrix, cast to float64 for precise\n # calculation of inverse matrix.\n C = np.corrcoef(anomaly.transpose()).astype(\"float64\")\n\n # Calculate the inverse correlation matrix\n if np.linalg.det(C) != 0.0:\n C_inv = np.linalg.inv(C)\n else:\n C_inv = np.linalg.pinv(C)\n\n # Clean up\n del C\n\n # Get the diagonal of the inverse correlation matrix\n diag = C_inv.diagonal()[:]\n\n # Calculate matrix of normalizations\n norm = np.sqrt(abs(np.outer(diag, diag)))\n\n return - C_inv / norm", "def CORR(A: pd.DataFrame, B: pd.DataFrame, n) -> pd.DataFrame:\r\n A = A.unstack()\r\n B = B.unstack()\r\n res = A.rolling(n).corr(B)\r\n return res.stack()", "def calculate_feature_corr(self):\n \n return self.train_data.astype(float).corr(method='kendall')", "def corr(A,B):\n\n # Rowwise mean of input arrays & subtract from input arrays themeselves\n A_mA = A - A.mean(1)[:,None]\n B_mB = B - B.mean(1)[:,None]\n\n # Sum of squares across rows\n ssA = (A_mA**2).sum(1);\n ssB = (B_mB**2).sum(1);\n\n # Finally get corr coeff\n return np.dot(A_mA,B_mB.T)/np.sqrt(np.dot(ssA[:,None],ssB[None]))", "def correlation(self):\r\n\r\n c = np.corrcoef(self.input.data)\r\n c = c[tril_indices_from(c, -1)]\r\n\r\n return np.mean(c), stats.sem(c)", "def computeCorr(pred_act,responses):\n\n num_pres,num_neurons = np.shape(responses)\n corr=np.zeros(num_neurons)\n \n for i in xrange(0,num_neurons):\n if np.all(pred_act[:,i]==0) & np.all(responses[:,i]==0):\n corr[i]=1.\n elif not(np.all(pred_act[:,i]==0) | 
np.all(responses[:,i]==0)):\n # /!\\ To prevent errors due to very low values during computation of correlation\n if abs(pred_act[:,i]).max()<1:\n pred_act[:,i]=pred_act[:,i]/abs(pred_act[:,i]).max()\n if abs(responses[:,i]).max()<1:\n responses[:,i]=responses[:,i]/abs(responses[:,i]).max() \n corr[i]=pearsonr(np.array(responses)[:,i].flatten(),np.array(pred_act)[:,i].flatten())[0]\n \n return corr", "def correlation(row):\n return row['correlation']", "def _pearson_correlation_coeff(x_data, y_data):\n reg = linregress(x_data, y_data)\n return reg.rvalue", "def get_top_correlations(dataframe,columns,frame_type='spark'):\n if frame_type == 'spark':\n import math\n correlation_list = []\n correlations_finished = [] #hold correlatons done to prevent repitition\n for i, col_i in enumerate(columns):\n for j, col_j in enumerate(columns):\n if col_i+col_j not in correlations_finished: # don't repeat\n columns = [col_i,col_j]\n correlation = dataframe.stat.corr(col_i,col_j)\n if math.isnan(correlation):\n correlation=0.0\n correlation_list.append({\n 'columns': columns,\n 'correlation': correlation,\n 'correlation_abs':math.fabs(correlation),\n })\n # print({\n # 'columns': columns,\n # 'correlation': correlation,\n # 'correlation_abs':math.fabs(correlation),\n # })\n correlations_finished.append(col_i+col_j)\n #sort the list so highest correlations are first\n correlation_list = sorted(correlation_list, key=lambda x: x['correlation_abs'], reverse=True)\n return correlation_list\n else:\n pass", "def correlation_eye(data, left, right):\r\n corr = data[left].corr(data[right], method='pearson')\r\n return corr", "def correlation(self):\n\n c = np.corrcoef(self.input.data)\n c = c[tril_indices_from(c, -1)]\n\n return np.mean(c), stats.sem(c)", "def calculate_correlation(df, vars_to_corr, target_var) :\n\n\n mean = df[target_var].mean()\n sigma = df[target_var].std()\n\n correlation = []\n error = []\n\n for j in vars_to_corr :\n mean_j = df[j].mean()\n sigma_j = df[j].std()\n\n cov = (df[j] - mean_j) * (df[target_var] - mean) / (sigma*sigma_j)\n correlation.append(cov.mean())\n error.append(sem(cov))\n\n return correlation, error", "def calcCovarianceMatrix(data):\n # Create covariance matrix and array to store the mean values for x_mean, y_mean, z_mean\n C = np.zeros((data.shape[1], data.shape[1]))\n mean_xyz = []\n # Calculate all mean values\n for i in range(0, data.shape[1]):\n mean_xyz.append(data[:,i].mean())\n mean_xyz = np.array(mean_xyz)\n # Check whether dimensions agree \n if data[:,0].size != data[:,1].size or data[:,0].size != data[:,2].size:\n print \"X, Y and Z must be of same dimensions.\"\n else:\n # For each row in covariance matrix C\n for i in range(0, C.shape[0]):\n # For each column in covariance matrix C\n for j in range(0, C.shape[1]):\n C[i,j] = 0\n # For each point in the dataset, access x, y, z-values\n for point in data:\n # For each point, access x,y and z in all combinations (xx, xy, xz, yx, yy, yz etc)\n C[i][j] = C[i][j] + (point[i]-mean_xyz[i])*(point[j]-mean_xyz[j])\n # Divide by the total number of points \n C = (1.0/data.shape[0]) * C\n return C", "def pairwise_correlations(self, views: Iterable[np.ndarray], **kwargs):\n transformed_views = self.transform(views, **kwargs)\n all_corrs = []\n for x, y in itertools.product(transformed_views, repeat=2):\n all_corrs.append(\n np.diag(\n np.corrcoef(x.T, y.T)[\n : self.latent_dimensions, self.latent_dimensions :\n ]\n )\n )\n try:\n all_corrs = np.array(all_corrs).reshape(\n (self.n_views_, self.n_views_, 
self.latent_dimensions)\n )\n except:\n print()\n return all_corrs", "def _compute_corr(fmap):\n fmap = fmap.view(fmap.size(0), fmap.size(1), -1)\n fmap = nn.functional.normalize(fmap, dim=2, eps=1e-08)\n corr = torch.bmm(fmap.permute(0, 2, 1), fmap)\n return corr.view(corr.size(0), -1)", "def correlation(self,M,operator,site_i,site_j):\n minsite = min(site_i,site_j)\n maxsite = max(site_i,site_j)\n u = np.array([[1]])\n for i in range(0,minsite):\n M[i] = np.tensordot(u, M[i],axes=(-1,1)).transpose(1,0,2)\n l,u = self.left_cannonical(M[i])\n M[i] = l\n M[minsite] = np.tensordot(u, M[minsite]).transpose(1,0,2)\n MP = np.tensordot(M[minsite],operator,axes=(0,0))\n MPI = np.tensordot(MP, np.conj(M[minsite]),axes=(-1,0))\n MPI = MPI.transpose([0,2,1,3])\n for i in range(minsite+1,maxsite):\n MI = np.tensordot(MPI, M[i],axes=(2,1))\n MPI = np.tensordot(MI, np.conj(M[i]), axes=([3,2],[0,1]))\n\n MP = np.tensordot(M[maxsite],operator,axes=(0,0))\n MPJ = np.tensordot(MP, np.conj(M[maxsite]),axes=(-1,0))\n MPJ = MPJ.transpose([0,2,1,3])\n\n product = np.tensordot(MPI,MPJ, axes=([2,3,0,1]))\n correlation = np.trace(product)\n\n return correlation", "def matrix_heatmap():\n\n # retrieve data\n df = pd.read_csv('./housing.csv')\n df = df.dropna()\n\n # plot heatmap\n plt.figure(figsize=(10, 6))\n sns.heatmap(cbar=False, annot=True, data=df.corr(), cmap='coolwarm')\n plt.title('Correlation Matrix')\n\n plt.show()\n\n return None", "def distance_matrix(data):\n D = numpy.zeros( (data.shape[0], data.shape[0]) )\n for i in xrange(data.shape[0]):\n for j in xrange(i):\n D[i,j] = numpy.linalg.norm(data[i,:]-data[j,:])\n D[j,i] = D[i,j]\n\n return D", "def setup_corr_mat(k, N):\n full_corr_mat = _sliding_windows(k, N)\n overhang = full_corr_mat.shape[-1] - N\n if overhang % 2 == 1:\n front = int((overhang + 1) / 2) - 1\n back = front + 1\n else:\n front = back = int(overhang / 2)\n corr_mat = full_corr_mat[:, front:-back]\n\n return corr_mat", "def mp_corr(self):\n a, c, d, b = self.to_ccw()\n p1, q1 = a + b, c + d\n p2, q2 = a + c, b + d\n n = p1 + q1\n\n if n == 0:\n return np.nan\n elif a == n or d == n:\n # only one (diagonal) cell is non-zero\n return 0.5\n elif b == n or c == n:\n # only one (non-diagonal) cell is non-zero\n return -0.5\n\n return _div(2 * self.covar(), p1 * q1 + p2 * q2)", "def correlation(self) -> int:\n return self._correlation", "def get_correlation(spreadsheet_mat, phenotype_response, run_parameters):\n correlation_array = np.zeros(spreadsheet_mat.shape[0])\n if 'correlation_measure' in run_parameters:\n if run_parameters['correlation_measure'] == 'pearson':\n\n spreadsheet_mat = spreadsheet_mat - spreadsheet_mat.mean(axis=1).reshape((-1, 1))\n phenotype_response = phenotype_response - phenotype_response.mean()\n spreadsheet_mat_var = np.std(spreadsheet_mat, axis=1)\n phenotype_response_var = np.std(phenotype_response)\n numerator = spreadsheet_mat.dot(phenotype_response)\n denominator = spreadsheet_mat_var * phenotype_response_var * spreadsheet_mat.shape[1]\n with np.errstate(divide='ignore', invalid='ignore'):\n correlation_array = np.true_divide(numerator, denominator)\n correlation_array[denominator==0] = 0\n\n return correlation_array\n\n if run_parameters['correlation_measure'] == 't_test':\n \n a = spreadsheet_mat[:, phenotype_response!=0]\n b = spreadsheet_mat[:, phenotype_response==0]\n d = np.mean(a, axis=1) - np.mean(b, axis=1)\n denom = np.sqrt(np.var(a, axis=1, ddof=1)/a.shape[1] + np.var(b, axis=1, ddof=1)/b.shape[1])\n with np.errstate(divide='ignore', 
invalid='ignore'):\n correlation_array = np.divide(d, denom)\n correlation_array[np.isnan(denom)] = 0\n correlation_array = np.abs(correlation_array)\n\n return correlation_array\n\n return correlation_array", "def covariance(data_matrix):\n return np.asmatrix(np.cov(data_matrix, rowvar=0))", "def get_correlation(outcome_vectors, s_hat):\n\n outcome_vectors2 = np.asfortranarray(outcome_vectors.data)\n ldl_shat = np.asfortranarray(s_hat)\n return fast_corr.manual_corr(outcome_vectors2.T, ldl_shat.T)", "def get_avg_correlation_from_matrix(zz):\n L=zz.shape[0]\n ns=L-1\n #zzbar = np.zeros((ns, *zz.shape[2:]))\n zzbar = np.zeros_like(zz)\n for i in range(ns):\n s=i+1\n zzbar[i, ...] = np.mean(np.asarray([zz[ii, ii+s, ...] for ii in range(L-s)]), axis=0)\n return zzbar", "def plot_corr_matrix(dataset):\n\tprint(\"\\tGenerating correlation matrix\")\n\n\tsns.set(style=\"white\")\n\tmatrix = dataset.corr(method=\"pearson\")\n\n\tmask = np.zeros_like(matrix, dtype=np.bool)\n\tmask[np.triu_indices_from(mask)] = True\n\n\tplt.subplots(figsize=(7, 7))\n\tcmap = sns.diverging_palette(220, 10, as_cmap=True)\n\n\tsns.heatmap(matrix, mask=mask, cmap=cmap, vmax=.3, center=0, square=True, linewidths=.5, cbar_kws={\"shrink\": .5})\n\n\tplt.savefig(PARENT_DIR + \"/img/correlation_matrix.png\", bbox_inches='tight')", "def correlation(x, y):\n return covariance(x, y) / (sd(x) * sd(y))", "def cofactor_matrix(self):\n resp = []\n len_b = len(self.take_vec())\n for i in range(self.order):\n _matrix = aux.cofactor(self.take_matrix(),\n (i, self.order-1)\n )\n _resp = math.pow(-1, len_b-1)\n _resp = _resp * np.linalg.det(_matrix)\n _resp = _resp * math.pow(-1, i * (self.order-1))\n resp.append(int(round(_resp)))\n\n return resp", "def construct_corr_mat(reg_fi=None, reg_fij=None, seqs_len=None,\n mx=None):\n # corr_mat_len = seqs_len * (num_site_states - 1)\n corr_mat_len = mx.cumsum()[-1]\n print('Generating NxN correlation matrix with N=', corr_mat_len)\n corr_mat = np.zeros((corr_mat_len, corr_mat_len), dtype=np.float64)\n pair_counter = 0\n for i in range(seqs_len - 1):\n if i == 0:\n site_i = 0\n else:\n site_i = mx.cumsum()[i - 1]\n for j in range(i + 1, seqs_len):\n site_j = mx.cumsum()[j - 1]\n for a in range(mx[i]):\n row = site_i + a\n for b in range(mx[j]):\n col = site_j + b\n if i == j:\n print('Iteration through non-symmetric reg_fij list is not working ')\n sys.exit()\n else:\n try:\n corr_ij_ab = reg_fij[pair_counter][a][b] - reg_fi[i][a] * reg_fi[j][b]\n except IndexError:\n print('pair %d: (%d,%d)' % (pair_counter, i, j))\n print('Indices: ', mx.cumsum())\n print('Site Counts: ', mx)\n print('Index out of bound')\n print('par ranges: a= [%d,%d],b= [%d,%d]' % (\n site_i, site_i + range(mx[i])[-1], site_j, site_j + range(mx[j])[-1]))\n print('pair_counter = %d of %d (%d)' % (pair_counter, len(reg_fij), len(reg_fij)))\n print('i site state = %d of %d (%d)' % (a, mx[i], len(reg_fij[pair_counter])))\n print(b)\n sys.exit()\n # print(corr_mat)\n # print(corr_ij_ab)\n try:\n corr_mat[row, col] = corr_ij_ab\n corr_mat[col, row] = corr_ij_ab\n except IndexError:\n print('ERROR: \\n row = %d of %d' % (row, mx.cumsum()[-1]))\n print(' \\n col = %d of %d' % (col, mx.cumsum()[-1]))\n sys.exit()\n\n if i != j: pair_counter += 1\n # fill in diagonal block\n for ii, site_block in enumerate(mx):\n if ii == 0:\n site_block_start = 0\n else:\n site_block_start = mx.cumsum()[ii - 1]\n for a in range(site_block):\n for b in range(a, site_block):\n row = site_block_start + a\n col = site_block_start + b\n # 
print('combo (%d,%d)'%(row,col))\n fia, fib = reg_fi[ii][a], reg_fi[ii][b]\n corr_ij_ab = fia * (1.0 - fia) if a == b else -1.0 * fia * fib\n corr_mat[row, col] = corr_ij_ab\n corr_mat[col, row] = corr_ij_ab\n\n return corr_mat", "def corr(arr1, arr2):\n\n\n X = []\n Y = []\n for index in range(len(arr1)):\n if arr1[index] == None or arr2[index] == None:\n continue\n X.append(arr1[index])\n Y.append(arr2[index])\n\n\n r = np.corrcoef(X, Y)[0,1]\n f = 0.5*np.log((1+r)/(1-r))\n se = 1/np.sqrt(len(X)-3)\n ucl = f + 2*se\n lcl = f - 2*se\n\n lcl = (np.exp(2*lcl) - 1) / (np.exp(2*lcl) + 1)\n ucl = (np.exp(2*ucl) - 1) / (np.exp(2*ucl) + 1)\n\n return r,lcl,ucl", "def method_1(cor_mat, out_len):\n if cor_2x2(cor_mat):\n x_one = new_array(out_len)\n x_two = new_array(out_len)\n c = cor_mat.item((0,1))\n y = c * x_one + ((1 - c**2)**0.5) * x_two\n return np.matrix([x_one,y]).T\n else:\n raise ValueError('Not a 2x2 correlation Matrix')", "def corr_coeff(self) -> float:\n correlation_coefficient = np.corrcoef(self.true, self.predicted)[0, 1]\n return float(correlation_coefficient)", "def _corr_ax1(input_image):\n dim = input_image.shape[1]\n m_ones = np.ones(dim)\n norm_mask = np.correlate(m_ones, m_ones, mode=\"full\")\n # not sure that the /2 is the correct correction\n est_by_row = [np.argmax(np.correlate(v, v[::-1], mode=\"full\") / norm_mask) / 2 for v in input_image]\n return np.histogram(est_by_row, bins=np.arange(0, dim + 1))", "def correlation(self) -> List[float]:\n self.pearson_corr = self.sim_data[\"Human (mean)\"].corr(self.sim_data[\"assigned_sim\"], method=\"pearson\")\n self.spearman_corr = self.sim_data[\"Human (mean)\"].corr(self.sim_data[\"assigned_sim\"], method=\"spearman\")\n return [self.pearson_corr, self.spearman_corr]", "def tf_pearson_correlation(x_proj, y_proj):\n mx = tf.reduce_mean(x_proj, axis=0)\n my = tf.reduce_mean(y_proj, axis=0)\n xm = x_proj - mx\n ym = y_proj - my\n r_num = tf.matmul(tf.transpose(xm), ym)\n r_den = tf.sqrt(tf.reduce_sum(tf.square(xm), axis=0) * tf.reduce_sum(tf.square(ym), axis=0))\n r_mat = tf.divide(r_num, r_den)\n r_vals = tf.diag_part(r_mat)\n return r_vals", "def test_distance_correlation_naive(self):\n matrix1 = np.array(((1, 2, 3), (4, 5, 6), (7, 8, 9)))\n matrix2 = np.array(((7, 3, 6), (2, 1, 4), (3, 8, 1)))\n matrix3 = np.array(((1, 1, 1), (2, 1, 1), (1, 1, 1)))\n constant_matrix = np.ones((3, 3))\n\n correlation = dcor.distance_correlation_sqr(\n matrix1, matrix1)\n self.assertAlmostEqual(correlation, 1)\n\n correlation = dcor.distance_correlation_sqr(\n matrix1, constant_matrix)\n self.assertAlmostEqual(correlation, 0)\n\n correlation = dcor.distance_correlation_sqr(\n matrix1, matrix2)\n self.assertAlmostEqual(correlation, 0.93387, places=5)\n\n correlation = dcor.distance_correlation_sqr(\n matrix1, matrix3)\n self.assertAlmostEqual(correlation, 0.31623, places=5)", "def cor(x, y):\n scaler = TimeSeriesScalerMeanVariance()\n x_norm = scaler.fit_transform(x)\n y_norm = scaler.fit_transform(y)\n pcc = np.mean(x_norm * y_norm) # Pearson correlation coefficients\n d = np.sqrt(2.0 * (1.0 - pcc + 1e-9)) # correlation-based similarities\n return np.sum(d)", "def convmat(signal_size,kernel,dtype=_dtype):\n\tassert (kernel.size%2==1), \"kernel is assumed to have odd number of elements\"\n\n\tmat = sp.dia_matrix( (signal_size,signal_size), dtype=dtype )\n\n\thalf_ker_size = kernel.size//2\n\t# correlation\n\tfor i in range(-half_ker_size,half_ker_size+1):\n\t\tif ( kernel[half_ker_size+i]!=0 
):\n\t\t\tmat.setdiag(kernel[half_ker_size+i],i)\n\t# # convolution\n\t# for i in range(-half_ker_size,half_ker_size+1):\n\t# \tif ( kernel[half_ker_size-i]!=0 ):\n\t# \t\tmat.setdiag(kernel[half_ker_size-i],i)\n\n\treturn mat", "def get_covariance(data_array):\n number_of_data = len(data_array)\n number_of_features = len(data_array[0]) if number_of_data != 0 else 0\n covariance_matrix = numpy.zeros([number_of_features, number_of_features])\n mean = numpy.zeros(number_of_features)\n for data in data_array:\n numpy.add(mean, data, mean)\n numpy.divide(mean, number_of_data, mean)\n for i in xrange(number_of_features):\n temp = numpy.zeros(number_of_data)\n for j in xrange(number_of_data):\n temp[j] = data_array[j][i] - mean[i]\n covariance_matrix[i][i] = max(numpy.dot(temp, temp.transpose()), 0.00001) / number_of_data\n return covariance_matrix, mean", "def correlation_matrix(self, attr_list, method=\"pearson\"):\n corr_matrix = self.data.corr(method=method)\n corr_dict = {}\n for attr in attr_list:\n corr_dict[attr] = list(corr_matrix[attr][attr_list].values)\n return json.dumps(corr_dict)", "def create_corr_mat(data, annot_sz=30):\n assert isinstance(data, pd.DataFrame)\n assert isinstance(annot_sz, int) \n corr = data.corr()\n \n ax = plt.figure()\n ax.set_facecolor('xkcd:black')\n mat = sns.heatmap(corr, vmin=-1, vmax=1, center=0, cmap=sns.diverging_palette(20, 220, n=200), square=True, annot=True, annot_kws={\"size\": annot_sz})\n mat.set_xticklabels(mat.get_xticklabels(), rotation=30, horizontalalignment='right');\n mat.set_yticklabels(mat.get_yticklabels(), rotation=0, horizontalalignment='right')\n mat.tick_params(axis='x', colors='white')\n mat.tick_params(axis='y', colors='white')\n plt.show()\n \n return corr", "def _get_correlation(self, clean_samples, batch_properties):\n columns = self.options.correlation.columns\n clean_column_ids = []\n if columns is None:\n for idx in range(len(self._profile)):\n data_type = self._profile[idx].\\\n profiles[\"data_type_profile\"].selected_data_type\n if data_type not in [\"int\", \"float\"]:\n clean_samples.pop(idx)\n else:\n clean_column_ids.append(idx)\n\n data = pd.DataFrame(clean_samples).apply(pd.to_numeric, errors='coerce')\n means = {index:mean for index, mean in enumerate(batch_properties['mean'])}\n data = data.fillna(value=means)\n\n # Update the counts/std if needed (i.e. 
if null rows or exist)\n if (len(data) != batch_properties['count']).any():\n adjusted_stds = np.sqrt(\n batch_properties['std']**2 * (batch_properties['count'] - 1) \\\n / (len(data) - 1)\n )\n batch_properties['std'] = adjusted_stds\n # Set count key to a single number now that everything's been adjusted\n batch_properties['count'] = len(data)\n\n # fill correlation matrix with nan initially\n n_cols = len(self._profile)\n corr_mat = np.full((n_cols, n_cols), np.nan)\n\n # then, fill in the correlations for valid columns\n rows = [[id] for id in clean_column_ids]\n corr_mat[rows, clean_column_ids] = np.corrcoef(data, rowvar=False)\n\n return corr_mat", "def cc_cov(r, **kwargs):\r\n sample_corr = r.corr()\r\n n_assets = len(r.columns)\r\n avg_distinct_rho = (sample_corr.values.sum() - n_assets) / (\r\n n_assets * (n_assets - 1)) # Taking avg of off diagonal corr matrix on one side\r\n const_corr = np.full_like(sample_corr, avg_distinct_rho)\r\n np.fill_diagonal(const_corr, 1.)\r\n sd = r.std()\r\n # Convert to cov using statsmodel\r\n const_cov_sm = mh.corr2cov(const_corr, sd)\r\n # Convert to cov using formula and outer product - alternate way is to use sd @ sd.T instead of np.outer(sd, sd) -> yields matrix(mxm)\r\n const_cov = const_corr * np.outer(sd, sd)\r\n return pd.DataFrame(const_cov, columns=r.columns, index=r.columns)", "def coranking_matrix(high_data, low_data):\n n, m = high_data.shape\n high_distance = distance.squareform(distance.pdist(high_data))\n low_distance = distance.squareform(distance.pdist(low_data))\n\n high_ranking = high_distance.argsort(axis=1).argsort(axis=1)\n low_ranking = low_distance.argsort(axis=1).argsort(axis=1)\n\n Q, xedges, yedges = np.histogram2d(high_ranking.flatten(),\n low_ranking.flatten(),\n bins=n)\n\n Q = Q[1:, 1:] # remove rankings which correspond to themselves\n return Q", "def cross_correlation(arr1, arr2):\n faxes = lambda x: tuple(np.arange(x.ndim - 1) + 1)\n\n return pipe(\n arr1,\n dafftn(axes=faxes(arr1)),\n lambda x: daconj(x) * dafftn(arr2, axes=faxes(arr2)),\n daifftn(axes=faxes(arr1)),\n dafftshift(axes=faxes(arr1)),\n lambda x: x.real / arr1[0].size,\n )" ]
[ "0.8153571", "0.8153571", "0.7506318", "0.714195", "0.70782083", "0.6879409", "0.687049", "0.6733009", "0.6535114", "0.6527502", "0.6494434", "0.64640766", "0.64628077", "0.64515585", "0.64164704", "0.64059234", "0.6371729", "0.6369661", "0.6355419", "0.63055253", "0.6285088", "0.6271518", "0.6210062", "0.61105144", "0.6077478", "0.6072725", "0.6063658", "0.6028245", "0.59604347", "0.59440374", "0.5941153", "0.5923463", "0.5923381", "0.5921164", "0.59005034", "0.58991736", "0.5867016", "0.5866334", "0.5851454", "0.58493584", "0.58242846", "0.5808976", "0.5782969", "0.57675093", "0.5760989", "0.5740344", "0.5733032", "0.5731343", "0.57293355", "0.572705", "0.57215047", "0.5699027", "0.5695344", "0.56849504", "0.56846905", "0.5679378", "0.5678305", "0.5659235", "0.5650627", "0.56458837", "0.5641879", "0.56396264", "0.563805", "0.56380177", "0.56213343", "0.5617183", "0.5616159", "0.5612962", "0.56122965", "0.5611864", "0.56035024", "0.5571415", "0.5565992", "0.55512697", "0.55383605", "0.5532071", "0.5529133", "0.5519572", "0.5509417", "0.55070055", "0.55035305", "0.54993314", "0.5498972", "0.5476041", "0.547514", "0.5474505", "0.54646003", "0.5452162", "0.545195", "0.5449109", "0.5448651", "0.5448217", "0.5437187", "0.54337764", "0.54296356", "0.5422507", "0.5421045", "0.54204124", "0.541012", "0.5404876" ]
0.8082999
2
Ensure the start and end are appropriately placed in the solution.
def _fix_entrances(self, solution): # prune if start is found in solution if self.start in solution: i = solution.index(self.start) solution = solution[i+1:] # fix solution so it doesn't overlap endpoints if not self._on_edge(self.end): [solution] = [solution[:-1]] return solution
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def recheckPosition(self):\n self.start = self.bounds[0].pos\n self.end = self.bounds[1].pos", "def _adjustRange(self, start, end):\n adjusted_start = start\n if self._start:\n if end < self._start:\n return None\n adjusted_start = max(self._start, start)\n \n adjusted_end = end\n if self._end:\n if self._end < start:\n return None\n adjusted_end = min(self._end, end)\n \n return (adjusted_start, adjusted_end)", "def validate(self):\n if self._inc_begin is None:\n raise ValueError((\"TimeRange {self} missing begin point\")\n .format(self=self))\n if self._exc_end is None:\n raise ValueError((\"TimeRange {self} missing end point\")\n .format(self=self))", "def test_validate_begin_equals_end():\n with pytest.raises(InvalidSegmentError):\n _validate([[1, 2], [5, 5]])", "def test_no_same_start_and_end(self):\n\n # pylint: disable=line-too-long\n rule1 = \"DTSTART:20180501T210000Z RRULE:FREQ=YEARLY;BYDAY=SU;BYSETPOS=1;BYMONTH=1;UNTIL=20280521T210000Z\" # noqa\n task = mommy.make(\"tasking.Task\", timing_rule=rule1)\n\n # Delete any autogenerated occurrences\n # pylint: disable=no-member\n TaskOccurrence.objects.all().delete()\n\n # we should have 9 instead of 10 occurrences because the very last\n # one would start at 9pm and end at 9pm\n self.assertEqual(\n 9,\n generate_task_occurrences(task=task, timing_rule=task.timing_rule).count(),\n )", "def fix_addresses(start=None, end=None):\n if start in (None, idaapi.BADADDR):\n start = idaapi.cvar.inf.minEA\n\n if end in (None, idaapi.BADADDR):\n end = idaapi.cvar.inf.maxEA\n\n return start, end", "def check_interval_bounds(begin, end):\n if begin.get_midpoint() >= end.get_midpoint():\n return False\n\n if begin.get_radius() is not None and end.get_radius() is not None:\n if begin.get_midpoint() - begin.get_radius() > \\\n end.get_midpoint() - end.get_radius():\n return False\n\n return True", "def clean(self):\n super(Event, self).clean()\n\n if self.start and self.end and self.end < self.start:\n raise ValidationError({'start': \"Start time must be before end time\"})", "def test_start_equals_end_is_fine(self):\n with stdin_as_string(self.ifile):\n self.assertLines(\n [\"-i\", \"0-0\"],\n [\n \"id\",\n \"0\",\n ],\n )\n\n with stdin_as_string(self.ifile):\n self.assertLines(\n [\"-i\", \"3-3\"],\n [\n \"id\",\n \"3\",\n ],\n )", "def simulatedate_checkinput(start, end):\n start_year, start_month, start_day = parse_string_datetime(start)\n end_year, end_month, end_day = parse_string_datetime(end)\n if datetime_checkinput(start_year, start_month, start_day) == 0 and datetime_checkinput(end_year, end_month, end_day) == 0:\n start_time = datetime.datetime(start_year, start_month, start_day)\n end_time = datetime.datetime(end_year, end_month, end_day)\n if start_time < end_time:\n return 0\n else:\n raise Invaliddatetimeinput", "def check_consistent(self):\n # * END LIST The end list itself must be consistent.\n # ** Each end must be of understood type\n # ** Each end must have a valid sequence or no sequence\n # ** There must be no more than one instance of each name\n # ** WARN if there are ends with no namecounts\n # * TILE LIST\n # ** each tile must be of understood type (must parse)\n # ** ends in the tile list must be consistent (must merge)\n # ** there must be no more than one tile with each name\n # self.tiles.check_consistent()\n endsfromtiles = self.tiles.glues_from_tiles()\n\n # ** WARN if any end that appears does not have a complement used or vice versa\n # ** WARN if there are tiles with no name\n # * TILE + END\n # ** The tile and 
end lists must merge validly\n # (checks sequences, adjacents, types, complements)\n self.glues | endsfromtiles\n\n # ** WARN if tilelist has end references not in ends\n # ** WARN if merge is not equal to the endlist\n # ** WARN if endlist has ends not used in tilelist\n # * ADAPTERS / SEEDS\n # SEED stuff was here", "def test_create_one_start_abs(check_ranges, accounts, nft):\n nft.transferRange(accounts[4], 2, 1000, {\"from\": accounts[1]})\n check_ranges([(1, 2), (1000, 10001)], [(10001, 20001)], [(20001, 30001)], [(2, 1000)])", "def test_validate_begin_greater_than_end():\n with pytest.raises(InvalidSegmentError):\n _validate([[1, 2], [5, 3]])", "def __init__(self, start, end):\n self.start = start\n self.end = end", "def is_legal(self, start, end) -> bool:\n return self.board(end) == 0 \\\n and self.board(start) > 0 \\\n and self._check_zone_locks(start, end) \\\n and self.exists_path(start, end)", "def _check_start_end_acceptable(start: str, end: str) -> None:\n\n char_regex = regex.compile(\"[A-Z]+\")\n\n if not char_regex.fullmatch(start) or not char_regex.fullmatch(end):\n raise ValueError(\"start and end must be characters\")\n\n _check_end_after_start(start, end)", "def test_set_begin_and_end_for_emp(self):\n start = timezone.make_aware(dt.datetime(2016, 6, 3, 6, 30))\n stop = timezone.make_aware(dt.datetime(2016, 6, 3, 10, 30))\n expected_begin = timezone.make_aware(dt.datetime(2016, 6, 3, 6, 30))\n expected_end = timezone.make_aware(dt.datetime(2016, 6, 2, 14, 32))\n\n example_employee = RawClockData.objects.first()\n begin, end = set_begin_and_end_for_emp(\n employee=example_employee,\n start=start,\n stop=stop,\n )\n\n self.assertEqual(expected_begin, begin)\n self.assertEqual(expected_end, end)", "def add_range(self, start, end) -> bool:\n start = _normalize_datetime(start)\n end = _normalize_datetime(end)\n assert end > start\n\n if self._start_time is None:\n self._start_time = start\n\n if start < self._start_time:\n delta = int((self._start_time - start).total_seconds() / 60)\n self._start_time = start\n self._backing_int = self._backing_int << delta\n\n start_idx = self._datetime_to_index(start)\n end_idx = self._datetime_to_index(end)\n idx_range = end_idx - start_idx\n range_mask = ((1 << (idx_range + 1)) - 1) << start_idx\n\n has_overlap = (self._backing_int & range_mask) > 0\n self._backing_int |= range_mask\n return has_overlap", "def gen_start_end_times(start_time=[6, 0, 0], end_time=[23, 0, 0]):\n\n now = datetime.now()\n year = now.year\n month = now.month\n day = now.day\n\n start_time = datetime(\n year, month, day, start_time[0], start_time[1], start_time[2], 0\n )\n\n end_time = datetime(year, month, day, end_time[0], end_time[1], end_time[2], 0)\n\n if end_time < now:\n end_time += timedelta(days=1)\n start_time += timedelta(days=1)\n\n return start_time, end_time", "def shortest_path(start, end, verbose=0, termination_check=True):\n # Set up\n dict_from_start = d()\n dict_form_end = d()\n agenda_start = Queue()\n agenda_end = Queue()\n operations = rubik.quarter_twists\n\n agenda_start.put_elt(SearchNode((start, None), None))\n agenda_end.put_elt(SearchNode((end, None), None))\n flip = True\n if termination_check: counter = 0\n\n if verbose:\n print \"====================================================================================\"\n print \"start: {0}\".format(start)\n print \"end: {0}\".format(end)\n print \"State: {0}\".format(\"SetUp\")\n print \"dict_start: {0}\".format(dict_from_start)\n print \"dict_end: {0}\".format(dict_form_end)\n print 
\"agenda_start: {0}\".format(agenda_start)\n print \"agenda_end: {0}\".format(agenda_end)\n print \"====================================================================================\"\n\n while not agenda_start.is_empty() and not agenda_end.is_empty():\n if verbose:\n print \"====================================================================================\"\n print \"State: {0}\".format(\"InLoop_s\")\n if flip:\n print \"At Start\"\n print \"dict_start: {0}\".format(dict_from_start)\n print \"agenda_start: {0}\".format(agenda_start)\n if not flip:\n print \"At End\"\n print \"dict_end: {0}\".format(dict_form_end)\n print \"agenda_end: {0}\".format(agenda_end)\n print \"calculating...\"\n\n # Flipping style\n if flip:\n _dict = dict_from_start\n _agenda = agenda_start\n _other = dict_form_end\n else:\n _dict = dict_form_end\n _agenda = agenda_end\n _other = dict_from_start\n # do one level permutation\n current = _agenda.pop_elt()\n _dict[current.name] = current\n children = [SearchNode((rubik.perm_apply(op, current.value), op), current) for op in operations]\n # dynamic programming\n for child in children:\n if child.name not in _dict:\n _agenda.put_elt(child)\n\n if verbose:\n print \"State: {0}\".format(\"InLoop_e\")\n if flip:\n print \"dict_start: {0}\".format(dict_from_start)\n print \"agenda_start: {0}\".format(agenda_start)\n else:\n print \"dict_end: {0}\".format(dict_form_end)\n print \"agenda_end: {0}\".format(agenda_end)\n print \"====================================================================================\"\n\n # Termination Check\n if termination_check:\n if len(_dict) >= 100 and counter == 0:\n counter += 1\n print \"100\"\n elif len(_dict) >= 1000 and counter == 1:\n counter += 1\n print \"1000\"\n elif len(_dict) >= 10000 and counter == 2:\n counter += 1\n print \"10000\"\n elif len(_dict) >= 100000 and counter == 3:\n counter += 1\n print \"100000\"\n elif len(_dict) >= 1000000 and counter == 4:\n counter += 1\n print \"1000000\"\n # elif len(_dict) >= 2000000 and counter == 5:\n # counter += 1\n # print \"2000000\"\n # elif len(_dict) >= 2000000 and counter == 6:\n # counter += 1\n # print \"3000000\"\n if len(_dict) >= 3674160//2:\n break\n\n # Flip\n flip = not flip\n # Terminate condition\n if verbose: print \"check Termination..........\"\n if current.name in _other:\n from_start = dict_from_start[current.name].get_path()\n from_end = [rubik.perm_inverse(op) for op in reversed(dict_form_end[current.name].get_path())]\n if verbose: print \"Result: {0}\".format([rubik.quarter_twists_names[op] for op in from_start+from_end])\n return from_start + from_end\n if verbose: print \"Done checking.\"\n\n if verbose: print \"No solution\"\n return None", "def overlap(start1, end1, start2, end2):\n return not (end1 < start2 or end2 < start1)", "def __init__(__self__, *,\n end: pulumi.Input[str],\n start: pulumi.Input[str]):\n pulumi.set(__self__, \"end\", end)\n pulumi.set(__self__, \"start\", start)", "def bounds(self, start=None, finish=None):\n lower = start if start is not None else self.limits[0]\n upper = finish if finish is not None else self.limits[1]\n\n lower = lower + self.offsets[0]\n upper = upper + self.offsets[1]\n\n return (lower, upper)", "def test_start_and_end_equal(self):\n start = timezone.now()\n end = start\n with six.assertRaisesRegex(self, ValidationError, self.msg):\n validate_timeframe(start, end)", "def _validate_trajectory_transition(subgaits, from_subgait_names, to_subgait_names):\n for from_subgait_name, to_subgait_name in 
zip(from_subgait_names, to_subgait_names):\n\n if not all(name not in ('start', 'end', None) for name in (from_subgait_name, to_subgait_name)):\n continue # a start or end point can not be compared to a subgait\n\n from_subgait = next((subgait for subgait in subgaits if subgait.subgait_name == from_subgait_name), None)\n to_subgait = next((subgait for subgait in subgaits if subgait.subgait_name == to_subgait_name), None)\n\n if not from_subgait.validate_subgait_transition(to_subgait):\n raise NonValidGaitContent(msg='End setpoint of subgait {sn} to subgait {ns} does not match'\n .format(sn=from_subgait.subgait_name, ns=to_subgait.subgait_name))", "def get_correct_goes_index(goes_index, begin, end):\n difference = timedelta(seconds=1)\n begin = get_datetime(begin)\n end = get_datetime(end)\n found_begin = False\n found_end = False\n\n for index in goes_index:\n new_index = get_datetime(str(index)[0:19])\n\n if ((new_index == begin or new_index == begin-difference or\n new_index == begin-2*difference) and not found_begin):\n found_begin = True\n begin = index\n continue\n\n if ((new_index == end or new_index == end+difference or\n new_index == end+2*difference) and not found_end):\n found_end = True\n end = index\n continue\n\n return begin, end", "def test_create_one_end_abs(check_ranges, accounts, nft):\n nft.transferRange(accounts[4], 29000, 30000, {\"from\": accounts[3]})\n check_ranges([(1, 10001)], [(10001, 20001)], [(20001, 29000), (30000, 30001)], [(29000, 30000)])", "def check_variant_start_and_end_positions(\n input_df: pd.DataFrame, start_pos_col: str, end_pos_col: str, filename: str\n) -> tuple:\n errors = \"\"\n warnings = \"\"\n\n if any(input_df[start_pos_col] > input_df[end_pos_col]):\n errors = (\n f\"{filename}: Your variants file has record(s) that have an end position \"\n \"value less than the start position value. Please update your file to be consistent. 
\"\n \"When we annotate using the genome-nexus-annotation-pipeline, the records with this \"\n \"position discrepancy will show a blank reference and variant allele.\\n\"\n )\n return errors, warnings", "def analyze(self, start, end):\n return", "def __init__ (self, start, end):\n\n self.start = start\n self.end = end", "def test_start_before_end(self):\n start = timezone.now()\n end = start + timedelta(seconds=1)\n actual = validate_timeframe(start, end)\n expected = None\n self.assertEqual(actual, expected)", "def isValid(self, start, end):\n for s in self.skip:\n if start <= s[0] <= end or start <= s[1] <= end:\n return False\n return True", "def check(self):\n self.lower_bound(5e-4)\n self.upper_bound(5e2)", "def input_check(self):\n\n if self.species == 'He': assert self.line_model == 'voigt'\n n_upper_range, e_dens_range, temp_range, b_field_range = get_param_ranges(self.line_model)\n\n if np.isnan(n_upper_range).sum() <= 1:\n assert (self.n_upper in range(n_upper_range[0], n_upper_range[1]))\n if np.isnan(e_dens_range).sum() <= 1:\n assert (e_dens_range[0] <= self.e_dens <= e_dens_range[1])\n if np.isnan(temp_range).sum() <= 1:\n assert (temp_range[0] <= self.temp <= temp_range[1])\n if np.isnan(b_field_range).sum() <= 1:\n assert (b_field_range[0] <= self.b_field <= b_field_range[1])", "def _test_out_of_range(self):\n self.cdbconf.setup('KKG')\n self.cdbconf.setConfiguration('CUSTOM_OPT')\n az, el, latitude = [radians(50)] * 3\n site_info = {'latitude': latitude}\n self.p.setup(site_info, self.source, self.device)\n self.p.setRewindingMode('AUTO')\n offset = 20\n max_limit = self.device.getMaxLimit() \n min_limit = self.device.getMinLimit()\n Pis = max_limit - offset/2\n time.sleep(0.2) if self.using_mock else time.sleep(3)\n self.p.setPosition(Pis)\n time.sleep(0.2) # Wait a bit for the setup\n max_rewinding_steps = (max_limit - min_limit) // self.device.getStep()\n expected = Pis - max_rewinding_steps*self.device.getStep() + offset\n self.source.setAzimuth(az)\n self.source.setElevation(el)\n self.p.startUpdating('MNG_TRACK', 'ANT_NORTH', az, el, None, None)\n time.sleep(0.2) if self.using_mock else time.sleep(3)\n self.p.setOffset(offset)\n time.sleep(0.2) if self.using_mock else time.sleep(3)\n self.assertEqual(self.device.getActPosition(), expected)", "def check_end(self):\n return [self.x, self.y] == self.end_pos", "def dates_intervals_are_overlapped(start_1, end_1, start_2, end_2):\n return end_1 >= start_2 and end_2 >= start_1", "def _check_start_and_end_steps(self):\n start_step = end_step = None\n\n if self.start_step is not None:\n if hasattr(self, self.start_step):\n start_step = getattr(self, self.start_step)\n else:\n raise ValueError(\n \"start_step {0!r} not found\".format(\n self.start_step))\n\n if self.end_step is not None:\n if hasattr(self, self.end_step):\n end_step = getattr(self, self.end_step)\n else:\n raise ValueError(\n \"end_step {0!r} not found\".format(\n self.end_step))\n\n return start_step, end_step", "def checkBasics(self, transform):\n for fromPoint in self.fromIter():\n toPoint = transform.forwardTransform(fromPoint)\n roundTripPoint = transform.reverseTransform(toPoint)\n for i in range(2):\n self.assertAlmostEqual(fromPoint[i], roundTripPoint[i])\n\n for deltaFrom in (\n Extent2D(0),\n Extent2D(0.1, -0.1),\n Extent2D(-0.15, 0.1),\n ):\n tweakedFromPoint = fromPoint + deltaFrom\n tweakedToPoint = transform.forwardTransform(tweakedFromPoint)\n linToPoint = transform.linearizeForwardTransform(\n fromPoint)(tweakedFromPoint)\n linRoundTripPoint = 
transform.linearizeReverseTransform(\n toPoint)(tweakedToPoint)\n for i in range(2):\n self.assertAlmostEqual(\n tweakedToPoint[i], linToPoint[i], places=2)\n self.assertAlmostEqual(\n tweakedFromPoint[i], linRoundTripPoint[i], places=2)", "def test_pasture_overlap():\n a = Pasture([(0, 1)])\n b = Pasture([(0, 1), (0, 2)])\n c = Pasture([(0, 2), (0, 3)])\n\n assert a.overlaps(b)\n assert b.overlaps(a)\n\n assert b.overlaps(c)\n assert c.overlaps(b)\n\n assert not a.overlaps(c)\n assert not c.overlaps(a)\n\n p = Player(\"p0\", wood=20)\n p.build_pastures(a)\n p.build_pastures(c)\n\n p = Player(\"p0\", wood=20)\n p.build_pastures(c)\n p.build_pastures(a)\n\n p = Player(\"p0\", wood=20)\n p.build_pastures([a, c])\n\n p = Player(\"p0\", wood=20)\n p.build_pastures([c, a])\n\n p = Player(\"p0\", wood=20)\n p.build_pastures(b)\n\n p = Player(\"p0\", wood=20)\n with pytest.raises(AgricolaLogicError):\n p.build_pastures([a, b])\n\n p = Player(\"p0\", wood=20)\n p.build_pastures(a)\n with pytest.raises(AgricolaLogicError):\n p.build_pastures(b)\n\n p = Player(\"p0\", wood=20)\n p.build_pastures(b)\n with pytest.raises(AgricolaLogicError):\n p.build_pastures(a)\n\n p = Player(\"p0\", wood=20)\n with pytest.raises(AgricolaLogicError):\n p.build_pastures([c, b])\n\n p = Player(\"p0\", wood=20)\n p.build_pastures(c)\n with pytest.raises(AgricolaLogicError):\n p.build_pastures(b)\n\n p = Player(\"p0\", wood=20)\n p.build_pastures(b)\n with pytest.raises(AgricolaLogicError):\n p.build_pastures(c)", "def get_time_constraint(start, end):\n \n date_pattern = '([0-9]{4})-([0-9]{1,2})-([0-9]{1,2})'\n if start.lower() == 'none':\n start = None\n else:\n assert re.search(date_pattern, start)\n\n if end.lower() == 'none':\n end = None\n else:\n assert re.search(date_pattern, end)\n\n if not start and not end:\n time_constraint = iris.Constraint()\n elif (start and not end) or (start == end):\n year, month, day = start.split('-') \n time_constraint = iris.Constraint(time=iris.time.PartialDateTime(year=int(year), month=int(month), day=int(day)))\n elif end and not start:\n year, month, day = end.split('-') \n time_constraint = iris.Constraint(time=iris.time.PartialDateTime(year=int(year), month=int(month), day=int(day)))\n else: \n start_year, start_month, start_day = start.split('-') \n end_year, end_month, end_day = end.split('-')\n time_constraint = iris.Constraint(time=lambda t: iris.time.PartialDateTime(year=int(start_year), month=int(start_month), day=int(start_day)) <= t <= iris.time.PartialDateTime(year=int(end_year), month=int(end_month), day=int(end_day)))\n\n return time_constraint", "def test_split_ranges(self):\n start = datetime.utcnow() - pd.Timedelta(\"5H\")\n end = datetime.utcnow() + pd.Timedelta(\"5min\")\n delta = pd.Timedelta(\"1H\")\n\n ranges = QueryProvider._calc_split_ranges(start, end, delta)\n self.assertEqual(len(ranges), 5)\n self.assertEqual(ranges[0][0], start)\n self.assertEqual(ranges[-1][1], end)\n\n st_times = [start_tm[0] for start_tm in ranges]\n for end_time in (end_tm[1] for end_tm in ranges):\n self.assertNotIn(end_time, st_times)\n\n end = end + pd.Timedelta(\"20min\")\n ranges = QueryProvider._calc_split_ranges(start, end, delta)\n self.assertEqual(len(ranges), 5)\n self.assertEqual(ranges[0][0], start)\n self.assertEqual(ranges[-1][1], end)", "def adjusted_by(self, start=None, end=None):\n start_pos = self.start\n end_pos = self.end\n if start is not None:\n # We don't use += because that would modify our self.start in place.\n start_pos = start_pos + 
geom_utils.ToNumpy3Vector(start) # pylint: disable=g-no-augmented-assignment\n if end is not None:\n end_pos = end_pos + geom_utils.ToNumpy3Vector(end) # pylint: disable=g-no-augmented-assignment\n return BoundingBox(start=start_pos, end=end_pos)", "def test_simsam_range_correct_number_of_output(self):\r\n actual = qiime.simsam.simsam_range(\r\n self.tutorial_otu_table, self.tutorial_tree,\r\n [1], [0.1], self.tutorial_map)\r\n self.assertEqual(len(list(actual)), 1)\r\n actual = qiime.simsam.simsam_range(\r\n self.tutorial_otu_table, self.tutorial_tree,\r\n [1, 2], [0.1], self.tutorial_map)\r\n self.assertEqual(len(list(actual)), 2)\r\n actual = qiime.simsam.simsam_range(\r\n self.tutorial_otu_table, self.tutorial_tree,\r\n [2], [0.1, 0.001], self.tutorial_map)\r\n self.assertEqual(len(list(actual)), 2)\r\n actual = qiime.simsam.simsam_range(\r\n self.tutorial_otu_table, self.tutorial_tree,\r\n [1, 2], [0.1, 0.001], self.tutorial_map)\r\n self.assertEqual(len(list(actual)), 4)", "def solve(self, start, end):\r\n if start > end:\r\n return -1\r\n mid = ceil((start + end) / 2)\r\n self.comparisons += 1\r\n if self.list[mid] == self.item:\r\n return mid\r\n if self.item < self.list[mid]:\r\n return self.solve(start, mid-1)\r\n return self.solve(mid+1, end)", "def test_out_of_bounds(oob_from, oob_to):\n with pytest.raises(ValueError):\n haversine_vector([oob_from], [oob_to])\n with pytest.raises(ValueError):\n haversine_vector([oob_from], [oob_to], normalize=False)", "def _translate_range(self, len_, start, end):\n start = int(start)\n end = int(end)\n if start < 0:\n start += len_\n start = max(0, min(start, len_))\n if end < 0:\n end += len_\n end = max(-1, min(end, len_ - 1))\n return start, end", "def test_schedule(self):\n\n year = 2019\n month = 3\n day = 25 \n hour = 0\n\n mydate = datetime.datetime(year, month, day,hour)\n\n lower_boundary = mydate - timedelta(hours=3)\n upper_boundary = mydate + timedelta(hours=3)\n\n s1 = Schedule()\n s1.hour_from = 0\n s1.min_from = 0\n s1.hour_to = 21\n s1.min_to = 59\n s1.interval = 60*60*3 \n\n s2 = Schedule()\n s2.hour_from = 0\n s2.min_from = 0\n s2.hour_to = 21\n s2.min_to = 59\n s2.interval = 60*60*3 \n\n r = number_expected([s1,s2],lower_boundary,upper_boundary)\n\n self.assertEqual(r, 2 )", "def make_sure_between(val, start=None, end=None):\n if start is not None:\n if val < start:\n return start\n if end is not None:\n if val > end:\n return end\n return val", "def ranges_overlap(start1, end1, start2, end2):\n return start1 <= end2 and end1 >= start2", "def __init__(self, start: long, end: long):\n ...", "def build_constraints_boundaries(self):\n\n # Trapezoidal and Hermite-Simpson methods can't compute\n # defects at the last node contrary to pseudospectral methods\n coll_method = self.options['tr_method'] in [\n 'trapezoidal', 'hermite-simpson']\n n_nodes = self.problem.prm['n_nodes'] - \\\n 1 if coll_method else self.problem.prm['n_nodes']\n\n # Defects lower and upper boundaries\n defects_low = np.zeros(\n self.problem.prm['n_states'] * n_nodes)\n defects_upp = np.zeros(\n self.problem.prm['n_states'] * n_nodes)\n\n # Path lower and upper boundaries\n path_low = np.hstack([self.problem.low_bnd.path]\n * (self.problem.prm['n_nodes']))\n path_upp = np.hstack([self.problem.upp_bnd.path]\n * (self.problem.prm['n_nodes']))\n\n # Events lower and upper boundaries\n event_low = self.problem.low_bnd.event\n event_upp = self.problem.upp_bnd.event\n\n # Assembly of the lower and upper boundaries vectors\n low = np.concatenate((defects_low, 
path_low, event_low))\n upp = np.concatenate((defects_upp, path_upp, event_upp))\n\n return low, upp", "def overlap(t1start, t1end, t2start, t2end):\n\n return (t1start <= t2start <= t1end) or (t2start <= t1start <= t2end)", "def extend_pos(self, start: int, end: int) -> None:", "def check_positions_in_range(self):\n reachable = 0\n total = 0\n reachable, total = self.check_positions_in_range_for_list(reachable, total, self.close_positions_world)\n reachable, total = self.check_positions_in_range_for_list(reachable, total, self.medium_positions_world)\n reachable, total = self.check_positions_in_range_for_list(reachable, total, self.far_positions_world)\n\n return float(reachable) / float(total)", "def test_create_one_start(check_ranges, accounts, nft):\n nft.transferRange(accounts[4], 10002, 12001, {\"from\": accounts[2]})\n check_ranges([(1, 10001)], [(10001, 10002), (12001, 20001)], [(20001, 30001)], [(10002, 12001)])", "def extract_from_range(tgt_start, tgt_end, src_start, src_end, max_phrase_len):\n # print(\"rages\", tgt_start, tgt_end, src_start, src_end)\n if tgt_end < 0:\n return \n # If `src_align_idx` out of the `src_start` and `src_target`.\n for src_align_idx, tgt_align_idx in alignment:\n # target align point\n # sorce align point out of range\n if ((tgt_start <= tgt_align_idx <= tgt_end) and \n (src_align_idx < src_start or src_align_idx > src_end)): \n return\n phrase_set = set()\n ts = tgt_start # For increment\n while True:\n te = min(tgt_end, ts+max_phrase_len-1) # For decrement\n # te = tgt_end \n while True:\n # Add phrase pair (src_start, src_end, tgt_start, tgt_end)\n src_phrase = \" \".join(src_sent[i] for i in range(src_start,src_end+1))\n tgt_phrase = \" \".join(tgt_sent[i] for i in range(ts,te+1))\n phrase_set.add(((src_start, src_end+1), src_phrase, tgt_phrase))\n te+= 1\n # Add phrase until `te` aligned or out of range\n if te in tgt_aligned or te == tgt_len:\n break\n ts-=1\n # Add phrase until `te` aligned or out of range\n if ts in tgt_aligned or ts < 0:\n break\n \n return phrase_set", "def is_span_valid(self)->bool:\n if self.get_start_offset() < 0 or self.get_end_offset() < 0:\n logger.error(\"Start and end of position of the fragment must be non-negative: %d, %d\"\n %(self.get_start_offset(), self.get_end_offset()))\n return False\n if self.get_start_offset() >= self.get_end_offset():\n logger.error(\"End position of the fragment must be greater than the starting one: start=%d, end=%d\"%(self.get_start_offset(), self.get_end_offset()))\n return False\n return True", "def __init__(self, start, stop):\n if start > stop:\n raise IndexError(f'range is invalid: start={start} > stop={stop}')\n self._start = start - 1\n self._stop = stop - 1", "def isRangeValid(self) -> bool:\n ...", "def _validateInputs(self):\n if self.args[\"Counties\"] == [] and self.args[\"BBox\"] == None:\n raise Exception(\"Invalid arguments provided. Must provide either a geographical bounding box or a list of counties.\")\n\n if self.args[\"StartDateTime\"] > self.args[\"EndDateTime\"]:\n raise Exception(\"Invalid arguments provided. StartDateTime cannot be after EndDateTime\")", "def _step(self, start):\n #angle = np.random.uniform(0,2*np.pi) # only 2-dim\n #direction = angle2vec(angle)\n\n angle = np.random.randn(self.dim)\n direction = angle / la.norm(angle)\n \n if not self.query(start):\n print(f\"Given an invalid point! 
{start}\")\n \n testCounter = 0\n max_iter = 1000\n \n ## Case for adding to direction ##\n high = 1\n testCounter = 0\n while(self.query(start + high*direction)):\n high = high*2\n testCounter += 1\n if testCounter > max_iter:\n print(f\"Warning: Stuck in t_plus high loop with: \\n\\\n high = {high}\\n\")\n \n low = high/2\n testCounter = 0\n while(not self.query(start + low*direction)):\n low = low/2\n testCounter += 1\n if testCounter > max_iter:\n print(f\"Warning: Stuck in t_plus low loop with: \\n\\\n low = {low}\\n\")\n \n # now we know that (start + low * direction) is inside\n #assert(zonoid_membership_def(A, start+low*direction))\n # and that (start + high * direction) is outside\n #assert(not zonoid_membership_def(A, start+high*direction))\n \n tol = 1e-5\n t_plus = (high-low)/2\n old_t = 1\n current = start\n testCounter = 0\n while(abs(t_plus-old_t) > tol):\n old_t = t_plus\n t_plus = (high+low)/2\n testpoint = current + t_plus*direction\n if( self.query(testpoint) ):\n low = t_plus\n else:\n high = t_plus\n \n testCounter += 1\n if testCounter > max_iter:\n print(f\"Warning: Stuck in t_plus loop with: \\n\\\n t_plus = {t_plus}\\n\\\n t_old = {t_old}\\n\\\n high = {high}\\n\\\n low = {low}\\n\")\n t_plus = old_t\n \n ## Case for subtracting from direction\n high = -1\n testCounter = 0\n while(self.query(start + high*direction)):\n high = high*2\n testCounter += 1\n if testCounter > max_iter:\n print(f\"Warning: Stuck in t_minus high loop with: \\n\\\n high = {high}\\n\")\n \n low = high/2\n testCounter = 0\n while(not self.query(start + low*direction)):\n low = low/2\n testCounter += 1\n if testCounter > max_iter:\n print(f\"Warning: Stuck in t_minus low loop with: \\n\\\n low = {low}\\n\")\n \n # now we know that (start + low * direction) is inside\n #assert(zonoid_membership_def(A, start+low*direction))\n # and that (start + high * direction) is outside\n #assert(not zonoid_membership_def(A, start+high*direction))\n \n tol = 1e-10\n t_minus = (high-low)/2\n old_t = 1\n current = start\n testCounter = 0\n while(abs(t_minus-old_t) > tol):\n old_t = t_minus\n t_minus = (high+low)/2\n testpoint = current + t_minus*direction\n if( self.query(testpoint) ):\n low = t_minus\n else:\n high = t_minus\n \n testCounter += 1\n if testCounter > max_iter:\n print(f\"Warning: Stuck in t_minus loop with: \\n\\\n t_minus = {t_minus}\\n\\\n t_old = {t_old}\\n\\\n high = {high}\\n\\\n low = {low}\\n\")\n t_minus = old_t\n \n # Make the step\n final_t = np.random.uniform(t_minus, t_plus)\n #print(f\"Final t = {final_t}\")\n \n # remove extra returns for now for other compatibility\n return start + final_t*direction #, start+t_plus*direction, start+t_minus*direction", "def is_between(value, start, end, including_start=False, including_end=False):\n if not including_start and not including_end: # not include both start and end\n if (start < value < end):\n return True\n elif (start > end) and (start < value <= (2**m - 1) or 0 <= value < end):\n return True\n elif (start == end) and (value != start):\n return True\n return False\n elif not including_start and including_end: # include end but not the start\n if value == end:\n return True\n elif (start < value <= end):\n return True\n elif (start > end) and ((start < value <= (2**m - 1)) or (0 <= value <= end)):\n return True\n elif (start == end) and (value != start):\n return True\n return False\n elif including_start and not including_end: # include start but not the end\n if value == start:\n return True\n elif (start <= value < end):\n return 
True\n elif (start > end) and (start <= value <= (2**m - 1) or 0 <= value < end):\n return True\n elif (start == end) and (value != end):\n return False\n return False\n else: # include both start and end\n if (start <= value <= end):\n return True\n elif (start > end) and (start <= value <= (2**m - 1) or 0 <= value <= end):\n return True\n elif start == end:\n return True\n return False", "def test_schedule(self):\n\n year = 2019\n month = 3\n day = 25 \n hour = 0\n\n mydate = datetime.datetime(year, month, day,hour)\n\n lower_boundary = mydate - timedelta(hours=3)\n upper_boundary = mydate + timedelta(hours=3)\n\n s1 = Schedule()\n s1.hour_from = 0\n s1.min_from = 0\n s1.hour_to = 21\n s1.min_to = 59\n s1.interval = 60*60*3 \n\n s2 = Schedule()\n s2.hour_from = 0\n s2.min_from = 0\n s2.hour_to = 21\n s2.min_to = 59\n s2.interval = 60*60*3 \n\n s3 = Schedule()\n s3.hour_from = 0\n s3.min_from = 0\n s3.hour_to = 21\n s3.min_to = 59\n s3.interval = 60*60*3 \n\n\n r = number_expected([s1,s2,s3],lower_boundary,upper_boundary)\n\n self.assertEqual(r, 2 )", "def set_boundary(self, y, start_x, end_x):\n pass", "def test_schedule(self):\n\n year = 2019\n month = 3\n day = 25 \n hour = 0\n\n mydate = datetime.datetime(year, month, day,hour)\n\n lower_boundary = mydate - timedelta(hours=3)\n upper_boundary = mydate + timedelta(hours=3)\n\n s = Schedule()\n s.hour_from = 0\n s.min_from = 0\n s.hour_to = 21\n s.min_to = 59\n s.interval = 60*60*3 \n\n r = number_expected([s,],lower_boundary,upper_boundary)\n\n self.assertEqual(r, 2 )", "def clean(self):\n\n if self.dateEnd <= self.dateStart:\n raise ValidationError(\"Start date must be before end date!\")\n \n # If there are bookings in the database with:\n # - same room__id\n # - different booking__id\n # - first date at or after dateStart (but before dateEnd)\n # - last date at or before dateEnd (but after dateStart)\n # Raise ValidationError\n if Booking.objects.filter(room__id=self.room_id).exclude(pk=self.pk).filter(\n Q(dateEnd__gt=self.dateStart, dateStart__lt=self.dateEnd)\n ).exists():\n raise ValidationError(\"Overlapping dates, room has been booked.\")", "def validate_input(start, goal):\r\n if len(start) != 9 or len(goal) != 9: # Only 8-Puzzle board allowed\r\n print(\"Incorrect state space length.\")\r\n return False\r\n\r\n state_dict = {}\r\n for value in start:\r\n if value == \"9\":\r\n print(\"Value '9' out of bound.\") # Value 9 is not in 8-Puzzle\r\n return False\r\n if not value.isdigit():\r\n print(\"Non-integer in state space.\")\r\n return False\r\n if value in state_dict: # Check for repeated values\r\n print(\"Repeated value in state space.\")\r\n return False\r\n state_dict[value] = 1\r\n\r\n for value in goal: # Check goal if is permutation of start\r\n if value not in state_dict:\r\n print(\"Goal state space does not match start state space.\")\r\n return False\r\n state_dict[value] -= 1\r\n\r\n if \"0\" not in state_dict: # Check if one blank cell is present\r\n print(\"No empty cell in state space.\")\r\n return False\r\n\r\n return True", "def can_reach_square(self, start, end):\n raise NotImplementedError", "def test_schedule(self):\n\n year = 2019\n month = 3\n day = 25 \n hour = 0\n\n mydate = datetime.datetime(year, month, day,hour)\n\n lower_boundary = mydate - timedelta(hours=3)\n upper_boundary = mydate + timedelta(hours=3)\n\n s = Schedule()\n s.hour_from = 3\n s.min_from = 0\n s.hour_to = 3\n s.min_to = 59\n s.interval = 60*60*6 \n\n r = number_expected([s,],lower_boundary,upper_boundary)\n\n 
self.assertEqual(r, 0 )", "def check_obstruction(self, start_x, start_y, end_x, end_y, piece):\n\n # Displacement for any single point in the area\n disp_x = end_x - start_x\n disp_y = end_y - start_y\n\n # Piece's area to shift for obstructions\n space = piece.get_area()\n\n # Game board area, initialize check spaces for while loop\n board_space = self._game_board.get_board_area()\n check_x = 0\n check_y = 0\n\n # Assign correct shift value for displacement\n if disp_x > 0:\n shift_x = 1\n elif disp_x == 0:\n shift_x = 0\n else:\n shift_x = -1\n\n if disp_y > 0:\n shift_y = 1\n elif disp_y == 0:\n shift_y = 0\n else:\n shift_y = -1\n\n # For each point in space\n for point in space:\n scale = 1\n # Gradually shift values in piece area up to displacement and check if the space is occupied\n while (check_x, check_y) != (point[0] + disp_x, point[1] + disp_y):\n check_x = point[0] + shift_x * scale\n check_y = point[1] + shift_y * scale\n\n # If an obstruction is found, and it is not a piece meant to be captured\n # ie, a piece in the end-position, return True\n if ((check_x, check_y) not in space) and board_space[check_x][check_y] != \" \":\n if (check_x, check_y) != (point[0] + disp_x, point[1] + disp_y):\n return True\n scale += 1\n # Return False if not obstructed\n return False", "def _initialize_bounds(problem, bounds, get_bound, set_bound):\n for constraint in problem.constraints:\n root_expr = constraint.root_expr\n expr_bounds = Interval(constraint.lower_bound, constraint.upper_bound)\n if root_expr not in bounds:\n set_bound(root_expr, expr_bounds)\n else:\n existing_bounds = get_bound(root_expr)\n new_bounds = existing_bounds.intersect(expr_bounds)\n set_bound(root_expr, new_bounds)", "def checked_positions():\n for base_position in chain([me.shipyard], me.get_dropoffs()):\n x_shipyard = base_position.position.x\n y_shipyard = base_position.position.y\n for x in range(-search_range, search_range):\n for y in range(-search_range, search_range):\n yield hlt.Position(\n x=x_shipyard + x,\n y=y_shipyard + y)", "def test_slice_locations(self):\n self.assertAlmostEqual(self.cheese.origin_slice, self.origin_slice, delta=1)", "def test_data_range(self):\n ex = self.ex\n m = self.m\n n = self.n\n\n lenrange = random.randint(1, 10)\n nreps = random.randint(1, 10)\n\n ex.range = [\"i\", range(lenrange)]\n ex.nreps = nreps\n\n ex.vary[\"X\"][\"along\"] = 0\n ex.vary[\"X\"][\"with\"].add(\"rep\")\n ex.infer_lds()\n\n cmds = ex.generate_cmds()\n\n self.assertIn([\"smalloc\", \"X\", nreps * m * n + (nreps - 1) * m], cmds)\n rangeidx = random.randint(0, lenrange - 1)\n repidx = random.randint(0, nreps - 1)\n self.assertIn([\"soffset\", \"X\", repidx * m,\n \"X_%d_%d\" % (rangeidx, repidx)], cmds)", "def _validate_interval(interval: Interval) -> None:\n origin, end = interval\n\n if end < origin:\n raise ValueError(f\"Interval [{origin}, {end}] is not a proper one.\") # pragma: no cover", "def test_spw_id_range(self):\n spw = '23~25'\n ref_idx = [0,1,3]\n self.res=self.run_task(infile=self.rawfile,spw=spw,calmode=self.calmode,outfile=self.outname,outform='ASAP')\n self.assertEqual(self.res,None,\n msg='Any error occurred during calibration')\n self._compare_with_analytic(self.outname, self.line, self.baseline, ref_idx)", "def test_is_mountain_in_range(self):\n self.assertTrue(self.user_location.is_mountain_in_range(self.mountain_one))\n self.assertFalse(self.user_location.is_mountain_in_range(self.mountain_two))", "def validate_options(namespace):\n try:\n start = namespace.start_range\n end = 
namespace.end_range\n except AttributeError:\n return\n else:\n namespace.ocp_range = None\n del namespace.start_range\n del namespace.end_range\n if start or end:\n start = start if start else 0\n end = end if end else \"\"\n namespace.ocp_range = \"bytes={}-{}\".format(start, end)", "def startAndEnd(self):\n upperRow = 0\n upperCol = 0\n lowerRow = 0\n lowerCol = 0\n if self.selectionMode == kSelectionNone:\n upperRow = self.penRow\n upperCol = self.penCol\n lowerRow = self.penRow\n lowerCol = self.penCol\n elif self.selectionMode == kSelectionAll:\n upperRow = 0\n upperCol = 0\n lowerRow = self.parser.rowCount() - 1\n lowerCol = self.parser.rowWidth(-1)\n elif self.selectionMode == kSelectionBlock:\n upperRow = min(self.markerRow, self.penRow)\n upperCol = min(self.markerCol, self.penCol)\n lowerRow = max(self.markerRow, self.penRow)\n lowerCol = max(self.markerCol, self.penCol)\n elif (self.selectionMode == kSelectionCharacter or\n self.selectionMode == kSelectionLine or\n self.selectionMode == kSelectionWord):\n upperRow = self.markerRow\n upperCol = self.markerCol\n lowerRow = self.penRow\n lowerCol = self.penCol\n if upperRow == lowerRow and upperCol > lowerCol:\n upperCol, lowerCol = lowerCol, upperCol\n elif upperRow > lowerRow:\n upperRow, lowerRow = lowerRow, upperRow\n upperCol, lowerCol = lowerCol, upperCol\n #app.log.detail('start and end', upperRow, upperCol, lowerRow, lowerCol)\n return (upperRow, upperCol, lowerRow, lowerCol)", "def __init__(self, start, end):\n self.start = start\n self.end = end\n delta = end - start\n self.vector = delta", "def test_out_of_bounds(self) -> None:\n\n self.assertIsInstance(self.movement.out_of_bounds(self.pop.get_person(),\n np.array([[0,1]] * 10),np.array([[0,1]] * 10)), np.ndarray)\n self.pop.persons[:,idx.speed] = 1\n self.pop.persons[:,idx.x_axis] = 1.1\n self.pop.persons[:,idx.y_axis] = 1.1\n self.pop.persons[:,idx.x_dir] = 0.5\n self.pop.persons[:,idx.y_dir] = 0.5\n\n self.assertLess(list(self.movement.out_of_bounds(self.pop.get_person(),\n np.array([[0,1]] * 10),np.array([[0,1]] * 10))[:,idx.x_dir]), [0]*10)\n self.assertLess(list(self.movement.out_of_bounds(self.pop.get_person(),\n np.array([[0,1]] * 10),np.array([[0,1]] * 10))[:,idx.x_dir]), [0]*10)\n\n self.pop.persons[:,idx.x_axis] = -0.1\n self.pop.persons[:,idx.y_axis] = -0.1\n self.pop.persons[:,idx.x_dir] = -0.5\n self.pop.persons[:,idx.y_dir] = -0.5\n self.assertGreater(list(self.movement.out_of_bounds(self.pop.get_person(),\n np.array([[0,1]] * 10),np.array([[0,1]] * 10))[:,idx.x_dir]), [0]*10)\n self.assertGreater(list(self.movement.out_of_bounds(self.pop.get_person(),\n np.array([[0,1]] * 10),np.array([[0,1]] * 10))[:,idx.x_dir]), [0]*10)", "def check_optional_range(specific=None, begin=None, end=None):\n if specific and (begin and end):\n raise ValueError('Cannot pass both a range and specific')\n\n if (begin and not end) or (end and not begin):\n raise ValueError(\"Must pass both begin and end for ranges\")", "def __init__(self, begin, end):\n super(sppasInterval, self).__init__()\n\n if isinstance(begin, sppasPoint) is False:\n AnnDataTypeError(begin, \"sppasPoint\")\n\n if isinstance(end, sppasPoint) is False:\n AnnDataTypeError(end, \"sppasPoint\")\n\n if sppasInterval.check_types(begin, end) is False:\n raise AnnDataEqTypeError(begin, end)\n\n if sppasInterval.check_interval_bounds(begin, end) is False:\n raise IntervalBoundsError(begin, end)\n\n # we accept some overlap\n if begin >= end:\n logging.warning('begin ({!s:s} >= end {!s:s})'.format(begin, end))\n\n 
self.__begin = begin\n self.__end = end", "def test_schedule(self):\n\n year = 2019\n month = 3\n day = 25 \n hour = 0\n\n mydate = datetime.datetime(year, month, day,hour)\n\n lower_boundary = mydate - timedelta(hours=3)\n upper_boundary = mydate + timedelta(hours=3)\n\n s1 = Schedule()\n s1.hour_from = 0\n s1.min_from = 30\n s1.hour_to = 23\n s1.min_to = 30\n s1.interval = 60*30\n\n s2 = Schedule()\n s2.hour_from = 0\n s2.min_from = 30\n s2.hour_to = 23\n s2.min_to = 30\n s2.interval = 60*60\n\n s3 = Schedule()\n s3.hour_from = 22\n s3.min_from = 0\n s3.hour_to = 23\n s3.min_to = 30\n s3.interval = 60*5\n\n\n r = number_expected([s1,s2,s3],lower_boundary,upper_boundary)\n\n self.assertEqual(r, 25 )", "def checkSolution(self):\n movesToEndblock = self.gridSize - self.changeable[0] - 2\n if self.checkMove(0,movesToEndblock) == 0:\n return 0\n return 1", "def _check_market_place_in_range(self):\n\t\tfor building in self.get_buildings_in_range():\n\t\t\tif building.id == BUILDINGS.MARKET_PLACE_CLASS:\n\t\t\t\tif StaticPather.get_path_on_roads(self.island, self, building) is not None:\n\t\t\t\t\t# a market place is in range\n\t\t\t\t\treturn\n\t\t# no market place found\n\t\tself.session.ingame_gui.message_widget.add(self.position.origin.x, self.position.origin.y, \\\n\t\t 'NO_MARKET_PLACE_IN_RANGE')", "def test_merge_not_fail_start_point_end_point(self):\n path_a = PathFactory.create(name=\"A\", geom=LineString((0, 0), (10, 0)))\n path_b = PathFactory.create(name=\"B\", geom=LineString((10, 0), (20, 0)))\n PathFactory.create(name=\"C\", geom=LineString((0, 0), (0, 10)))\n response = self.client.post(reverse('core:path-drf-merge-path'), {'path[]': [path_a.pk, path_b.pk]})\n self.assertIn('success', response.json())", "def check_types(begin, end):\n try:\n begin.get_midpoint()\n end.get_midpoint()\n except AttributeError:\n return False\n\n return isinstance(begin.get_midpoint(), type(end.get_midpoint()))", "def merge_ranges():", "def test_member_start_end(self):\n entries = {\n 'uid=test,ou=people,dc=esmgquadrivium,dc=nl': {\n 'uid': ['test'],\n 'qMemberStart': [datetime(2010, 2, 2, tzinfo=timezone.utc)],\n 'qMemberEnd': [datetime(2010, 5, 2, tzinfo=timezone.utc)],\n },\n 'cn=huidige leden,ou=groups,dc=esmgquadrivium,dc=nl': {\n 'cn': ['Huidige leden'],\n 'member': [],\n }\n }\n clone(entries)\n self.assertEqual(1, Person.objects.count())\n self.assertEqual(1, QGroup.objects.count())\n self.assertEqual(0, Person.objects.first().groups.count()) # Person is not a current group member\n self.assertEqual(1, GroupMembership.objects.count())\n membership = GroupMembership.objects.first()\n self.assertEqual(datetime(2010, 2, 2, tzinfo=timezone.utc), membership.start)\n self.assertEqual(datetime(2010, 5, 2, tzinfo=timezone.utc), membership.end)", "def sub_poset(self, start, end):\n if start < 1 or start > end or end > self.size() + 1:\n raise ValueError(\"Invalid starting or ending value, accepted: 1 <= start <= end <= size+1\")\n if start == end:\n return TamariIntervalPoset(0, [])\n relations = [(i - start + 1, j - start + 1) for (i, j) in self.increasing_cover_relations() if i >= start and j < end]\n relations.extend([(j - start + 1, i - start + 1) for (j, i) in self.decreasing_cover_relations() if i >= start and j < end])\n return TamariIntervalPoset(end - start, relations)", "def clean(self):\r\n query = MidcourseReverificationWindow.objects.filter(\r\n course_id=self.course_id,\r\n end_date__gte=self.start_date,\r\n start_date__lte=self.end_date\r\n )\r\n if query.count() > 0:\r\n raise 
ValidationError('Reverification windows cannot overlap for a given course.')", "def has_start_stop_acqtamps(self):\n try:\n if not all([isinstance(x, datetime) for x in self.start_acq]):\n raise Exception(\"Invalid value encountered in start_acq\")\n if not all([isinstance(x, datetime) for x in self.stop_acq]):\n raise Exception(\"Invalid value encountered in stop_acq\")\n if not all([len(self) == len(x) for x in [self.start_acq,\\\n self.stop_acq]]):\n raise Exception(\"Lengths of arrays do not match...\")\n return True\n except Exception as e:\n print((repr(e)))\n return False", "def test_create_one_end(check_ranges, accounts, nft):\n nft.transferRange(accounts[4], 19000, 20000, {\"from\": accounts[2]})\n check_ranges([(1, 10001)], [(10001, 19000), (20000, 20001)], [(20001, 30001)], [(19000, 20000)])", "def __init__(self, start_date_str: str, end_date_str: str):\r\n start_date, end_date = create_date_from_string(start_date_str, end_date_str)\r\n if is_date_valid(start_date, end_date):\r\n self.days_range_array = create_days_range(start_date, end_date)\r\n self.months_range_array = create_months_range(self.days_range_array)\r\n else:\r\n raise Exception", "def check_required_range(specific=None, begin=None, end=None):\n\n if not specific and not (begin and end):\n raise ValueError('You must pass some form of date filter')\n\n if specific and (begin and end):\n raise ValueError('Cannot pass both a range and specific dates')\n\n if (begin and not end) or (end and not begin):\n raise ValueError(\"Must pass both begin and end for date range\")", "def generate_possible_coords(starting,a_range,min_cell_distance): \n a_raw= np.arange(a_range[0]+starting,a_range[1]-starting+1,min_cell_distance)\n \n if len(a_raw) == 0:\n return a_raw\n \n if not check_if_range_filled(a_range,a_raw[-1], min_cell_distance):\n # put one more number on the end if the range is not filled\n a_raw= np.arange(a_range[0]+starting,a_range[1],min_cell_distance) \n\n return a_raw", "def validate_input(helper, definition):\n\n start_time_start = definition.parameters.get('start_time_start', None)\n interval = definition.parameters.get('interval', None)\n\n if int(interval) < 86400:\n raise ValueError(\n \"Interval should be 86400 or more for historical data, not {}.\".format(interval))\n\n try:\n # validate start_time_start format:\n if start_time_start:\n datetime.strptime(\n start_time_start, '%m/%d/%Y %H:%M:%S')\n except ValueError:\n raise ValueError(\n \"Incorrect data format, time should be MM/DD/YYYY hh:mm:ss\")\n\n enddt = datetime.utcnow().date() - timedelta(3)\n end_time = datetime.combine(enddt, datetime.max.time())\n start_time_start = datetime.strptime(start_time_start, '%m/%d/%Y %H:%M:%S')\n if start_time_start >= end_time:\n raise ValueError(\n \"Begin Date must be at least 3 days ago. 
Please enter a time before {}.\".format(end_time.strftime('%m/%d/%Y %H:%M:%S')))\n pass", "def test_real_range_constraint_validation():\n\n # Test valid values OK\n minimum = 1\n maximum = 2\n c = RealRangeConstraint(name=\"Property Band gap\",minimum=minimum,maximum=maximum)\n\n # Test minimum must be less than maximum\n minimum = 3\n maximum = 2\n try:\n c = RealRangeConstraint(name=\"Property Band gap\",minimum=minimum,maximum=maximum)\n assert False, \"RealRangeConstraint should require that minimum be less than maximum\"\n except CitrinationClientError:\n pass\n\n # Test values must be castable to float\n minimum = {}\n maximum = 2\n try:\n c = RealRangeConstraint(name=\"Property Band gap\",minimum=minimum,maximum=maximum)\n assert False, \"RealRangeConstraint should require that minimum and maximum be castable to floats\"\n except CitrinationClientError:\n pass" ]
[ "0.68125606", "0.6379375", "0.6144579", "0.60662675", "0.6036222", "0.6026443", "0.59448326", "0.58304745", "0.58173734", "0.5811092", "0.57554084", "0.5728628", "0.5727028", "0.5721812", "0.5717647", "0.57121235", "0.5684069", "0.5680649", "0.56578076", "0.5622473", "0.5615131", "0.55987024", "0.55940014", "0.5572318", "0.5569193", "0.55659366", "0.5546586", "0.5541315", "0.55348337", "0.55165625", "0.5504494", "0.5467446", "0.5463169", "0.5451673", "0.5445094", "0.54349875", "0.5432644", "0.5429197", "0.5428532", "0.5414532", "0.54068357", "0.53951275", "0.5388941", "0.53855526", "0.53729194", "0.53714305", "0.53673226", "0.5364817", "0.53629124", "0.5353574", "0.5348257", "0.5312906", "0.5307043", "0.53041106", "0.52936345", "0.52926624", "0.5279652", "0.5279325", "0.5275391", "0.5273942", "0.5269479", "0.5263694", "0.52600414", "0.5258144", "0.52524596", "0.52508634", "0.52456653", "0.52438504", "0.52382606", "0.5234614", "0.52263606", "0.522482", "0.5222872", "0.52224714", "0.52161527", "0.52127767", "0.5211679", "0.520973", "0.5207807", "0.52035326", "0.5203362", "0.5202677", "0.5202466", "0.52013654", "0.5199409", "0.519878", "0.5197896", "0.51965857", "0.51881707", "0.51859444", "0.5183593", "0.5177139", "0.5176958", "0.5174535", "0.5172633", "0.51725674", "0.5171813", "0.517011", "0.5166768", "0.5163557" ]
0.61045414
3
The black side is first to place a disc; add the move to black_prompt's responses and to white_prompt's requests, and vice versa
def play(self, turn):
    # global black_prompt, white_prompt, res, pi, board
    if turn % 2 == 0:
        prompt, requests_add, responses_add, color_to_play = self.bp, self.bp, self.wp, BLACK
        print("pure")
        res = pure_MCTS.UCTAlg(json=prompt).run(time_limit=1)
    else:
        prompt, requests_add, responses_add, color_to_play = self.wp, self.wp, self.bp, WHITE
        print("alpha")
        res = mcts.uctAlg.UCTAlg(predict_model=player, json=prompt, mode='comp').run(time_limit=1)[0]
    print(res)
    self.board.disc_place(color_to_play, res[0], res[1])  # record steps to board
    dct = {'x': res[0], 'y': res[1]}
    requests_add["responses"].append(dct)
    responses_add["requests"].append(dct)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hidden_message():\n print(\"\")\n print(f\"{YELLOW}[{MIDDLE_DOT}]{RESET} \"\n \"Choose ZWC option (1 - Encode / 2 - Decode): \", end=\"\")\n option = int(input().lower())\n if option == 1:\n encode_text()\n elif option == 2:\n print(f\"{GREEN}[+]{RESET} Decoded Message: \" + decode_text())", "def handle_commands_preset(self,cl,addr) :\n self.curDir = ['CTF','Challenges','tempUser'+str(random.randint(100,999))]\n try :\n client = cl\n if self.curDir != [] : \n userp = \"temp-user-\"+addr[0].replace('.','-')+\"@ieeectf:~/{}$ \".format('/'.join(self.curDir))\n else :\n userp = \"temp-user-\"+addr[0].replace('.','-')+\"@ieeectf:~$ \"\n self.userp = userp.encode()\n client.send(\"\"\"\nCustom Shell Server With Limited Functionality\n\nNew User Login from {} at {}\n \\n\"\"\".format(addr[0],time.ctime()).encode())\n shellin = \"\" \n while True:\n if self.curDir != [] : \n userp = \"temp-user-\"+addr[0].replace('.','-')+\"@ieeectf:~/{}$ \".format('/'.join(self.curDir))\n else :\n userp = \"temp-user-\"+addr[0].replace('.','-')+\"@ieeectf:~$ \"\n self.userp = userp.encode()\n client.send(self.userp)\n shellin = client.recv(2048).decode().strip('\\n')\n if shellin == \"exit\" or shellin == \"exit \" or shellin ==\"exit \" or shellin ==\"exit \" :\n break\n elif shellin == \"\" :\n continue\n elif shellin.split()[0] in self.denied :\n client.send(self.err.format(shellin.split()[0]).encode())\n else :\n self.handle_extended_commands(client,addr,shellin)\n continue\n client.close()\n except Exception as E:\n print(E)\n print(Log(\"Connection with {} Terminated\".format(addr)))", "def _prepare(self):\n self.code = random.randint(1000,9999)\n self.user_guess.append(\"----\")\n self.user_guess.append(\"----\")\n self.applied_guess.append(\"****\")\n self.applied_guess.append(\"****\")", "def prompt(self, question):\n self.output(' ')\n self.output(question)\n self.output(self.parse_response(str(self.ui())))", "def play():\n display_starting_message()\n print(\"\")\n print(\"*\"*10)\n for question_number, question in enumerate(list_of_questions):\n print(question)\n print(\"\")\n for responses in list_of_questions[question]:\n print(responses)\n pick_one = input(\"pick one: \")\n check_murder_sauce(question, pick_one)\n\n murder_sauce_result(murder_sauce)", "def silkscreen_commands(self, commands):\n self.pcb_layers[\"silkscreen\"].commands = commands", "def lobby_screen_to_other_ready_action(ai_settings, screen,buttons, screen_status, button_status, card_database_filter, user, player2):\n save_pass = True\n # Clear dup number each call\n\n with open('user_deck_list_string.txt','r') as f:\n f.seek(0)\n for line in f:\n if 'DECK_LIST_' + user.deck_list_index in line:\n list1 = make_deck_from_string(line.replace('DECK_LIST_' + user.deck_list_index + ' = ', ''), ai_settings, screen, buttons,screen_status, button_status, card_database_filter, user, player2)\n\n if user.deck_list_index == '0' or user.deck_list_index == 'new':\n button_status.lobby_screen_end_screen_warning_button_display = 'no deck'\n save_pass = False\n\n elif len(list1) < 40:\n\n button_status.lobby_screen_end_screen_warning_button_display = 'deck less than 40 cards'\n save_pass = False\n\n\n if save_pass:\n\n player2.identity = 'pvp'\n user.deck_list = []\n user.character_card = ''\n\n if button_status.lobby_screen_room_detail_display == 'other':\n button_status.lobby_screen_other_ready_to_go = True\n elif button_status.lobby_screen_room_detail_display == 'my':\n button_status.lobby_screen_my_ready_to_go = True\n\n # Render user's 
deck\n with open('user_deck_list_string.txt','r') as f:\n f.seek(0)\n for line in f:\n if 'DECK_LIST_' + user.deck_list_index in line:\n user.deck_list = make_deck_from_string(line.replace('DECK_LIST_' + user.deck_list_index + ' = ', ''), ai_settings, screen, buttons,screen_status, button_status, card_database_filter, user, player2)\n if 'CHARACTER_' + user.deck_list_index in line:\n user.character_card = make_deck_from_string(line.replace('CHARACTER_' + user.deck_list_index + ' = ', ''), ai_settings, screen, buttons,screen_status, button_status, card_database_filter, user, player2)[0]\n\n if button_status.lobby_screen_room_detail_display == 'my':\n user.random_deck_list = random.sample(user.deck_list, len(user.deck_list))\n user.remain_deck_list = user.random_deck_list[5:]\n user.hand_list = user.random_deck_list[0:5]\n elif button_status.lobby_screen_room_detail_display == 'other':\n user.random_deck_list = random.sample(user.deck_list, len(user.deck_list))\n user.remain_deck_list = user.random_deck_list[6:]\n user.hand_list = user.random_deck_list[0:6]", "def ask_for_blessing(test):\n print()\n msg = \"Is this output correct ([Y]es/[N]o/[S]top)? \"\n print(Ansi.in_color(msg, Ansi.WHITE), end=\"\")\n choice = input().lower()\n if choice.startswith(\"y\"):\n test.bless_output()\n test.result = NEW_OUTPUT\n elif choice.startswith(\"s\"):\n sys.exit(2)\n else:\n test.result = WRONG_OUTPUT", "def showPrompt(self):\r\n self.terminal.nextLine()\r\n self.terminal.write(self.ps[self.pn])", "def initial_h_mode(stdscr):\r\n j=0\r\n unit=0.36 #default\r\n initial_points=[]\r\n stdscr.clear()\r\n while True:\r\n \r\n #print(\"------------Hand control mode------------\")\r\n #print(\"unit:\", unit)\r\n #print(\"current position: \"+\"( \"+str(_x_degrees)+\" , \"+str(_y_degrees)+\" )\")\r\n stdscr.addstr(0, 0, \"------------initialize hand control mode------------\")\r\n stdscr.addstr(1, 0, \"unit: {}\".format(unit))\r\n stdscr.addstr(2, 0, \"current position: ({},{})\".format(_x_degrees,_y_degrees))\r\n if j==0:\r\n stdscr.addstr(3, 0,\"please shoot upper left\")\r\n elif j==1:\r\n stdscr.addstr(3, 0,\"please shoot upper right\")\r\n elif j==2:\r\n stdscr.addstr(3, 0,\"please shoot lower right\")\r\n elif j==3:\r\n stdscr.addstr(3, 0,\"please shoot lower left\")\r\n\r\n ch=stdscr.getch()\r\n stdscr.clear()\r\n \r\n if ch==259:#up\r\n rotate_degree(0,unit)\r\n elif ch==258:#down\r\n rotate_degree(0,-unit)\r\n elif ch==261:#left\r\n rotate_degree(unit,0)\r\n elif ch==260:#right\r\n rotate_degree(-unit,0)\r\n elif ch==43:#+\r\n if unit>=72:\r\n continue\r\n unit=unit+0.36\r\n elif ch==45:#-\r\n if unit<=0.36:\r\n continue\r\n unit=unit-0.36\r\n elif ch==113 or ch==813:\r\n skip_shoot_by_tcp=True\r\n break\r\n elif ch==10: # enter\r\n initial_points.append(_x_degrees)\r\n initial_points.append(_y_degrees)\r\n j=j+1\r\n print(initial_points)\r\n if j==4:\r\n sendAngles(initial_points)\r\n return\r\n elif ch==8: #backspace\r\n if j==0:\r\n continue\r\n else:\r\n j=j-1\r\n initial_points.pop()\r\n initial_points.pop()\r\n stdscr.refresh()", "def show_menu(stdscr, choice=0):\n stdscr.clear()\n curses.curs_set(False)\n stdscr.addstr(\"*** --- Interface de chiffrement --- ***\\n\\n\")\n if choice == 1:\n stdscr.addstr(\"->1<- Chiffrement symétrique avec Threefish\\n\", curses.color_pair(1))\n else:\n stdscr.addstr(\"->1<- Chiffrement symétrique avec Threefish\\n\")\n if choice == 2:\n stdscr.addstr(\"->2<- Chiffrement de Cramer-Shoup\\n\", curses.color_pair(1))\n else:\n stdscr.addstr(\"->2<- Chiffrement de 
Cramer-Shoup\\n\")\n if choice == 3:\n stdscr.addstr(\"->3<- Hashage d'un fichier\\n\", curses.color_pair(1))\n else:\n stdscr.addstr(\"->3<- Hashage d'un fichier\\n\")\n if choice == 4:\n stdscr.addstr(\"->4<- Déchiffrement symétrique avec Threefish\\n\", curses.color_pair(1))\n else:\n stdscr.addstr(\"->4<- Déchiffrement symétrique avec Threefish\\n\")\n if choice == 5:\n stdscr.addstr(\"->5<- Déchiffrement de Cramer-Shoup\\n\", curses.color_pair(1))\n else:\n stdscr.addstr(\"->5<- Déchiffrement de Cramer-Shoup\\n\")\n if choice == 6:\n stdscr.addstr(\"->6<- Vérification du hash\\n\", curses.color_pair(1))\n else:\n stdscr.addstr(\"->6<- Vérification du hash\\n\")\n if choice == 7:\n stdscr.addstr(\"->q<- Pour quitter\\n\", curses.color_pair(1))\n else:\n stdscr.addstr(\"->q<- Pour quitter\\n\")\n stdscr.refresh()", "def _do_outputs(self):\n self._puzzle.display_revealed_puzzle()\n hint = self._puzzle.get_hint()\n self._console.write(hint)\n print(\"\")\n self._jumper.draw_jumper()\n print(\"\")\n\n # These ifs end the game\n if self._puzzle.is_solved():\n self._keep_playing = False\n self._puzzle.display_win_screen()\n \n if self._puzzle.incorrect_guesses >= 4:\n self._keep_playing = False\n self._puzzle.display_loss_screen()", "def initial_draw(self):\n self.player.take_card(self.deck)\n self.dealer.take_card(self.deck)\n self.player.take_card(self.deck)\n self.dealer.put_face_down(self.deck)", "def displayHands(p_hand, d_hand):\n os.system('clear') # Call to OS clear the screen to clean up output\n print(\"\\nPlayer hand: \", p_hand.showHand())\n print(\"Player score: \", p_hand.handSum())\n\n print(\"\\nDealer hand: \", d_hand.showHand())\n print(\"Dealer score: \", d_hand.handSum())", "def lobby_screen_pick_deck_display(ai_settings, screen, buttons,screen_status, button_status, card_database_filter, user, action, player2):\n # Pick deck text\n button_text_1 = Button('Pick an exist deck or create a new one: ','', (250,250,250),400, 100, 400, 35, font_color = (0,0,0), alpha = 150)\n button_text_1.update()\n button_text_1.draw(screen)\n\n # Deck list buttons\n with open('user_deck_list_string.txt','r') as f:\n f.seek(0)\n if len(f.readlines()) >= 12:\n pass\n else:\n button_new_deck = Button('+ New Deck','', (250,250,250),1020, 110, 120, 35, font_color = (0,0,0), alpha = 150)\n button_new_deck.update()\n button_new_deck.draw(screen)\n\n\n f.seek(0)\n x = len(f.readlines())\n y = 0\n deck_list_index = 0\n\n for i in range(1,7):\n f.seek(0)\n for line in f:\n if 'DECK_LIST_' + str(i) not in line:\n y += 1\n if y < x: # DECK_LIST_i exist\n f.seek(0)\n for line in f:\n if 'DECK_LIST_' + str(i) in line:\n deck_length = len(make_card_list_from_string(line.replace('DECK_LIST_' + str(i) + ' = ', ''), ai_settings, screen, buttons,screen_status, button_status, card_database_filter, user, player2))\n # deck_length = int((len(line.replace('DECK_LIST_' + str(i) + ' = ', '')) -1)/14)\n if 'CHARACTER_' + str(i) in line:\n character_length = 1\n character_card = eval('card_' + line.replace('CHARACTER_' + str(i) + ' = ', '')[7:12])\n\n if user.deck_list_index == str(i):\n\n button_top = Button(character_card.name + ': ','', (100,30,130),85 + 180* (i-1), 165, 130, 60)\n button_top.update()\n button_top.draw(screen)\n\n if deck_length < 40:\n button_bottom = Button(str(character_length) + '/1 | ' + str(deck_length) +'/40','', (100,30,130),85 + 180* (i-1), 225, 130, 50, font_color = (250,0,0))\n button_bottom.update()\n button_bottom.draw(screen)\n else:\n button_bottom = Button(str(character_length) + 
'/1 | ' + str(deck_length) +'/40','', (100,30,130),85 + 180* (i-1), 225, 130, 50)\n button_bottom.update()\n button_bottom.draw(screen)\n\n else:\n\n button_top = Button(character_card.name + ': ','', (160,160,160),85 + 180* (i-1), 165, 130, 60, alpha = 240)\n button_top.update()\n button_top.draw(screen)\n\n if deck_length < 40:\n button_bottom = Button(str(character_length) + '/1 | ' + str(deck_length) +'/40','', (160,160,160),85 + 180* (i-1), 225, 130, 50, font_color = (200,0,0), alpha = 240)\n button_bottom.update()\n button_bottom.draw(screen)\n else:\n button_bottom = Button(str(character_length) + '/1 | ' + str(deck_length) +'/40','', (160,160,160),85 + 180* (i-1), 225, 130, 50, alpha = 240)\n button_bottom.update()\n button_bottom.draw(screen)\n\n y = 0\n\n else: # DECK_LIST_i not exist\n\n button = Button('Empty','', (200,200,200),85 + 180* (i-1), 165, 130, 110, alpha = 80)\n button.update()\n button.draw(screen)\n\n y = 0\n\n\n for i in range(1,7):\n if user.deck_list_index == str(i):\n button_edit = Button('Edit','', (50,50,170),85 + 180* (i-1), 282, 60, 30)\n button_edit.update()\n button_edit.draw(screen)\n\n button_delete = Button('Delete','', (160,30,30), 155 + 180* (i-1), 282, 60, 30)\n button_delete.update()\n button_delete.draw(screen)", "def session_preparation(self):\n self._test_channel_read()\n self.set_base_prompt()\n self.disable_paging(command=\"screen-length 0 temporary\")\n # Clear the read buffer\n time.sleep(0.3 * self.global_delay_factor)\n self.clear_buffer()", "def otherOptionsFullScreen(self):\n\n # Set Storage List\n storageList = []\n # Create Intel explain menu\n menuDisplay = \"\"\"\n \\n\n [*] Information Verbose:\n Ontop of Asking for the Username and \n Password Should we Gather Even\n More Information about the User such as \n GEOIP / ISP / User Agent etc. etc. \n This Requires Curl to be installed or \n file_get_contents in PHP on selected Server \n \"\"\"\n # display About this\n self.outputText(menuDisplay, \"yellow\")\n # Set Verbose of Intel Gather\n self.results = input(\n \"\\nWould you like to Build a More In-depth Intel Report on Victim ( y Or n ): \")\n if self.results.lower()[0] == \"y\" or self.results.lower() == \"yes\":\n storageList.append(\"INTEL_VERBOSE_LOUD\")\n elif self.results.lower()[0] == \"n\" or self.results.lower() == \"no\":\n storageList.append(\"INTEL_VERBOSE_HUSH\")\n else:\n # Anything Else lets just Hush it then\n storageList.append(\"INTEL_VERBOSE_HUSH\")\n # Redirect Ask\n menuDisplay = \"\"\"\n \\n\n [*] Hitting Enter Keeps the Default \n = Redirect URL Which is the Same \n = URL of the Full-Screen Attack \n = you picked. For Instance If \n = it was AOL Full-Screen Attack\n = the default URL redirect would \n = be https://my.screenname.aol.com\n \"\"\"\n # display About this\n self.outputText(menuDisplay, \"yellow\")\n self.results = input(\n \"After the Victim Inputs Info Where Should the Script Redirect?: \")\n # Check if nothing was entered\n if self.results == \"\" or self.results == \" \":\n # Append Default Redirect Naaaow\n storageList.append(\"REDIRECT_DEFAULT\")\n else:\n # No Checking on URL Let Them Use Whatever lol there bad i guess\n # Append Default Redirect Naaaow\n storageList.append(self.results)\n\n # Spoof link\n menuDisplay = \"\"\"\n \\n\n [*] Hitting Enter Keeps the Default \n = What do you want the URL Link to be spoofed\n = to? This will be displayed when the user\n = rolls over the link. 
Basically tricking\n = them making them think they are going\n = to that URL..\n \"\"\"\n # display About this\n self.outputText(menuDisplay, \"yellow\")\n self.results = input(\n \"What should the URL be spoofed to? (ex: https://my.screenname.aol.com): \")\n # Check if nothing was entered\n if self.results == \"\" or self.results == \" \":\n # Append Default Redirect Naaaow\n storageList.append(\"DEFAULT_SPOOF\")\n else:\n # Append specified spoof url now\n storageList.append(self.results)\n\n # link name\n menuDisplay = \"\"\"\n \\n\n [*] Hitting Enter Keeps the Default \n = What do you want the Actual URL name\n = to be?\n \"\"\"\n # display About this\n self.outputText(menuDisplay, \"yellow\")\n self.results = input(\n \"What should the URL name be? (ex: Aol Login): \")\n # Check if nothing was entered\n if self.results == \"\" or self.results == \" \":\n # Append Default Redirect Naaaow\n storageList.append(\"DEFAULT_URL_NAME\")\n else:\n # Append url name\n storageList.append(self.results)\n\n menuDisplay = \"\"\"\n \\n\n [*] Hitting Enter Keeps the Default \n = name of Index.php If you feel \n = the need to change the name please \n = do not add the actual extension .php \n = along with it only add whatever crazy \n = name you come up with\n \"\"\"\n # display About this\n self.outputText(menuDisplay, \"yellow\")\n self.results = input(\n \"What Should the Main Index PHP File Be Called? ( ex: login ) : \")\n if self.results == \"\" or self.results == \" \":\n # Append Default Redirect Naaaow\n storageList.append(\"INDEX_DEFAULT\")\n else:\n check = self.results.find(\".\")\n # if it doesn't return a -1 it found a decimal\n if check != -1:\n # Throw Error we found a dot\n self.errorOutput(\n \"[*] Error - Didn't We Say Not to Add an Extension, WOW...\", \"yellow\")\n else:\n # Append name of the File\n storageList.append(self.results)\n\n menuDisplay = \"\"\"\n \\n\n [*] Hitting Enter Keeps the Default \n = Title of the Webpage.\n \"\"\"\n # display About this\n self.outputText(menuDisplay, \"blue\")\n self.results = input(\n \"What Should the Title of the Page be? (ex: AOL Login ) : \")\n if self.results == \"\" or self.results == \" \":\n # Append Default Redirect Naaaow\n storageList.append(\"TITLE_DEFAULT\")\n else:\n # Append name of the File\n storageList.append(self.results)\n\n # Return Storage List for Processing\n return storageList", "def prompt_to_canvas(self, canvas):\n # Draw the prompt text\n canvas.blit(self.prompt_surface_shadow, ((self.canvas_width // 5) + 25, self.canvas_width // 2 + 80))\n canvas.blit(self.prompt_surface, ((self.canvas_width // 5) + 20, self.canvas_width // 2 + 80))", "def play(self):\n prize = 0\n # part 1: 3 questions of 5000 NIS each\n line = 'Welcome to the first part!\\n' + '3 questions of 5000 NIS start NOW\\n'\n self.client.send((NO_RESPONSE + line).encode())\n for i in range(3): # 3 questions\n q = self.get_question() # get random question from stock\n line = str(q) + '\\nChoose your answer (1-4): '\n self.client.send((ASK_RESPONSE + line).encode())\n answer = int(self.client.recv(MAX_INPUT).decode()) # get client answer\n # check answer and update prize\n if answer == q.get_answer():\n line = 'Well Done! you are right!\\n'\n self.client.send((NO_RESPONSE + line).encode())\n prize += 5000\n else:\n line = 'You are wrong! 
Maybe next time!\\n'\n self.client.send((NO_RESPONSE + line).encode())\n\n # part 2: choose where to start\n line = ('Welcome to the second part!\\n' + 'You have ' + str(prize) + ' NIS for now\\n' +\n 'You can stay with it but you also can...\\n' +\n '1. step back: compete for ' + str(prize * 2) + ' NIS and start 2 steps from the chaser\\n' +\n '2. stay: compete for ' + str(prize) + ' NIS and start 3 steps from the chaser\\n' +\n '3. step ahead: compete for ' + str(prize // 2) + ' NIS and start 4 steps from the chaser\\n' +\n 'Choose an option (1-3): \\n')\n self.client.send((ASK_RESPONSE + line).encode())\n answer = int(self.client.recv(MAX_INPUT).decode())\n prize *= 2 if answer == 1 else 1/2 if answer == 3 else 1 # update prize (*1 or *1/2 or *2)\n prize = int(prize) # and not float\n self.b = Board(answer) # initialize board\n line = '--One time you can type \\'help\\' and disable 2 answers--\\n'\n self.client.send((NO_RESPONSE + line).encode())\n\n # part 2: let the chaser chase!\n for i in range(12): # 12 questions left\n self.client.send((NO_RESPONSE + str(self.b).encode()) # send board\n q = self.get_question() # get random question from stock\n chaser_answer = self.get_chaser_answer(q) # get chaser answer (75% right)\n line = str(q) + '\\nChoose your answer (1-4): '\n self.client.send((ASK_RESPONSE + line).encode())\n\n # get client answer: int (1/2/3/4) -or- 'help'\n while True: # until client choose answer (1/2/3/4)\n player_answer = self.client.recv(MAX_INPUT).decode() # get answer\n if player_answer == 'help':\n if self.there_is_help:\n self.get_help(q) # send 2 option instead of 4\n self.there_is_help = False # update flag\n line = '\\nChoose your answer (1-4): ' # ask for new answer\n self.client.send((ASK_RESPONSE + line).encode())\n continue\n else: # client already used his help, ask for an answer\n line = 'You already used it!\\n' + 'Choose your answer (1-4): '\n self.client.send((ASK_RESPONSE + line).encode())\n continue\n # else: answer is 1/2/3/4\n break\n\n # update board, check if the game end (win/lose)\n self.update_board(int(player_answer), chaser_answer, q.get_answer())\n win_lose = self.check_win_lose()\n if win_lose == 1: # win\n line = 'Well Done! You Win ' + str(prize) + ' NIS!'\n self.client.send((NO_RESPONSE + line).encode())\n return\n elif win_lose == 2: # lose\n line = 'Oh No! You Lose! 
Maybe Next Time...'\n self.client.send((NO_RESPONSE + line).encode())\n return", "def prepare_screen_end_screen_warning_display(screen,buttons, screen_status, button_status, card_database_filter, user):\n if button_status.prepare_screen_end_screen_warning_button_display == 'deck less than 40 cards':\n button = Button('You need at least 40','' ,(122,33,38),1050, 0, 150, 30,font_size = 13)\n button.update()\n button.draw(screen)\n\n button = Button('cards in your deck!','' ,(122,33,38),1050, 30, 150, 30,font_size = 13)\n button.update()\n button.draw(screen)\n\n button = Button('','' ,(122,33,38),1050, 60, 150, 40,font_size = 18)\n button.update()\n button.draw(screen)\n\n button = Button('ok','' ,(22,143,78),1100, 62, 40, 30,font_size = 16)\n button.update()\n button.draw(screen)\n\n elif button_status.prepare_screen_end_screen_warning_button_display == 'no deck':\n button = Button('Please pick a deck','' ,(122,33,38),1050, 0, 150, 30,font_size = 13)\n button.update()\n button.draw(screen)\n\n button = Button('or build a new one!','' ,(122,33,38),1050, 30, 150, 30,font_size = 13)\n button.update()\n button.draw(screen)\n\n button = Button('','' ,(122,33,38),1050, 60, 150, 40,font_size = 18)\n button.update()\n button.draw(screen)\n\n button = Button('ok','' ,(22,143,78),1100, 62, 40, 30,font_size = 16)\n button.update()\n button.draw(screen)\n\n elif button_status.prepare_screen_end_screen_warning_button_display == 'no character':\n button = Button('Please pick a character','' ,(122,33,38),1050, 0, 150, 30,font_size = 13)\n button.update()\n button.draw(screen)\n\n button = Button('for your opponent!','' ,(122,33,38),1050, 30, 150, 30,font_size = 13)\n button.update()\n button.draw(screen)\n\n button = Button('','' ,(122,33,38),1050, 60, 150, 40,font_size = 18)\n button.update()\n button.draw(screen)\n\n button = Button('ok','' ,(22,143,78),1100, 62, 40, 30,font_size = 16)\n button.update()\n button.draw(screen)\n\n elif button_status.prepare_screen_end_screen_warning_button_display == 'no difficulty':\n button = Button('Please pick a difficulty','' ,(122,33,38),1050, 0, 150, 30,font_size = 13)\n button.update()\n button.draw(screen)\n\n button = Button('for your opponent!','' ,(122,33,38),1050, 30, 150, 30,font_size = 13)\n button.update()\n button.draw(screen)\n\n button = Button('','' ,(122,33,38),1050, 60, 150, 40,font_size = 18)\n button.update()\n button.draw(screen)\n\n button = Button('ok','' ,(22,143,78),1100, 62, 40, 30,font_size = 16)\n button.update()\n button.draw(screen)", "def displayCode(display):\n global ck_display, listen\n\n while listen:\n try:\n data, addr = s[display].recvfrom(1024)\n dump = data.decode()\n tagmatch = re.findall('.*:', dump)\n try:\n tag = tagmatch[0][0:-1]\n except IndexError:\n tag = ''\n if 'KILL:' in tagmatch:\n ckcode = re.sub('\\nKILL:', '', dump).replace('\\n', '')\n ck_display[str(display)].configure(bg=ckcode)\n elif display == '1':\n #print(str(data, 'utf-8'))\n if len(tagmatch) > 0:\n ckcode = re.sub(''+tag+':', '', dump)\n try:\n if tag == 'delete':\n ck_display[display].delete(\"%s-1c\" % tkinter.INSERT, tkinter.INSERT)\n elif tag == 'clear':\n ck_display[display].delete('1.0', tkinter.END)\n elif tag == 'boom':\n # activating BOOM\n ck_display[display].insert(tkinter.END, ckcode, tag)\n ck_display[display].see(tkinter.END)\n else:\n # show a quick flash when evaluating a command\n if tag in ('snippet', 'hi', 'low', ''):\n start_flash(display)\n end_flash(display)\n ck_display[display].insert(tkinter.END, ckcode, tag)\n 
ck_display[display].see(tkinter.END)\n except RuntimeError as err:\n break\n elif display == '2':\n if len(tagmatch) > 0:\n ckcode = re.sub(''+tag+':', '', dump)\n try:\n if tag == 'boom':\n # activating BOOM\n ck_display[display].insert(tkinter.END, ckcode, tag)\n ck_display[display].see(tkinter.END)\n else:\n if tag in ('snippet', 'hi', 'low', ''):\n start_flash(display)\n end_flash(display)\n ck_display[display].insert(tkinter.END, ckcode, tag)\n ck_display[display].see(tkinter.END)\n except RuntimeError as err:\n break\n elif display == '3':\n if len(tagmatch) > 0:\n ckcode = re.sub(''+tag+':', '', dump)\n try:\n if tag == 'result' or tag == 'error':\n ck_display[display].delete(1.0, tkinter.END)\n ck_display[display].insert(tkinter.END, ckcode, tag)\n else:\n if tag in ('primitive') or 'flash:' in ckcode:\n start_flash(display)\n end_flash(display)\n ckcode = ckcode.replace('flash:', '')\n ck_display[display].insert(tkinter.END, ckcode, tag)\n ck_display[display].see(tkinter.END)\n except RuntimeError as err:\n break\n elif display == '4':\n if len(tagmatch) > 0:\n ckcode = re.sub(''+tag+':', '', dump)\n try:\n if tag == 'result' or tag == 'error':\n ck_display[display].delete(1.0, tkinter.END)\n ck_display[display].insert(tkinter.END, ckcode, tag)\n else:\n if 'flash:' in ckcode:\n start_flash(display)\n end_flash(display)\n ck_display[display].insert(tkinter.END, ckcode.replace('flash:', ''), 'conditional') \n else:\n ck_display[display].insert(tkinter.END, ckcode, tag)\n ck_display[display].see(tkinter.END)\n except RuntimeError as err:\n break\n elif display == '5': #this is the codespace\n if len(tagmatch) > 0:\n ckcode = re.sub(''+tag+':', '', dump)\n try:\n if tag == 'delete':\n ck_display[display].delete(\"%s-1c\" % tkinter.INSERT, tkinter.INSERT)\n elif tag == 'evaluate':\n start_flash(display)\n end_flash(display)\n else:\n ck_display[display].insert(tkinter.END, ckcode, tag)\n ck_display[display].see(tkinter.END)\n except RuntimeError as err:\n break\n except OSError as err:\n print(err)\n break\n\n #time.sleep(0.01)", "def main():\n\n deck1 = Deck()\n deck1.shuffle()\n\n dealerHand = Hand()\n playerHand = Hand()\n\n startGame(dealerHand, playerHand, deck1)\n while dealerHand.handSum() < 16:\n evalHand(dealerHand)\n hitMe(dealerHand, deck1)\n # print(\"dealer hand after evalHand is: \", dealerHand.showHand())\n if dealerHand.handSum() > 21:\n os.system(\"clear\")\n displayHands(playerHand, dealerHand)\n print(\"Dealer sum exceeded 21.\"\n \" Dealer hand {}. Dealer sum {}. Player wins!!\".format(dealerHand.handSum(), dealerHand.showHand()))\n return\n\n displayHands(playerHand, dealerHand)\n evalHand(dealerHand)\n\n \"\"\"\n if dealerHand.handSum() > 21:\n game_status = stand(playerHand, dealerHand)\n print(\"Game status is: \", game_status)\n \"\"\"\n # print(\"\\nYour hand score is: \", playerHand.handSum())\n displayHands(playerHand, dealerHand)\n ans = input(\"\\nDo you want another card (y or n)? \")\n\n while ans == 'y':\n hitMe(playerHand, deck1)\n evalHand(playerHand)\n displayHands(playerHand, dealerHand)\n if playerHand.handSum() > 21:\n os.system(\"clear\")\n displayHands(playerHand, dealerHand)\n print(\"Player exceeded 21. \"\n \"Player hand {}. Dealer sum {}. 
Dealer wins!!\".format(playerHand.showHand(), playerHand.handSum()))\n return\n ans = input(\"Do you want another card (y or n)?\")\n\n displayHands(playerHand, dealerHand)\n print(\"\\nGame outcome is: \", stand(playerHand, dealerHand))", "def session_preparation(self):\n # 0 will defer to the global delay factor\n delay_factor = self.select_delay_factor(delay_factor=0)\n self._test_channel_read()\n self.set_base_prompt()\n cmd = f\"{self.RETURN}set cli mode -page OFF{self.RETURN}\"\n self.disable_paging(command=cmd)\n time.sleep(1 * delay_factor)\n self.set_base_prompt()\n time.sleep(0.3 * delay_factor)\n self.clear_buffer()", "def welcome_screen(self):\n print()\n print('P*O*K*E*R')\n print('Welcome to a 5-card poker game,\\n' +\n 'The goal is the get a better hand than the AI.')\n print('To do this you get one chance to swap cards' +\n 'that are in your hand')\n print('You swap like this:\\n' +\n '1. Choose how many cards you want to swap\\n' +\n '2. Write the number of the card(s) you want to swap, like this:\\n' +\n 'If you want to swap card 2, type in 2.\\n' +\n 'If you want to swap card 1 and 4, type 1,4')\n print('Next both your and AI hand is shown,\\n' +\n 'and the winner is declared.')\n print('For information on what hand beats what, \\n' +\n 'and what happens when both players have an equally good hand,\\n' +\n 'please follow the link below:\\n' +\n 'https://github.com/oljung/portfolio-project-three\\n' +\n 'NOTE! Ctrl + c will terminate the app, use right click to copy')\n message = 'Would you like to play a round? Y(es) or N(o): '\n answer = InputHandler.input_bool(message)\n if answer:\n self.run_game()", "def prepare_screen_to_battle_screen_action(ai_settings, screen,buttons, screen_status, button_status, card_database_filter, user, player2):\n save_pass = True\n # Clear dup number each call\n\n with open('user_deck_list_string.txt','r') as f:\n f.seek(0)\n for line in f:\n if 'DECK_LIST_' + user.deck_list_index in line:\n list1 = make_deck_from_string(line.replace('DECK_LIST_' + user.deck_list_index + ' = ', ''), ai_settings, screen, buttons,screen_status, button_status, card_database_filter, user, player2)\n\n if user.deck_list_index == '0' or user.deck_list_index == 'new':\n button_status.prepare_screen_end_screen_warning_button_display = 'no deck'\n save_pass = False\n\n elif len(list1) < 40:\n\n button_status.prepare_screen_end_screen_warning_button_display = 'deck less than 40 cards'\n save_pass = False\n\n elif player2.character_ai_index == '0':\n button_status.prepare_screen_end_screen_warning_button_display = 'no character'\n save_pass = False\n\n elif player2.ai_difficulty_index == '0':\n button_status.prepare_screen_end_screen_warning_button_display = 'no difficulty'\n save_pass = False\n\n if save_pass:\n\n # render AI character deck\n if player2.character_ai_index == '1':\n player2.character_card = make_deck_from_string(str(['CARD_01_16']), ai_settings, screen, buttons,screen_status, button_status, card_database_filter, user, player2)[0]\n player2.deck_list = make_deck_from_string(player2.NIXIE_DECK, ai_settings, screen, buttons,screen_status, button_status, card_database_filter, user, player2)\n\n elif player2.character_ai_index == '2':\n player2.character_card = make_deck_from_string(str(['CARD_01_37']), ai_settings, screen, buttons,screen_status, button_status, card_database_filter, user, player2)[0]\n player2.deck_list = make_deck_from_string(player2.MAYA_DECK, ai_settings, screen, buttons,screen_status, button_status, card_database_filter, user, 
player2)\n\n elif player2.character_ai_index == '3':\n player2.character_card = make_deck_from_string(str(['CARD_01_59']), ai_settings, screen, buttons,screen_status, button_status, card_database_filter, user, player2)[0]\n player2.deck_list = make_deck_from_string(player2.IVAN_DECK, ai_settings, screen, buttons,screen_status, button_status, card_database_filter, user, player2)\n\n elif player2.character_ai_index == '4':\n player2.character_card = make_deck_from_string(str(['CARD_01_89']), ai_settings, screen, buttons,screen_status, button_status, card_database_filter, user, player2)[0]\n player2.deck_list = make_deck_from_string(player2.SHERMAN_DECK, ai_settings, screen, buttons,screen_status, button_status, card_database_filter, user, player2)\n\n elif player2.character_ai_index == '5':\n player2.character_card = make_deck_from_string(str(['CARD_03_11']), ai_settings, screen, buttons,screen_status, button_status, card_database_filter, user, player2)[0]\n player2.deck_list = make_deck_from_string(player2.MOBY_DECK, ai_settings, screen, buttons,screen_status, button_status, card_database_filter, user, player2)\n\n elif player2.character_ai_index == '6':\n player2.character_card = make_deck_from_string(str(['CARD_05_30']), ai_settings, screen, buttons,screen_status, button_status, card_database_filter, user, player2)[0]\n player2.deck_list = make_deck_from_string(player2.MAHIBANG_DECK, ai_settings, screen, buttons,screen_status, button_status, card_database_filter, user, player2)\n\n elif player2.character_ai_index == '7':\n player2.character_card = make_deck_from_string(str(['CARD_01_64']), ai_settings, screen, buttons,screen_status, button_status, card_database_filter, user, player2)[0]\n player2.deck_list = make_deck_from_string(player2.MISTMOON_DECK, ai_settings, screen, buttons,screen_status, button_status, card_database_filter, user, player2)\n\n elif player2.character_ai_index == '8':\n player2.character_card = make_deck_from_string(str(['CARD_05_61']), ai_settings, screen, buttons,screen_status, button_status, card_database_filter, user, player2)[0]\n player2.deck_list = make_deck_from_string(player2.FANGBLADE_DECK, ai_settings, screen, buttons,screen_status, button_status, card_database_filter, user, player2)\n\n # Set up AI difficulty\n if player2.ai_difficulty_index == '1':\n player2.random_deck_list = random.sample(player2.deck_list, len(player2.deck_list))\n player2.remain_deck_list = player2.random_deck_list[3:]\n player2.hand_list = player2.random_deck_list[0:3]\n player2.character_card.health = str(int(int(player2.character_card.health)*0.5))\n\n elif player2.ai_difficulty_index == '2':\n player2.random_deck_list = random.sample(player2.deck_list, len(player2.deck_list))\n player2.remain_deck_list = player2.random_deck_list[6:]\n player2.hand_list = player2.random_deck_list[0:6]\n player2.character_card.health = str(int(player2.character_card.health)*1)\n\n elif player2.ai_difficulty_index == '3':\n player2.random_deck_list = random.sample(player2.deck_list, len(player2.deck_list))\n player2.remain_deck_list = player2.random_deck_list[10:]\n player2.hand_list = player2.random_deck_list[0:10]\n player2.character_card.health = str(int(player2.character_card.health)*2)\n\n elif player2.ai_difficulty_index == '4':\n player2.random_deck_list = random.sample(player2.deck_list, len(player2.deck_list))\n player2.remain_deck_list = player2.random_deck_list[6:]\n player2.hand_list = player2.random_deck_list[0:30]\n player2.character_card.health = 
str(int(player2.character_card.health)*4)\n\n\n # Render user's deck\n with open('user_deck_list_string.txt','r') as f:\n f.seek(0)\n for line in f:\n if 'DECK_LIST_' + user.deck_list_index in line:\n user.deck_list = make_deck_from_string(line.replace('DECK_LIST_' + user.deck_list_index + ' = ', ''), ai_settings, screen, buttons,screen_status, button_status, card_database_filter, user, player2)\n if 'CHARACTER_' + user.deck_list_index in line:\n user.character_card = make_deck_from_string(line.replace('CHARACTER_' + user.deck_list_index + ' = ', ''), ai_settings, screen, buttons,screen_status, button_status, card_database_filter, user, player2)[0]\n #user.character_card = eval('card_' + line.replace('CHARACTER_' + user.deck_list_index + ' = ', '')[7:12])\n\n user.random_deck_list = random.sample(user.deck_list, len(user.deck_list))\n user.remain_deck_list = user.random_deck_list[6:]\n user.hand_list = user.random_deck_list[0:6]\n\n\n # Clear up and initiate all display/progress indicator on battle screen\n # screen_status\n screen_status.battle_screen_my_hand_page_id = 1\n screen_status.battle_screen_action_indicator = 'stage-0'\n screen_status.battle_screen_player2_action_display_indicator = False\n # button_status\n button_status.battle_screen_win_lost_display = False\n button_status.battle_screen_win_lost_indicator = ''\n button_status.battle_screen_instruction_bar_yes_display = True\n button_status.battle_screen_instruction_bar_yes_backend = True\n button_status.battle_screen_instruction_bar_skip_display = False\n button_status.battle_screen_instruction_bar_skip_backend = False\n button_status.battle_screen_stable_button_backend = True\n button_status.battle_screen_my_hand_page_change_button_backend = True\n button_status.battle_screen_menu_display = False\n button_status.battle_screen_history_bar_detail_display = False\n button_status.battle_screen_history_bar_text_dict = {\n '1' : '',\n '2' : '',\n '3' : '',\n '4' : '',\n '5' : '',\n '6' : '',\n '7' : '',\n '8' : '',\n '9' : '',\n '10' : '',\n '11' : '',\n '12' : '',\n '13' : '',\n '14' : '',\n '15' : '',\n }\n button_status.battle_screen_my_hand_indicator_display = False\n button_status.battle_screen_my_hand_indicator_position = '1'\n button_status.battle_screen_player1_battleground_indicator_display = False\n button_status.battle_screen_player1_battleground_indicator_position = '1'\n button_status.battle_screen_player2_battleground_indicator_display = False\n button_status.battle_screen_player2_battleground_indicator_position = '1'\n button_status.card_zoom_active = False\n button_status.card_zoom_screen_indicator = 'build_deck_screen'\n button_status.card_zoom_part_indicator = ''\n button_status.card_zoom_position_indicator = '1'\n button_status.battle_screen_win_lost_indicator = ''\n\n #user\n user.monster_in_play_dict = {\n '1' : '',\n '2' : '',\n '3' : '',\n '4' : '',\n '5' : '',\n '6' : '',\n }\n user.monster_in_play_length = '0'\n user.item_in_play_dict = {\n '1' : '',\n '2' : '',\n '3' : '',\n '4' : '',\n '5' : '',\n '6' : '',\n }\n user.item_in_play_length = '0'\n user.character_under_card_by_level = {\n '10' : '',\n '20' : '',\n '30' : '',\n '40' : '',\n '50' : '',\n '60' : '',\n '70' : '',\n '80' : '',\n '90' : '',\n '100' : '',\n '110' : '',\n '120' : '',\n '130' : '',\n '140' : '',\n '150' : '',\n }\n user.stage_2_other_card_usable_list = []\n\n #player2\n player2.character_under_card_by_level = {\n '10' : '',\n '20' : '',\n '30' : '',\n '40' : '',\n '50' : '',\n '60' : '',\n '70' : '',\n '80' : '',\n '90' : '',\n 
'100' : '',\n '110' : '',\n '120' : '',\n '130' : '',\n '140' : '',\n '150' : '',\n }\n player2.monster_in_play_dict = {\n '1' : '',\n '2' : '',\n '3' : '',\n '4' : '',\n '5' : '',\n '6' : '',\n }\n player2.monster_in_play_length = '0'\n player2.item_in_play_dict = {\n '1' : '',\n '2' : '',\n '3' : '',\n '4' : '',\n '5' : '',\n '6' : '',\n }\n player2.item_in_play_length = '0'\n player2.stage_2_other_card_usable_list = []", "def win():\n try:\n color.write(\"=============================================\\n\",\"BUILTIN\")\n print(\"Ctrl + C to skip!\")\n slow_print(\"Finally\")\n intro_dots()\n slow_print(\"I am here at the Star of Tangaroa~\")\n time.sleep(0.5)\n slow_print(\"I wield the fishing rod of legends, and it is now time for the greatest catch~\")\n time.sleep(0.5)\n slow_print(\"Time to fish.~~\",\"ERROR\")\n except:\n pass\n fishing = True\n while fishing:\n command = input(\"Enter a command: \").lower()\n if command == 'fish' or command == 'f' or command == 'fishing':\n fishing = False\n else:\n color.write(\"The time for the greatest catch is here. You wouldn't miss this for the world would you?\\n\\n\",\"ERROR\")\n\n try:\n color.write(\"=============================================\\n\",\"BUILTIN\")\n print(\"Ctrl + C to skip!\")\n slow_print(\"Nnngh!~\")\n time.sleep(0.5)\n slow_print(\"This is a big one!~\")\n time.sleep(0.5)\n slow_print(\"BUT I AM MĀUI-POTIKI, AND NO CATCH WILL BEST ME!~\")\n time.sleep(0.5)\n slow_print(\"THIS~\",\"ERROR\")\n time.sleep(0.2)\n slow_print(\"IS~\",\"ERROR\")\n time.sleep(0.2)\n slow_print(\"MY~\",\"ERROR\")\n time.sleep(0.2)\n slow_print(\"LEGEND!~\",\"ERROR\")\n time.sleep(2)\n \n except:\n pass\n ending(\"win\")", "def intro_instructions():\n print(\"The board will be updated after each move.\")\n print(\"Watch both the board and the python prompt after each move.\")\n print(\"Player 1 is white and player 2 is orange\")\n print(\"Green boxes are snakes and yellow boxes are ladders.\")\n print(\"If you hit any part of the snake(not just the head), you will slide down to the snakes tail\")\n print(\"If you hit any part of the ladder(not just the bottom), you will climb to the ladder's top\")\n print(\"May the luckiest player win\")", "def bids_cli():", "def build_deck_screen_end_screen_warning_display(screen,buttons, screen_status, button_status, card_database_filter, user):\n if button_status.build_deck_screen_end_screen_warning_button_display == 'character card':\n button = Button('Missing A','' ,(122,33,38),1050, 0, 150, 30,font_size = 18)\n button.update()\n button.draw(screen)\n\n button = Button('Character Card!','' ,(122,33,38),1050, 30, 150, 30,font_size = 18)\n button.update()\n button.draw(screen)\n\n button = Button('','' ,(122,33,38),1050, 60, 150, 40,font_size = 18)\n button.update()\n button.draw(screen)\n\n button = Button('ok','' ,(22,143,78),1100, 62, 40, 30,font_size = 16)\n button.update()\n button.draw(screen)\n\n elif button_status.build_deck_screen_end_screen_warning_button_display == '4 copy each':\n button = Button('No More Than 4','' ,(122,33,38),1050, 0, 150, 30,font_size = 15)\n button.update()\n button.draw(screen)\n\n button = Button('Copies For Each Card!','' ,(122,33,38),1050, 30, 150, 30,font_size = 13)\n button.update()\n button.draw(screen)\n\n button = Button('','' ,(122,33,38),1050, 60, 150, 40,font_size = 18)\n button.update()\n button.draw(screen)\n\n button = Button('ok','' ,(22,143,78),1100, 62, 40, 30,font_size = 16)\n button.update()\n button.draw(screen)", "def process_input(server_msg):\n global 
username\n original=server_msg\n server_msg=server_msg.split(\" \") #separa por args\n\n if(server_msg[0]==\"DISPLAY\"):\n print(original[8::].replace(';', '\\n'))\n\n\n\n elif(server_msg[0]==\"SUC\"):\n if(len(server_msg)>1):\n if(server_msg[1]==\"REG_OK\"):\n username='[' + server_msg[2].lower() + ']'\n print(REG_OK)\n elif(server_msg[1]=='INVITE_OK'):\n print(INVITE_OK)\n elif(server_msg[1]=='REJECT'):\n print(REJECT.format(server_msg[2]))\n elif(server_msg[1]=='DISCONNECT'):\n print(DISCONNECT)\n username=''\n\n elif(server_msg[0]=='GAME'):\n if(server_msg[1]=='START'):\n print(START.format(server_msg[2]))\n elif(server_msg[1]=='WIN'):\n print(WIN)\n elif(server_msg[1]=='LOSE'):\n print(LOSE)\n elif(server_msg[1]=='TIE'):\n print(TIE)\n elif(server_msg[1]=='FOLD'):\n print(FOLD.format(server_msg[2]))\n\n\n elif(server_msg[0]==\"ERR\"):\n if(server_msg[1]==\"BAD_REQUEST\"):\n print(BAD_REQUEST)\n elif(server_msg[1]==\"REG_FAIL\"):\n print(REG_FAIL)\n elif(server_msg[1]==\"USER_REGISTERED\"):\n print(USER_REGISTERED)\n elif(server_msg[1]=='USER_BUSY'):\n print(USER_BUSY)\n elif(server_msg[1]=='USER_UNKNOWN'):\n print(USER_UNKNOWN)\n elif(server_msg[1]=='IMBUSY'):\n print(IMBUSY)\n elif(server_msg[1]=='NO_USER'):\n print(NO_USER)\n elif(server_msg[1]=='NO_ENV'):\n print(NO_ENV)\n elif(server_msg[1]=='NOT_IN_GAME'):\n print(NOT_IN_GAME)\n elif(server_msg[1]=='NO_TURN'):\n print(NO_TURN)\n elif(server_msg[1]=='REG_FAIL'):\n print(REG_FAIL)\n elif(server_msg[1]=='INVALID_PLAY'):\n print(INVALID_PLAY.format(server_msg[2], server_msg[3], server_msg[4]))\n elif(server_msg[1]=='BAD_FORMAT'):\n print(BAD_FORMAT)\n elif(server_msg[1]=='INVALID_COOR'):\n print(INVALID_COOR)\n elif(server_msg[1]=='USER_DISCONECTED'):\n print(USER_DISCONECTED)\n elif(server_msg[1]=='YSELF'):\n print(YSELF)\n elif(server_msg[1]=='NO_INV'):\n print(NO_INV)\n\n \n elif(server_msg[0]==\"INVITE\"):\n print(INVITE_REC.format(server_msg[1]))\n\n\n\n elif(server_msg[0]==\"BOARD\"):\n if(server_msg[1]=='1'):\n print(\"\\n\\nYour turn to play\\n\")\n elif(server_msg[1]=='0'):\n print(\"\\n\\nWaiting for oponent...\\n\")\n board=eval(original[8::])\n for i in range(len(board)):\n for k in range(len(board[i])):\n if(board[i][k]==0):\n board[i][k]=' '\n elif(board[i][k]==1):\n board[i][k]='X'\n else:\n board[i][k]='O'\n try:\n spacer=\"\"\n rows, columns = os.popen('stty size', 'r').read().split()\n rows = int(rows)\n columns = int(columns)\n tmp=int((columns-13)/2)\n for i in range(tmp):\n spacer+=\" \"\n except:\n pass\n print(\"\\n{} 0 1 2 \\n\".format(spacer)\\\n +\"{}0 {} {} {} {} {} \\n\".format(spacer,board[0][0],VLINE, board[0][1],VLINE, board[0][2])\\\n +spacer+HLINE\\\n +\"{}1 {} {} {} {} {} \\n\".format(spacer,board[1][0],VLINE, board[1][1],VLINE, board[1][2])\\\n +spacer+HLINE\\\n +\"{}2 {} {} {} {} {} \\n\".format(spacer,board[2][0],VLINE, board[2][1],VLINE, board[2][2]))\n\n elif(server_msg[0]==\"LIST\"):\n all_users=eval(original[5::])\n print(\"USER\\t|\\tSTATUS\")\n for i in all_users:\n print(\"{}\\t|\\t{}\".format(i[0], \"available\" if i[1] == 0 else \"unavailable\"))\n print(\"\\n\")\n\n elif(server_msg[0]==\"SERVER_OFF\"):\n print('\\n' + SERVER_OFF + '\\n\\n')\n exit_sig()\n sys.stdout.flush()", "async def draw_start(self):\n for i, player in enumerate(self.players):\n def bet_check(m):\n \"\"\"If the value can be converted to a float and is within the bounds return true, else false\"\"\"\n try:\n value = float(m.content)\n if 0 <= value <= player.coins:\n return True\n else:\n return False\n except:\n 
return False\n\n if not player.out:\n await self.ctx.send(f\"{self.users[i].name}, How much would you like to bet? You have {player.coins} in the bank: \")\n try:\n bet = await self.client.wait_for('message', timeout=120.0, check=bet_check)\n bet = float(bet.content)\n if bet == 0:\n player.out = True\n self.total_players_out += 1\n else:\n player.debit(bet)\n player.bet = bet\n except:\n await self.ctx.send(\"Timed Out!\")\n player.out = True\n self.total_players_out += 1\n # shuffle cards and dealer draws one, send the dealers hand to the channel, loop through all players that aren't out and show their hand\n # if all players arent out\n if self.total_players_out < len(self.players):\n self.deck.shuffle()\n self.dealer.clear()\n self.deck.move_cards(self.dealer, 1)\n\n embed_dealer = discord.Embed(title='Dealer', color=0x00ff00)\n embed_dealer.add_field(\n name=\"Hand\", value=self.dealer, inline=False)\n self.dealer_msg = await self.ctx.send(embed=embed_dealer)\n\n embed_players = discord.Embed(title='Players', color=0x0000fd)\n for i, player in enumerate(self.players):\n if not player.out:\n player.clear()\n self.deck.move_cards(player, 2)\n # name=their discord name and value = their hand\n embed_players.add_field(\n name=self.users[i].name, value=player, inline=True)\n if player.get_value() == 21:\n player.has_bj = True\n self.players_msg = await self.ctx.send(embed=embed_players)", "def showdown(self):\r\n\r\n poker_hands = []\r\n message = \"\"\r\n for player in self.players:\r\n poker_hands.append(player.hand.best_poker_hand(self.community_cards.cards))\r\n\r\n # Reveal all cards when the round is over\r\n player.reveal_cards()\r\n\r\n if poker_hands[0].type > poker_hands[1].type:\r\n message = \"Player {} won! \\nPoker hand >{}< won against >{}<\".format(\r\n self.players[0].name, str(poker_hands[0].type), str(poker_hands[1].type))\r\n self.players[0].credits += self.pot\r\n\r\n if poker_hands[0].type < poker_hands[1].type:\r\n message = \"Player {} won! \\nPoker hand >{}< won against >{}<\".format(\r\n self.players[1].name, str(poker_hands[1].type), str(poker_hands[0].type))\r\n self.players[1].credits += self.pot\r\n\r\n if poker_hands[0].type == poker_hands[1].type:\r\n if poker_hands[0].highest_values > poker_hands[1].highest_values:\r\n message = \"Player {} won! \\nHighest value >{}< won against >{}<\".format(\r\n self.players[0].name, str(poker_hands[0].highest_values), str(poker_hands[1].highest_values))\r\n self.players[0].credits += self.pot\r\n\r\n elif poker_hands[0].highest_values < poker_hands[1].highest_values:\r\n message = \"Player {} won! \\nHighest value >{}< won against >{}<\".format(\r\n self.players[1].name, str(poker_hands[1].highest_values), str(poker_hands[0].highest_values))\r\n self.players[1].credits += self.pot\r\n\r\n elif poker_hands[0].highest_values == poker_hands[1].highest_values:\r\n message = \"It is a draw! 
Both players had >{}< and highest value >{}<\".format(\r\n poker_hands[0].type.name, str(poker_hands[0].highest_values))\r\n\r\n for player in self.players:\r\n player.credits += (self.pot // len(self.players))\r\n else:\r\n self.game_message_warning.emit(\"Incorrect comparison of poker hands\")\r\n\r\n self.new_output.emit(message)\r\n self.game_message.emit(message)\r\n self.new_credits.emit()\r\n self.new_pot.emit()", "def lobby_screen_pick_deck_warning_button_display(ai_settings, screen, buttons,screen_status, button_status, card_database_filter, user, action, player2):\n if button_status.lobby_screen_end_screen_warning_button_display == 'deck less than 40 cards':\n button = Button('You need at least 40','' ,(122,33,38),1050, 580, 150, 30,font_size = 13)\n button.update()\n button.draw(screen)\n\n button = Button('cards in your deck!','' ,(122,33,38),1050, 610, 150, 30,font_size = 13)\n button.update()\n button.draw(screen)\n\n button = Button('','' ,(122,33,38),1050, 640, 150, 40,font_size = 18)\n button.update()\n button.draw(screen)\n\n button = Button('ok','' ,(22,143,78),1100, 642, 40, 30,font_size = 16)\n button.update()\n button.draw(screen)\n\n elif button_status.lobby_screen_end_screen_warning_button_display == 'no deck':\n button = Button('Please pick a deck','' ,(122,33,38),1050, 580, 150, 30,font_size = 13)\n button.update()\n button.draw(screen)\n\n button = Button('or build a new one!','' ,(122,33,38),1050, 610, 150, 30,font_size = 13)\n button.update()\n button.draw(screen)\n\n button = Button('','' ,(122,33,38),1050, 640, 150, 40,font_size = 18)\n button.update()\n button.draw(screen)\n\n button = Button('ok','' ,(22,143,78),1100, 642, 40, 30,font_size = 16)\n button.update()\n button.draw(screen)", "def session_preparation(self):\n self.ansi_escape_codes = True\n self._test_channel_read()\n self.set_base_prompt()\n self.set_terminal_width(command=\"terminal width 511\", pattern=\"terminal\")\n self.disable_paging()\n # Clear the read buffer\n time.sleep(0.3 * self.global_delay_factor)\n self.clear_buffer()", "def display():\n screen.addch(head[0],head[1],'x')", "def session_preparation(self):\n self.ansi_escape_codes = True\n self._test_channel_read()\n self.set_base_prompt()\n self.disable_paging(command=\"terminal datadump\")\n\n # Clear the read buffer\n time.sleep(0.3 * self.global_delay_factor)\n self.clear_buffer()", "def hellraiser_weak():\r\n\r\n print(\"\\n\\nThe Hell Raiser pulls you back on the floor...\\n\\nLucifer trying to splash Holy water from jug kept on dining table...\")\r\n\r\n time.sleep(3)\r\n\r\n print(\"\\n\\nYou are battling hard with devil to save your life...\")\r\n\r\n time.sleep(3)\r\n \r\n print(\"\\n\\nLucifer tries to splash water on devil's face!\")\r\n \r\n time.sleep(2)\r\n \r\n holywater = input(\"\\n\\nType 'splash' to splash holy water: \")\r\n\r\n if holywater.lower() == \"splash\": # Create condition and compare if input from player matches the word, \"SPLASH\"\r\n\r\n print(\"\\n\\nLucifer splashes water on devil...\\n\\n Hell Raiser - 'Lucifer, stop it! You are getting crazy, don't splash me,, I can't survive!'\")\r\n\r\n time.sleep(2)\r\n\r\n print(\"\\n\\nHell Raiser now utilises his next skill - Fireballs!\")\r\n time.sleep(3)\r\n\r\n print(\"\\n\\n Hell Raiser - 'If you really want to make this uglier, take this!!!' 
\\n\\n\\n ***FIREBALLS*** ***FIREBALLS*** ***FIREBALLS***\")\r\n\r\n time.sleep(2)\r\n\r\n print(\"\\n\\nHelp Lucifer activate his double shield\")\r\n\r\n shield = input(\"\\nType 'shield' to enable: \")\r\n\r\n if shield == \"shield\": #Comparing player's response with \"SHIELD\"\r\n\r\n print(f\"\\n\\n Lucifer - '{name.title()}, speak out CHRISTO, it will make him weak!'\")\r\n\r\n christo = input(\"\\nType 'christo' to retaliate against Hell Raiser: \")\r\n\r\n if christo.lower() == \"christo\":\r\n\r\n print(f\"\\n Lucifer - 'Great, {name.title()}!, I will now emitt High Frequency noises on him...'\")\r\n\r\n time.sleep(2)\r\n\r\n high_noise = input(\"\\nType 'high' to activate Lucifer's noise: \")\r\n\r\n if high_noise.lower() == \"high\": # Comparing player's response with HIGH.\r\n \r\n print(\"\\n\\n Hell Raiser - 'Aaahhh! Stop this please! Lucifer, this is exhaustive...stop this noise! I am shaking and getting incapacitated!'\")\r\n\r\n time.sleep(3)\r\n\r\n print(\"\\nDevil's health deteriorates, he avoids further impact from Lucifer and vanishes again...\")\r\n\r\n time.sleep(3)\r\n\r\n # After Lucifer emits the noise, devil diappears. The player and Lucifer then heading to Blue room.\r\n \r\n\r\n print(\"\\nYou and Lucifer finally climb upstairs towards Blue Room.\")\r\n \r\n\r\n print(\"\\n\\t\\t\\t***STAIRCASE***\")\r\n\r\n time.sleep(3)\r\n\r\n staircase = \"\\n\\t\\t\\t\\t\\t _____ ***BLUE ROOM*** \\n\\t\\t\\t\\t\\t _|\\n\\t\\t\\t\\t _|\\n\\t\\t\\t\\t _|\\n\\t\\t\\t _|\\n\\t\\t\\t\\t _|\\n\\t **BLACK ROOM** _____|\"\r\n\r\n print(staircase)\r\n\r\n time.sleep(3)\r\n\r\n # On reaching the Blue room, they face another challenge to solve a riddle to enter the room.\r\n\r\n print(\"\\n\\nNow that you have reached Blue Room, there is a catch! \\nYou will have to solve riddle written on sticky note hung on doorknob.\")\r\n\r\n time.sleep(3)\r\n\r\n print(\"\\n\\nSolve the riddle given below: \")\r\n\r\n # Creating blueroom_riddle variable to display the riddle question and get input from player\r\n\r\n blueroom_riddle = input(\"\\n\\n\\tWhat Demands an Answer, But Ask no Questions?: \")\r\n\r\n if blueroom_riddle == \"telephone\": # Checking if player enters \"telephone\" as the answer\r\n\r\n print(f\"\\n\\n{blueroom_riddle.title()} is correct! You have now entered the room.\")\r\n \r\n\r\n ## If written TELEPHONE, they enter room and witness the Ancient Green Glass Box.\r\n \r\n\r\n print(\"\\n\\nAs you and Lucifer enter room, you are amazed to see the Ancient Green Glass Box resting on the floor.\")\r\n\r\n # Creating a variable to represent the box.\r\n\r\n ancient_box = \"\\n\\n\\t\\t\\t ***GREEN BOX*** \\n\\t\\t\\t\\t ___________\\n\\t\\t\\t\\t| |\\n\\t\\t\\t\\t| |\\n\\t\\t\\t\\t|___________|\"\r\n\r\n # Create a variable to represent floor.\r\n \r\n floor = \"\\n\\t\\t\\t\\t------------- \\n\\t\\t\\t\\t FLOOR \\n\\t\\t\\t\\t-------------\"\r\n\r\n # Print both.\r\n\r\n print(ancient_box)\r\n\r\n print(floor)\r\n\r\n time.sleep(3)\r\n\r\n ## The player meets the witch, Kijo, who demands to answer her 3 questions from the player to retain the box and win the game.\r\n \r\n print(\"\\nAlthough the box is at front of you, you encounter a witch, named Kijo.\")\r\n \r\n time.sleep(5)\r\n\r\n # Kijo welcoming the player.\r\n\r\n print(f\"\\n\\n Kijo - 'Welcome to the Blue room, {name.title()}! 
You proved to be a valiant human to reach this far.\\n\\n\\tHowever, you will have to answer 3 of my questions to get to the box.'\")\r\n\r\n time.sleep(5)\r\n\r\n # Kijo's conditions for each riddle...\r\n \r\n print(\"\\n\\nKijo will ask you three riddles... \\n\\nYou have 3 attempts to answer first riddle, 2 for second, and only 1 for the third.\\n\\nEach correct answer will bring you 3 steps closer to the box.\")\r\n\r\n time.sleep(5)\r\n\r\n \r\n print(\"\\n\\nBe mindful that failure to give correct answer at any stage will force you to get locked out of room forever. \\n\\nYou will not get the box and the game will be over.\")\r\n\r\n time.sleep(5)\r\n\r\n # Creating input statement for player to answer the questions\r\n\r\n yes = input(\"\\nType yes if ready to answer: \")\r\n\r\n if yes.lower() == \"yes\":\r\n\r\n ## First riddle:\r\n\r\n print(f\"\\nKijo - 'Okay, {name.title()}!, here is your first question:'\")\r\n\r\n guessestaken = 0 # Number of guesses taken by player is stored here.\r\n\r\n # Create the variable to print the first question\r\n\r\n firstriddle = print(\"\\nI Am Heavy And Hard To Pick Up, But Backwards I Am Not. What Am I?\")\r\n \r\n \r\n\r\n while guessestaken < 3: # Create While loop - the condition states that unless the player gives write answer to question,\r\n # offer 3 attempts\r\n \r\n\r\n print(\"\\nGive Your Answer\\n\")\r\n \r\n answer = str(input()) # The variable created for player's input. This input stores player's answer and convert into string.\r\n\r\n guessestaken = guessestaken + 1 # Instruct the system to update the guesses taken by the player.\r\n \r\n\r\n # Creating conditional statements for player's answer.\r\n\r\n if answer.lower() == \"ton\":\r\n\r\n # If he answers \"TON\", then print the below statement.\r\n\r\n print(f\"\\n\\n{answer.title()} is correct! You move 3 steps closer to box. Solve next question.\")\r\n\r\n time.sleep(3)\r\n\r\n # After successfully completing first question, head on to second question:\r\n \r\n\r\n ## Second riddle:\r\n\r\n # Kijo appreciating the player:\r\n\r\n print(f\"\\n\\n Kijo - 'Well done, {name.title()}! Here is your next question:'\")\r\n\r\n nextguess = 0 # Number of guesses taken by player for the second question's answer\r\n\r\n # Print the second question:\r\n\r\n secondriddle = print(\"\\nWhat work of writing can one never finish?\")\r\n \r\n\r\n while nextguess < 2: # Creating While loop for second question, this time with 2 attempts.\r\n\r\n print(\"\\nGive Your Answer\\n\")\r\n\r\n secondanswer = str(input()) # Create input for second answer\r\n\r\n nextguess = nextguess + 1 # Update the guesses\r\n\r\n # Create Conditional Statements for Second Question:\r\n \r\n\r\n if secondanswer.lower() == \"autobiography\": # If player's answer is \"AUTOBIOGRAPHY', execute following:\r\n \r\n print(f\"\\n\\n{secondanswer.title()} is correct! You move 3 steps more closer to box. Solve last question.\")\r\n\r\n time.sleep(3)\r\n\r\n # If successful, then head on to last question:\r\n\r\n ## Third riddle:\r\n\r\n print(f\"\\n\\n Kijo - 'Great, {name.title()}! 
Solve the final question to retain the box:'\")\r\n\r\n finalguess = 0 # Number of guesses taken by player for final question's answer\r\n\r\n # end = '' used to continue printing the next lines of strings together in console.\r\n\r\n print(\"\\n\\n\\t\\t CAN YOU OPEN THIS LOCK ?\", end='')\r\n print(\" \\n\\n\\n\\t\\t 206: Two digits are right but both are in the wrong place\", end='')\r\n print(\" \\n\\n\\t\\t 738: All Digits are wrong\", end='')\r\n print(\" \\n\\n\\t\\t 380: One digit is right but in the wrong place\",end='')\r\n print(\" \\n\\n\\t\\t 682: One digit is right and in its place\", end='')\r\n print(\" \\n\\n\\t\\t 614: One digit is right but in the wrong place\", end='')\r\n\r\n while finalguess < 1: # Create While loop with only 1 attempt...\r\n\r\n print(\"\\nGive Your Answer\\n\")\r\n\r\n finalanswer = str(input())\r\n\r\n finalguess = finalguess + 1\r\n\r\n if finalanswer.lower() == \"042\": # Check and Compare player's answer with \"042\".\r\n \r\n\r\n print(f\"\\n\\n{finalanswer.title()} is absolutely correct!\")\r\n \r\n time.sleep(3)\r\n \r\n print(f\"\\n\\n Kijo - 'Congratulations, {name.upper()}!!! You finally were able to retain the box!!! \\n\\n\\t\\t***YOU WIN :)***\")\r\n\r\n sys.exit() # Exit the game.\r\n\r\n \r\n\r\n if finalanswer.lower() != \"042\": # Creating Conditions if player's answer for third question does not match \"042\"\r\n\r\n # Print following statements and end the game.\r\n \r\n print(\"\\nSorry, you did not give the correct answer :(\")\r\n\r\n time.sleep(3)\r\n\r\n print(\"\\nYou are locked out of room! \\n\\nGave Over!\")\r\n\r\n sys.exit()\r\n\r\n \r\n\r\n if secondanswer.lower() != \"autobiography\": # Creating similar Condition for Second Question, if player's answer does not match \"AUTOBIOGRAPHY\"\r\n\r\n # Print the following and end the game.\r\n \r\n print(\"\\nSorry, you did not give the correct answer :(\")\r\n\r\n time.sleep(3)\r\n \r\n print(\"\\nYou are locked out of room! \\n\\nGame Over!\")\r\n\r\n sys.exit()\r\n \r\n\r\n if answer.lower() != \"ton\": # Finally, creating the Condition for First Question if player's answer not matched with \"TON\"\r\n\r\n # Print the following statements and end the game\r\n \r\n print(\"\\nSorry, you did not give the correct answer :(\")\r\n\r\n time.sleep(3)\r\n\r\n print(\"\\nYou are locked out of room! \\n\\nGame Over!\")\r\n\r\n sys.exit()\r\n \r\n else: # If to open Blue room, the player writes answer other than \"TELEPHONE\", restart the game.\r\n\r\n print(\"You gave wrong answer, sorry but you will have to restart the game :(\")\r\n\r\n sys.exit()", "def prompt():\r\n\tglobal mhp\r\n\tglobal php\r\n\tglobal pen\r\n\tglobal fo\r\n\tglobal men\r\n\tprint \"Socanda %dHP-%dEn, %s %dHP-%dEn\" % (php, pen, fo, mhp, men)", "def prompt_player(self):\n board = self.draw_board()\n print board\n self.player_moves(self.board_values)", "def introducer(self):\r\n\t\t#Introduces the program.\r\n\t\tprint(\"\\n\"*4)\r\n\t\tprint(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\\n\"*2)\r\n\t\tprint(\"This is a program that will encrypt or decrypt a message or file.\")\r\n\t\tprint(\"Remember to use the same key that you used to encrypt the message or file to decrypt.\")\r\n\t\tprint(\"You can press Ctrl c at anytime to quit the program.\\n\")\r\n\t\tprint(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\\n\"*2)\r\n\r\n\t\t#Sets the program to encrypt or decrypt. 
Will keep asking if a non-answer is given.\r\n\t\tprint(\"1. Encrypt\")\r\n\t\tprint(\"2. Decrypt\")\r\n\t\tprint(\"3. Hack\")\r\n\t\twhile True:\r\n\t\t\tself.code_mode = input(\"Enter number to encrypt or decrypt. --> \")\r\n\t\t\tif self.code_mode == \"1\" or self.code_mode == \"2\" or self.code_mode == \"3\":\r\n\t\t\t\tbreak\r\n\t\t\telse:\r\n\t\t\t\tcontinue\r\n\r\n\t\tprint(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\\n\")\r\n\r\n\t\t#Sets the input mode. Will keep asking if a non-answer is given.\r\n\t\tprint(\"1.Type or paste message\")\r\n\t\tprint(\"2.Provide message in a text file.\")\r\n\t\twhile True: \r\n\t\t\tself.input_mode = input(\"Enter number to indicate input mode. --> \")\r\n\t\t\tif self.input_mode == \"1\" or self.input_mode == \"2\": \r\n\t\t\t\tbreak\r\n\t\t\telse:\r\n\t\t\t\tcontinue\r\n\r\n\t\tprint(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\\n\")", "def choice2(choice, ghost):\n if choice == \"2\":\n if \"mirrors\" not in items:\n print_pause(\"The church street is totally empty and dark\", 2)\n print_pause(\n \"You only hear the sound of\" +\n \"the wind that runs through the hall\", 2)\n mirror_choice(ghost)\n else:\n print_pause(\n \"You allready pasted this way\\nPlease choose another way!\", 2)\n logic(ghost)", "def main():\n\n # after first round it will ask if want to change word list\n first_round = True\n\n # 1.\n print_game_logo()\n\n # will break out of loop when the player wouldn't want another round\n while True:\n if not first_round:\n if get_yes_no(\"Would you like to switch to a different word-list?\"):\n # 2.1.\n print_game_logo()\n word_list_path = change_word_list()\n else:\n sys_comment(\"Playing with the same word-list\")\n else:\n # 2.1.\n word_list_path = change_word_list()\n first_round = False\n\n # 2.2.\n secret_word = change_secret_word(word_list_path)\n\n # starting the game\n hangman(secret_word)\n\n # finished the game - ask if want another round\n if get_yes_no(\"Would you like to play another game?\"):\n sys_comment(\"Starting another game\")\n else:\n sys_comment(\"Quitting\")\n break\n\n return None", "def main():\r\n lp = launchpad_py.Launchpad() \r\n lp.Open()\r\n lp.LedAllOn(0)\r\n displayField(lp)\r\n player = 1\r\n while True:\r\n time.sleep(0.01)\r\n if player == 1:\r\n letter = \" X \"\r\n if player == 2:\r\n letter = \" O \"\r\n if setCross(lp, player, field, letter):\r\n if player == 1:\r\n player = 2\r\n else:\r\n player = 1\r\n if theWinnerIs(field, letter):\r\n if letter == \" X \":\r\n allOnForWinner(field,letter,lp)\r\n if letter == \" O \":\r\n allOnForWinner(field,player,lp)\r\n break\r\n if equal(field):\r\n lp.LedAllOn(lp.LedGetColor(3, 3))\r\n break", "def draw(c):\n c.draw_line((0,130), (580,130), 200, mood)\n c.draw_line((0,450), (290,450), 200, p1mood)\n c.draw_line((290,450), (580,450), 200, p2mood)\n c.draw_line((0,290), (580,290), 200, \"black\")\n c.draw_text(format(current), (150, 330), 110, \"yellow\")\n \n c.draw_line((193,108), (387,108), 120, \"#000080\")\n c.draw_line((0,25), (580,25), 50, \"#00FFFF\")\n c.draw_text(\"SINGLE PLAYER\", (20, 34), 30, \"#191970\")\n c.draw_text(\"Score\", (250, 90), 30, \"white\", \"sans-serif\")\n c.draw_line((250,96), (329,96), 4, \"white\")\n c.draw_text(score(wins, tries), scorepos, 40, \"white\", \"sans-serif\")\n \n c.draw_line((66,472), (220,472), 120, \"#556B2F\")\n c.draw_line((360,472), (514,472), 120, \"#4B0082\") \n c.draw_line((0,555), (580,555), 50, 
\"#F4A460\")\n c.draw_text(\"TWO PLAYER\", (20, 566), 30, \"#800000\")\n c.draw_text(\"Player 1\", (90, 454), 30, \"#F0E68C\", \"sans-serif\")\n c.draw_line((90,464), (200,464), 4, \"#F0E68C\")\n c.draw_text(score_string(p1score), p1scorepos, 40, \"#F0E68C\", \"sans-serif\")\n c.draw_text(\"Player 2\", (380,454), 30, \"#E6E6FA\", \"sans-serif\")\n c.draw_line((380,464), (490,464), 4, \"#E6E6FA\")\n c.draw_text(score_string(p2score), p2scorepos, 40, \"#E6E6FA\", \"sans-serif\")\n c.draw_line((0,440), (580,440), result2pline, \"#F7DE00\")\n c.draw_text(result2p, (180,450), 35, \"black\")", "def lets_get_punny():\n \n text = '\\033[35;1m' # text color\n background = '\\033[30;1;45m'\n \n chat = True\n while chat:\n\n # Get a message from the user\n msg = input(background + 'You say \\U0001F4AC:\\t')\n out_msg = None\n \n #Checks if input has question mark\n question = is_question(msg)\n defined_question = how_question(msg)\n \n # Checks if input has exclamation point\n exclamation = is_screaming(msg)\n\n # Prepare the input message\n msg = prepare_text(msg)\n\n # Check for an end msg = \n if end_chat(msg):\n out_msg = '¡Adiós! \\U0001F44B'\n print(out_msg)\n break\n \n # all my message outputs here \n if not out_msg:\n \n outs = []\n \n outs.append(selector(msg, GREETING_IN, GREETING_OUT)) # Greetings\n \n outs.append(selector(msg, QUESTION_GREETING_IN, QUESTION_GREETING_OUT))\n \n outs.append(selector(msg, JOKE_REQUEST_IN, JOKE_REQUEST_OUT)) # Responses for certain questions\n outs.append(selector(msg, NO_JOKE_IN, NO_JOKE_OUT))\n outs.append(selector(msg, NO_JOKE_REPLY_IN, NO_JOKE_REPLY_OUT))\n \n outs.append(selector(msg, YES_JOKE_IN, YES_JOKE_OUT))\n outs.append(selector(msg, YES_JOKE_REPLY_IN, JOKE_REPLY_OUT))\n \n # How jokes get responses works\n msg_str = ' '.join(msg)\n msg_str = msg_str.lower()\n \n if msg_str in JOKE_REPLY_IN_2:\n name = find_in_list(msg, JOKE_REPLY_IN_2)\n outs.append(joke_reply_2(msg))\n \n outs.append(respond_echo(selector(msg, LAUGH_IN, LAUGH_OUT), 1, \"\\U0001F923 \"))\n \n options = list(filter(None, outs))\n \n if options:\n out_msg = random.choice(options)\n \n if not out_msg and exclamation: \n out_msg = random.choice(SCREAMING)\n \n if not out_msg and question:\n out_msg = text + random.choice(UNKNOWN_QUESTION)\n\n # Catch-all to say something if msg not caught & processed so far\n if not out_msg:\n out_msg = random.choice(UNKNOWN)\n\n print(text + 'JokeBot \\U0001F47E:\\t', out_msg + '\\n')", "def head_plain():\n print (hair_buzz())\n print (eye_narrow())\n print (nose_triangle())\n print (mouth_smile())\n print (chin_plain())", "def correct_response(self):\n \n self.play_sound(choice(self.correct_sfx), self.standard_sfx, wait=True)\n self.update_points(True)\n# self.check_level()\n self.get_new_prompt()\n self.frames_passed = 0", "def start_prompt(message):\n reply = ' '.join((\n \"Press and hold screen button with microphone picture.\",\n \"Say your phrase and release the button.\",\n ))\n return bot.reply_to(message, reply)", "def print_hands(self):\n # Clear the terminal and reprint round header\n os.system(\"clear\")\n self.print_header\n\n # Only display one of the dealers cards if they are still playing\n if not self.round_winner:\n print()\n print(\"Dealer's Cards\")\n print(\"=\" * 25)\n print(\"UNKNOWN\")\n for card in self.dealer.cards:\n if card != self.dealer.cards[0]:\n print(f\"{card.game_value} of {card.suit}\")\n print(\"-\"*25)\n print(\"TOTAL = ?\")\n print()\n\n print(\"Player's Cards\")\n print(\"=\" * 25)\n for card in 
self.player.cards:\n print(f\"{card.game_value} of {card.suit}\")\n print(\"-\" * 25)\n print(\"TOTAL = \" + str(self.player.sum_cards()))\n print()\n\n # Display the players cards and all of the dealers cards\n elif self.round_winner:\n print()\n print(\"Dealer's Cards\")\n print(\"=\" * 25)\n for card in self.dealer.cards:\n print(f\"{card.game_value} of {card.suit}\")\n print(\"-\" * 25)\n print(\"TOTAL = \" + str(self.dealer.sum_cards()))\n print()\n\n print(\"Player's Cards\")\n print(\"=\" * 25)\n for card in self.player.cards:\n print(f\"{card.game_value} of {card.suit}\")\n print(\"-\" * 25)\n print(\"TOTAL = \" + str(self.player.sum_cards()))\n print()\n pass", "def bandit_camp_begin():\n\n bandits = enemies.bandits()\n os.system(\"clear\")\n print(\"After almost 10 hours of rowing, you finally arrive to partly sunken, bandit cave.\")\n time.sleep(2)\n print(\"You stepped out of the boat and try to tie boats rope to big rock on a shore.\")\n time.sleep(2)\n print(\"Suddenly you hear bandits war cry: Kill the intruder!!!!\")\n time.sleep(2)\n print(\"You turn back to face this new danger!\")\n print()\n input(\"Press Enter to continue...\")\n combat.combat(bandits)", "def play(self):\n self.mu1=random.randrange(20,40,1)\n self.sd=round(self.mu1*(5/30))\n self.loss=random.randrange(5,20,1)\n self.alpha = 1.96 # set to 5%\n self.beta = 0.84 # set to 80%\n self.mu2= self.mu1-self.mu1*(self.loss/100)\n self.n=((self.alpha+self.beta)/((self.mu2-self.mu1)/self.sd))**2\n # clear all fields \n self.clear() \n #\n # Add instruction text to the first text window\n self.instruction_message=(\"If you have a population mean of %s \"\n \"\\nand a standard deviation of %s,\"\n \"\\nwhat sample size is required\\n\" \n \"to detect a loss of %s percent? \\n\"\n \"\\nNote: alpha = 5 percent and power = 80 percent\" %(self.mu1,self.sd,self.loss))\n \n self.text_instructions.insert(0.0,self.instruction_message)\n # disable the play button so it cannot be pressed again", "def draw(self, draw_surface):\n if not self._initial_prompt.is_over():\n self._initial_prompt.draw(draw_surface)\n elif self._response == 2 and not self._seeya_dialogue.is_over():\n self._seeya_dialogue.draw(draw_surface)\n elif self._response == 0 and not self._buy_menu.is_over():\n draw_surface.blit(self._money_surface, (2, 2))\n draw_surface.blit(self._help_surface, (0, 111))\n self._buy_menu.draw(draw_surface)\n elif self._response == 1 and not self._sell_menu.is_over():\n self._sell_menu.draw(draw_surface)", "def speech_response_prompt(output, reprompt_text, endsession):\n\n return {\n 'outputSpeech': {\n 'type': 'PlainText',\n 'text': output\n },\n 'reprompt': {\n 'outputSpeech': {\n 'type': 'PlainText',\n 'text': reprompt_text\n }\n },\n 'shouldEndSession': endsession\n }", "def speech_response_prompt(output, reprompt_text, endsession):\n\n return {\n 'outputSpeech': {\n 'type': 'PlainText',\n 'text': output\n },\n 'reprompt': {\n 'outputSpeech': {\n 'type': 'PlainText',\n 'text': reprompt_text\n }\n },\n 'shouldEndSession': endsession\n }", "def main():\n\n parser = argparse.ArgumentParser(usage='python3 SSHBastard.py CIDR -u USER -f PASSFILE')\n parser.add_argument('cidr', type=str, metavar='CIDR', help=\"set target CIDR\")\n parser.add_argument('-u', type=str, metavar='USERNAME', required=True, help='set user name')\n parser.add_argument('-f', type=str, metavar='PASSWD_FILE', required=True, help='set passwords file')\n\n args = parser.parse_args()\n target_cidr = args.cidr\n passwd_file = args.f\n user = args.u\n\n 
print(\"\"\"\\033[91m\n\n █▀ █▀ █░█ █▄▄ ▄▀█ █▀ ▀█▀ ▄▀█ █▀█ █▀▄\n ▄█ ▄█ █▀█ █▄█ █▀█ ▄█ ░█░ █▀█ █▀▄ █▄▀ \n\n [email protected] | For educational use only\n \\x1b[0m\"\"\")\n\n # animation = [\"10%\", \"20%\", \"30%\", \"40%\", \"50%\", \"60%\", \"70%\", \"80%\", \"90%\", \"100%\"]\n animation = [\"\\t\\t\\t[■□□□□□□□□□]\", \"\\t\\t\\t[■■□□□□□□□□]\", \"\\t\\t\\t[■■■□□□□□□□]\", \"\\t\\t\\t[■■■■□□□□□□]\", \"\\t\\t\\t[\"\n \"■■■■■□□□□□]\",\n \"\\t\\t\\t[■■■■■■□□□□]\", \"\\t\\t\\t[■■■■■■■□□□]\", \"\\t\\t\\t[■■■■■■■■□□]\", \"\\t\\t\\t[■■■■■■■■■□]\",\n \"\\t\\t\\t[■■■■■■■■■■]\"]\n\n for i in range(len(animation)):\n time.sleep(0.3)\n sys.stdout.write(\"\\r\" + animation[i % len(animation)])\n sys.stdout.flush()\n\n print(\"\\n\")\n\n with open(passwd_file) as file:\n for line in file.readlines():\n for ip in ipaddress.IPv4Network(target_cidr):\n if Found:\n exit(0)\n if Fails > 5:\n print(\"[!] Exiting: Too Many Socket Timeouts\")\n exit(0)\n connection_lock.acquire()\n password = line.strip('\\r').strip('\\n')\n print(\"[-] Testing: \" + str(password) + \" for user \" + user + \" on host \" + str(ip))\n t = threading.Thread(target=connect, args=(str(ip), user, password))\n t.start()", "def main():\n codedmessage = ReadCodedMessage()\n PlayCodedMessage(codedmessage)\n PlayAgain(codedmessage)\n message = DecodeCodedMessage(codedmessage)\n if (message==\"?\"):\n if DEBUG:print(\"Unknown code - try again!\")\n else:\n if DEBUG:print (\"Message: \", message)", "def _pre_image_append(self, msg, prompt_number):\n self._append_plain_text(self.output_sep, True)\n self._append_html(self._make_out_prompt(prompt_number), True)\n self._append_plain_text('\\n', True)", "def base(self):\n time.sleep(1)\n print(CLEAR)\n print(\"\"\"\n------------------------------------------------------\n {1}Welcome to the Station!{0}\n \n{2}* Type any below given commands and press Enter {0}\n {2}* Just press Enter to exit the station{0}\n \n {3}{4}AP{0}{5} to configure the Access point settings {0}\n {3}{4}R{0}{5} to start the Radar {0}\n {3}{4}C{0}{5} to auto-connect to a saved network {0}\n {3}{4}MC{0}{5} to manually connect to a saved network {0}\n {3}{4}A{0}{5} to add a new network {0}\n {3}{4}D{0}{5} to remove a network {0}\n------------------------------------------------------\n\"\"\".format(END, HEADER, YELLOW, PROMPT, BOLD, BLUE))\n\n command = input(PROMPT).lower()\n\n\n if command == COMMANDS[0]:\n self.access_point()\n\n elif command == COMMANDS[1]:\n self.radar()\n\n elif command == COMMANDS[2] and SSID_EXISTS:\n self.auto_connect()\n\n elif command == COMMANDS[3] and SSID_EXISTS:\n self.manually_connect()\n\n elif command == COMMANDS[4]:\n self.add_a_network()\n\n elif command == COMMANDS[5] and SSID_EXISTS:\n self.delete_a_network()\n\n elif (command in COMMANDS[2] or command in COMMANDS[3] or command in COMMANDS[5]) and not SSID_EXISTS:\n print(\"{1}No saved network/s.{0}\".format(END, YELLOW))\n\n\n else:\n pass", "def hangman_figure(attempt_left):\n if attempt_left == N_TURNS:\n print('___________')\n print('|')\n print('|')\n print('|')\n print('|')\n print('|')\n print('|')\n print('|')\n print('|_____')\n if attempt_left == N_TURNS - 1:\n print('___________')\n print('| |')\n print('|')\n print('|')\n print('|')\n print('|')\n print('|')\n print('|')\n print('|_____')\n if attempt_left == N_TURNS - 2:\n print('___________')\n print('| |')\n print('| O')\n print('|')\n print('|')\n print('|')\n print('|')\n print('|')\n print('|_____')\n if attempt_left == N_TURNS - 3:\n print('___________')\n print('| |')\n 
print('| O')\n print('| |')\n print('| |')\n print('|')\n print('|')\n print('|')\n print('|_____')\n if attempt_left == N_TURNS - 4:\n print('___________')\n print('| |')\n print('| O')\n print('| \\\\_|')\n print('| |')\n print('|')\n print('|')\n print('|')\n print('|_____')\n if attempt_left == N_TURNS - 5:\n print('___________')\n print('| |')\n print('| O')\n print('| \\\\_|_/')\n print('| |')\n print('|')\n print('|')\n print('|')\n print('|_____')\n if attempt_left == N_TURNS - 6:\n print('___________')\n print('| |')\n print('| O')\n print('| \\\\_|_/')\n print('| |')\n print('| /')\n print('| |')\n print('|')\n print('|_____')\n if attempt_left == N_TURNS - 7:\n print('___________')\n print('| |')\n print('| O')\n print('| \\\\_|_/')\n print('| |')\n print('| / \\\\')\n print('| | |')\n print('|')\n print('|_____')\n if attempt_left == N_TURNS - 8:\n print('___________')\n print('| |')\n print('| O')\n print('| \\\\_|_/')\n print('| |')\n print('| / \\\\')\n print('| | |')\n print('| |')\n print('|_____')\n if attempt_left == N_TURNS - 9:\n print('___________')\n print('| |')\n print('| O')\n print('| \\\\_|_/')\n print('| |')\n print('| / \\\\')\n print('| | |')\n print('| | |')\n print('|_____')\n if attempt_left == N_TURNS - 10:\n print('___________')\n print('| |')\n print('| -O')\n print('| \\\\_|_/')\n print('| |')\n print('| / \\\\')\n print('| | |')\n print('| | |')\n print('|_____')\n if attempt_left == N_TURNS - 11:\n print('___________')\n print('| |')\n print('| -O-')\n print('| \\\\_|_/')\n print('| |')\n print('| / \\\\')\n print('| | |')\n print('| | |')\n print('|_____')", "def reset(self):\r\n self.player_hand.reset()\r\n self.dealer_hand.reset()\r\n self.player_hand.add(self.deck.deal())\r\n self.player_hand.add(self.deck.deal())\r\n self.dealer_hand.add(self.deck.deal())\r\n self.dealer_hand.add(self.deck.deal())\r\n # Checking for edge cases where player/dealer (or both) have two aces\r\n if self.player_hand.total == 22 and self.dealer_hand.total == 22:\r\n self.status_color = 'red'\r\n self.game_status = \"TIE Game... Press 'r' to start game\"\r\n self.in_progress = False\r\n elif self.player_hand.total == 22:\r\n self.status_color = 'red'\r\n self.game_status = \"Dealer WINS... Press 'r' to start game\"\r\n self.dealer_wins += 1\r\n self.in_progress = False\r\n elif self.dealer_hand.total == 22:\r\n self.status_color = 'red'\r\n self.game_status = \"Player WINS... 
Press 'r' to start game\"\r\n self.player_wins += 1\r\n self.in_progress = False\r\n else:\r\n self.game_status = 'In Progress...'\r\n self.status_color = 'green'\r\n self.in_progress = True\r\n self.refresh_canvas()", "def prepare_command(self, arg=None):\n\n self.textwin.print_blank(0)\n self.textwin.win.addch(0, 0, ':')\n self.textwin.win.chgat(0, 1, 1, curses.A_STANDOUT)\n\n self.inp = True\n self.command_event.set()", "def main(screen):\n\n max_y, max_x = screen.getmaxyx()\n\n \n pass_screen_h, pass_screen_w = {5, 25}\n \n pass_screen_y_off,pass_screen_x_off = {pass_screen_h/2, pass_screen_w/2}\n\n pass_text_y_off = 0\n pass_text_x_off = 3\n\n pass_win = screen.subwin(5, 25,(max_y/2) - pass_screen_y_off, (max_x/2) - pass_screen_x_off)\n pass_win.box()\n pass_win.addstr(1, 2, \"Please enter password\")\n\n pass_box = screen.subwin(1, 8,(max_y/2) - pass_text_y_off, (max_x/2) - pass_text_x_off)\n\n# pass_box.box()\n screen.refresh()\n tb = curses.textpad.Textbox(pass_box)\n \n text = tb.edit()\n while 1:\n c = screen.getch()", "def flipper(deck, message): #flips card in player hand\r\n\tflipcheck, flipcheck1 = 1, 0\r\n\ttempHand = []\r\n\r\n\tprint message,\r\n\ttime.sleep(0.33);print \".\",\r\n\ttime.sleep(0.33);print \".\",\r\n\ttime.sleep(0.34);print \".\"\r\n\r\n\ttry:\r\n\t\twhile flipcheck == 1:\r\n\t\t\ttry:\r\n\t\t\t\ttempHand = random.choice(deck) #grab card from player/cpu hand\r\n\t\t\t\tflipcheck = 0\r\n\r\n\t\t\texcept(TypeError):\r\n\t\t\t\tflipcheck1 += 1\r\n\r\n\t\t\t\tif flipcheck1 == 5:\r\n\t\t\t\t\tsys.exit(TypeError)\r\n\r\n\t\tif tempHand in deck:\r\n\t\t\tdeck.remove(tempHand) #removes tempHand from player/cpu hand\r\n\r\n\texcept(IndexError):\r\n\t\tpass\r\n\r\n\tif type(tempHand) == list:\r\n\t\tprint \"The card was a \" + str(tempHand[1]) + \" of \" + str(tempHand[0]) + \"!\\n\"\r\n\r\n\telse:\r\n\t\tprint \"The card was the \" + tempHand + \" wild card!\"\r\n\r\n\t\tif tempHand == 'MasterSpark': #MasterSpark Wild Card\r\n\t\t\tif deck == playerDeck:\r\n\t\t\t\tplayerScore -= 10\r\n\t\t\t\tprint 'MasterSpark!'\r\n\t\t\t\tplayerDisplayed.remove('MasterSpark')\r\n\t\t\telif deck == cpuDeck:\r\n\t\t\t\tplayerScore -= 10\r\n\t\t\t\tprint 'MasterSpark!'\r\n\t\t\t\tcpuDisplayed.remove('MasterSpark')\r\n\r\n\treturn [tempHand, deck] #returns two values. 
use arrays to get correct values with tempGrab[]\r", "async def blackjack(self, ctx, arg: int): \n db = sqlite3.connect('main.sqlite')\n cursor = db.cursor()\n cursor.execute(f'SELECT user_id, jacks FROM main WHERE user_id = {ctx.author.id}')\n result = cursor.fetchone()\n embed = discord.Embed(color=0x228b22, title=\"Blackjack\")\n if result is not None:\n if arg > result[1]:\n embed.add_field(name=\"Error\", value=f\"You can't bid more chips than you have!\", inline=False)\n embed.set_footer(text=\"You can check your balance using the *profile* command\")\n else:\n player, house = [],[]\n deck.deal(player,2)\n deck.deal(house, 2)\n embed.add_field(name=\"Your Hand:\", value=f\"```{deck.display_hand(player)}``` \\n Value: {deck.hand_value(player)}\")\n embed.add_field(name=\"Dealer's Hand:\", value=f\"```['{deck.display_hand(house)[1]}', '?'] ``` \\n Value: ?\")\n embed.set_footer(text=\"Type `hit` or `stay` to take your turn!\")\n await ctx.send(content=None, embed=embed)\n if deck.hand_value(house) != 21 and deck.hand_value(player) != 21:\n msg = await self.client.wait_for('message', check=lambda message: message.author == ctx.author)\n while msg.content.startswith(\"hit\") or msg.content.startswith(\"Hit\"):\n embed.remove_field(0)\n deck.deal(player)\n embed.insert_field_at(0, name=\"Your Hand:\", value=f\"```{deck.display_hand(player)}``` \\n Value: {deck.hand_value(player)}\")\n await ctx.send(content=None, embed=embed)\n if deck.hand_value(player) > 21:\n break\n msg = await self.client.wait_for('message', check=lambda message: message.author == ctx.author)\n embed.remove_field(1)\n embed.set_footer(text=\"\")\n deck.house_turn(house)\n embed.add_field(name=\"Dealer's Hand:\", value=f\"```{deck.display_hand(house)}``` \\n Value: {deck.hand_value(house)}\")\n if deck.hand_value(player) == 21:\n outcome = \"Blackjack!\"\n bal = \"won\"\n chips = int(result[1] + arg*1.5)\n elif deck.hand_value(player) > 21:\n outcome = \"Player bust, you lose\"\n bal = \"lost\"\n chips = int(result[1] - arg)\n elif deck.hand_value(house) > 21:\n outcome = \"Dealer bust, you win!\"\n bal = \"won\"\n chips = int(result[1] + arg)\n elif deck.hand_value(player) > deck.hand_value(house):\n outcome = \"Win!\"\n bal = \"won\"\n chips = int(result[1] + arg)\n elif deck.hand_value(player) == deck.hand_value(house):\n outcome = \"Push, chips back\"\n bal = \"gotten back your\"\n chips = int(result[1])\n else:\n outcome = \"Loss\"\n bal = \"lost\"\n chips = int(result[1] - arg)\n sql = (\"UPDATE main SET jacks = ? 
WHERE user_id = ?\")\n val = (chips, ctx.author.id)\n cursor.execute(sql, val)\n db.commit()\n cursor.close()\n db.close()\n if chips == int(result[1]):\n chips += arg\n embed.add_field(name=outcome, value=f\"You have {bal} <:chip:657253017262751767> **{abs(int(result[1] - chips))}** chips\", inline=False)\n await ctx.send(content=None, embed=embed)\n else:\n await ctx.send(\"You must register before you can play blackjack!\")", "def init():\n unicornhathd.rotation(270)\n unicornhathd.brightness(1.0)\n\n stdscr = curses.initscr()\n curses.cbreak()\n curses.noecho()\n stdscr.nodelay(1)\n stdscr.keypad(1)\n\n return stdscr", "def prepare_to_advance(self):\n\n self.capture_user_input()\n self.UI.reset_figure()\n # stopping the blocking event loop\n self.fig.canvas.stop_event_loop()", "def self_play_visualisation(board_size=BOARD_SIZE):\n policy_value = SimpleCNN([board_size, board_size, 2])\n history, winner = play_game(policy_value=policy_value)\n print(\"Watching game replay\\nPress Return to advance board\")\n for state, board, hoice in history:\n print(state)\n input(\"\")\n\n if winner == 1:\n print(\"Black won\")\n else:\n print(\"White won\")", "def play_command(update,context):\n update.message.reply_text('Rkrt: Welcome on board. Let\\'s see if you are worth the challenge. To find the invite code and land on planet hackazon you will need to solve this first. Ready for a ride?!')\n time.sleep(5)\n update.message.reply_text('Mx: During intergalactical travel, time does not matter. Any enemies could be listening in at any time. This is why the crew is sometimes forced to used coded languages to exchange messages between vessels. To decrypt messages every crew member can use the key on their hardware tokens.')\n time.sleep(10)\n update.message.reply_text('Jms: Mx we are getting a distress signal from vessel Vigenere. Do you copy?')\n time.sleep(3)\n update.message.reply_text('Mx: [gasps...]')\n time.sleep(1)\n update.message.reply_text('Mx: This one is for you rookie... See you on the other side.')\n update.message.reply_text('Kyjkda kghc tir Yeevobyj: BgXfsGofrCyrDouwfh\\r\\nUsfcfqg zb dywzv lcfy ij cqff hsnal jjoa:\\r\\nCKJ{en55td2my6jse8361a427p3xf319tf12}')", "def flush(handIn):", "def hellraiser_kills_player():\r\n\r\n # The below docstring explains the use of this function.\r\n \r\n\r\n print(\"\\nThe Hell Raiser pulls you back on the floor...\\n\\nLucifer trying to splash Holy water from jug kept on dining table...\")\r\n\r\n time.sleep(3)\r\n\r\n print(\"\\nYou are battling hard with devil to save your life...\")\r\n\r\n time.sleep(3)\r\n \r\n print(\"\\nLucifer tries to splash water on devil's face!\")\r\n \r\n time.sleep(2)\r\n \r\n holywater = input(\"\\n\\nType 'splash' to splash holy water: \")\r\n\r\n if holywater.lower() == \"splash\":\r\n\r\n print(\"\\nLucifer splashes water on devil...\\n\\nHowever the Hell Raiser has become ultra strong this time and does not repel to water...\")\r\n\r\n time.sleep(2)\r\n\r\n print(\"\\n\\nIt's time to use your tactics; Cry out 'CHRISTO'!\")\r\n\r\n time.sleep(3)\r\n\r\n christo = input(\"\\nType 'christo' to retaliate against Hell raiser: \")\r\n\r\n if christo.lower() == \"christo\":\r\n\r\n print(\"\\n\\t***CHRISTO***\")\r\n\r\n print(f\"\\n\\nHell Raiser - 'Ahhh will you just stop it!!!? Those words are no more frightening to me. 
\\n\\t I am not gonna leave you now!!!'\")\r\n \r\n time.sleep(3)\r\n\r\n print(\"\\n\\tYou are screaming CRISTO, whilt Lucifer keeps splashing water...\")\r\n\r\n time.sleep(3)\r\n \r\n print(\"\\n You find salt saucer on dining table...\")\r\n \r\n time.sleep(3)\r\n\r\n print(\"\\nLucifer tells you to throw salt on devil's eyes...\")\r\n\r\n salt = input(\"Type 'salt' to rub on devil: \")\r\n\r\n if salt.lower() == \"salt\":\r\n \r\n print(\"\\n\\n Hell Raiser - 'You bloody maniac! How dare you rub salt on me!...'\")\r\n \r\n time.sleep(3) \r\n\r\n print(\"\\nUnfortunately this time, the devil has garnered extreme resilience and agility...\")\r\n\r\n time.sleep(2)\r\n\r\n print(\"\\nThe Hell Raiser grabs you by his tough hands...\\n and throws his demonic knife on Lucifer's chest, killing him on the spot.\")\r\n\r\n time.sleep(5)\r\n\r\n print(\"\\nYou fought hard with Lucifer, but the devil drags you down to hell and finally eliminates you!\")\r\n\r\n time.sleep(1)\r\n\r\n print(\"\\n\\n\\t\\t Game Over !!! \\n\\n\\t\\tBetter luck next time!\")\r\n\r\n sys.exit()", "def confirm_start(self, player=str):\n self.clear_screen()\n print(\"\\n\" * 11)\n pass_text = \"Pass the device to \" + player\n print(f\"{pass_text : ^100}\")\n input(f\"{'Press ENTER when ready.' : ^100}\")\n return self.stop_game", "def battle_screen_my_hand_card_display(screen,buttons, screen_status, button_status, card_database_filter, user):\n rect_position_x = 100\n rect_position_y = 610\n row_number = 1\n if screen_status.battle_screen_action_indicator == 'stage-0':\n pass\n else :\n\n if screen_status.battle_screen_my_hand_page_id <= 0:\n screen_status.battle_screen_my_hand_page_id = 1\n # Edge cases when len() = 6,12,18....\n if len(user.hand_list) % 7 == 0 and len(user.hand_list) != 0:\n if screen_status.battle_screen_my_hand_page_id >= (len(user.hand_list))//7 + 1:\n screen_status.battle_screen_my_hand_page_id = (len(user.hand_list))//7 + 0\n\n else:\n if screen_status.battle_screen_my_hand_page_id >= (len(user.hand_list))//7 + 2:\n screen_status.battle_screen_my_hand_page_id = (len(user.hand_list))//7 + 1\n # Algorithm to draw all cards in local_store_list, 6 card per page.\n for card in user.hand_list[7*(screen_status.battle_screen_my_hand_page_id - 1):7 * screen_status.battle_screen_my_hand_page_id]:\n if row_number <= 7:\n card.rect.x = rect_position_x\n card.rect.y = rect_position_y\n screen.blit(card.image, card.rect)\n rect_position_x += 145\n row_number += 1\n if row_number >= 8:\n row_number = 1", "def put_prompt(self, session):\n self.reply_text(session, self._prompt, False)", "async def _get_sketch_prompt(self):\n\n drawing_prompt = self._get_daily_drawing_prompt()\n\n if drawing_prompt == '':\n # No drawing prompt found for today; don't do anything\n return\n elif not drawing_prompt == self.current_prompt:\n # The prompt we pulled does not match what we found before, so post the new text.\n for channel in self.bot.get_all_channels():\n if channel.name == ConfiguredCog.config['content']['daily_prompt_channel'] \\\n and isinstance(channel, TextChannel):\n # Build the prompt message\n color = ConfiguredCog.convert_color(ConfiguredCog.config['content']['prompt_color'])\n title = 'Prompt for today, courtesy of r/SketchDaily'\n url = 'https://reddit.com/r/SketchDaily'\n description = drawing_prompt\n message = Embed(color=color, title=title, url=url, description=description)\n\n # Send the message\n await channel.send(embed=message)\n\n # Note down that we found today's prompt (so as not to re-send it)\n 
self.current_prompt = drawing_prompt\n\n break", "def show_lose_screen():\n print(\"\"\"\n \n _ _ __ _ _ __ __ ____ ____ _ _ \n( \\/ )/ \\ / )( \\ ( ) / \\ / ___)( __) (_)/ ) \n ) /( O )) \\/ ( / (_/\\( O )\\___ \\ ) _) _( ( \n(__/ \\__/ \\____/ \\____/ \\__/ (____/(____) (_)\\_) \n\"\"\")", "def start():\n display_board()\n print(\"\\n\")\n y_n_prompt()", "def head_surprised():\n print (hair_spiky())\n print (eye_wide())\n print (nose_leftwards())\n print (mouth_open())\n print (chin_combo())", "def provide_command_feedback(self, message):\n self.refresh() # reset the display\n\n self.command_feedback_bar.draw_text(message)\n self.input_box.clear()\n self.input_box.draw_text(\"PRESS (C) to CONTINUE\")\n selection = \"\"\n while selection != ord(\"C\") and selection != ord(\"c\"):\n selection = self.get_input_ch()\n\n self.refresh() # reset it again", "def main():\n\n # call to OS for positioning window\n os.environ['SDL_VIDEO_WINDOW_POS'] = \"%d,%d\" % (0, 25)\n\n # Initialization block\n pygame.init() # Initialize pygame module\n screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT)) # initialize screen\n\n # Testing\n # model_card = m_card.Card(m_card.CardType.TEMPURA)\n # view_card = v_card.CardView(screen, model_card)\n\n deck = Deck()\n player = Player()\n b_pack = deck.generate_booster(10)\n player.booster_pack = b_pack\n\n hand_view = HandView(screen, (0, SCREEN_HEIGHT - SCREEN_HEIGHT / 5), (SCREEN_WIDTH, SCREEN_HEIGHT / 5), player)\n pick_crds = PickedCardsView(screen, (0, 0), (SCREEN_WIDTH, SCREEN_HEIGHT / 5), player, 0)\n pick_crds2 = PickedCardsView(screen, (0, 0), (SCREEN_WIDTH, SCREEN_HEIGHT / 5), player, 180)\n # Game loop\n while True:\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n\n elif event.type == pygame.MOUSEBUTTONUP:\n is_clicked([hand_view, pick_crds, pick_crds2], pygame.mouse.get_pos())\n screen.fill((0, 0, 0))\n hand_view.draw()\n pick_crds.draw()\n pick_crds2.draw()\n pygame.display.flip()", "def welcome_screen(self):\n print()\n print('*M*A*S*T*E*R*M*I*N*D*')\n print('Welcome to Mastermind!')\n print('The goal of this game is to guess the secret code.\\n' +\n 'You have as many guesses as you need.\\n' +\n 'After every guess you will see a result of that guess.\\n' +\n 'A result may look like this:\\n' +\n 'Your guess: 1,2,3,4\\n' +\n \"The result: ['1', '-', 'C', '-']\")\n print('This means the following:\\n' +\n 'The first number, 1, is in the correct position\\n' +\n 'The second number, 2, is not included in the secret code\\n' +\n 'The third number, 3,' + \n ' is in the code but is in the wrong position\\n' +\n 'The fourth number, 4, is not included in the code')\n print('When you have the correct numbers ' +\n 'in the right place, you win!\\n' +\n 'Try to beat the game in as few guesses as possible.\\n' +\n 'The first thing you will do is decide if' +\n 'you want standard or custom game.\\n' +\n 'Only the standard game can save you highscore')", "def banner():\n\n def random_color():\n valid_colors = (\"red\", \"green\", \"yellow\", \"blue\", \"magenta\", \"cyan\")\n return random.choice(valid_colors)\n\n autoRecon = rf\"\"\"\n _____________ ____ ________________\n /___/___ \\ / / | /___/__ \\ Mr.P-Millz _____\n O.G./ / _ \\______/__/ |______|__|_____ * \\_________________/__/ |___\n __/__/ /_\\ \\ | | \\ __\\/ _ \\| | __/ __ \\_/ ___\\/ _ \\| |\n | | ___ \\| | /| | ( |_| ) | | \\ ___/\\ \\__( |_| ) | |\n |___|____/\\__\\____|____/_|__|\\_\\____/|__|____|_ /\\___ |\\___ \\____/|___| /\n 
gtihub.com/Knowledge-Wisdom-Understanding \\___\\/ \\__\\/ \\__\\_/ v{V} \\___\\/\n\n\"\"\"\n\n def print_art(msg, color):\n colored_art = colored(msg, color=color)\n print(colored_art)\n\n color = random_color()\n print_art(autoRecon, color)", "def test_discard_action(self):\n self.plr.test_input = [\"discard silver\", \"finish selecting\"]\n self.plr.play_card(self.card)\n self.assertEqual(self.plr.piles[Piles.HAND].size(), 2)\n self.assertEqual(self.plr.actions.get(), 2)\n self.assertEqual(self.plr.buys.get(), 1)\n self.assertNotIn(\"Silver\", self.plr.piles[Piles.HAND])", "def prepost_hook_two(self) -> None:\n self.poutput(\"two\")", "def prompt():\n sys.stdout.write('>> ')\n sys.stdout.flush()", "def progress_game(self):\r\n\r\n if self.actions == len(self.players):\r\n # Reveal the 3 first cards\r\n output_text = \"Dealing the flop...\"\r\n\r\n self.new_output.emit(output_text)\r\n self.community_cards.flop()\r\n\r\n if self.actions == 2 * len(self.players):\r\n # Reveal a 4th card\r\n output_text = \"Dealing the turn...\"\r\n\r\n self.new_output.emit(output_text)\r\n self.community_cards.turn()\r\n\r\n if self.actions == 3 * len(self.players):\r\n # Reveal a 5th card\r\n output_text = \"Dealing the river...\"\r\n\r\n self.new_output.emit(output_text)\r\n self.community_cards.river()\r\n\r\n if self.actions == 4 * len(self.players):\r\n self.showdown()", "def build_deck_screen_my_deck_card_display(screen,buttons, screen_status, button_status, card_database_filter, user):\n # Draw the character card\n if user.character_card == '':\n pass\n else:\n user.character_card.rect.x = 65\n user.character_card.rect.y = 600\n screen.blit(user.character_card.image, user.character_card.rect)\n #Clear duplicate amount each frame and render the refined list\n for card_new in user.deck_list:\n card_new.duplicate = 1\n local_store_list = build_deck_screen_my_deck_card_list_refine(user)\n #use refined list to draw\n rect_position_x = 245 #local variables for rect position for the first card in the user deck\n rect_position_y = 600\n row_number = 1\n #Display cards in local_store_list:\n\n if screen_status.build_deck_screen_my_deck_page_id <= 0:\n screen_status.build_deck_screen_my_deck_page_id = 1\n # Edge cases when len() = 6,12,18....\n if len(local_store_list) % 6 == 0 and len(local_store_list) != 0:\n if screen_status.build_deck_screen_my_deck_page_id >= (len(local_store_list))//6 + 1:\n screen_status.build_deck_screen_my_deck_page_id = (len(local_store_list))//6 + 0\n\n else:\n if screen_status.build_deck_screen_my_deck_page_id >= (len(local_store_list))//6 + 2:\n screen_status.build_deck_screen_my_deck_page_id = (len(local_store_list))//6 + 1\n # Algorithm to draw all cards in local_store_list, 6 card per page.\n for card in local_store_list[6*(screen_status.build_deck_screen_my_deck_page_id - 1):6 * screen_status.build_deck_screen_my_deck_page_id]:\n if row_number <= 6:\n card.rect.x = rect_position_x\n card.rect.y = rect_position_y\n screen.blit(card.image, card.rect)\n rect_position_x += 145\n row_number += 1\n build_deck_screen_my_deck_duplicate_number_display(card, screen)\n if row_number >= 7:\n row_number = 1", "def session_preparation(self) -> None:\n self._test_channel_read(pattern=r\">\")\n self.set_base_prompt()\n self.disable_paging(command=\"set length 0\")", "def play_blackjack(wager, max_wager):\n game = BlackJack(wager=wager, max_wager=max_wager, allow_split=True, allow_dd=True)\n player = game.players[0]\n print('\\nYour Hand %s has %s points. 
Your wager is: %d' % (player['hand'], game.player_hand_value(), player['wager']))\n print(\"Dealer's upcard is: \", game.get_dealer_upcard())\n if not game.verify_blackjack():\n req_split = input('\\nWould you like to Split (Y/N)? ').upper()\n if req_split == 'Y':\n if game.split(): # Split\n print('Your hand has been split. You will play one Hand at a time.', end='\\n\\n')\n else:\n print(\"Looks like you don't have enough credits to split. Playing the round without the split\")\n for hand_idx in range(len(game.players)):\n if hand_idx == 1:\n print('\\nNow, Play your Second (Split) Hand. Results will be shown at the end')\n player = game.players[hand_idx]\n while True and player['active']:\n print('Your hand %s has %s points.' % (player['hand'], game.player_hand_value(hand_idx)))\n action = get_player_input(player['allow_dd'])\n if action == 1: # Hit\n game.hit(hand_idx)\n elif action == 2: # Stand. Round Ends when Player Chooses to Stand\n game.stand(hand_idx)\n elif action == 3: # Double down\n game.double_down(hand_idx)\n results = []\n print('\\nRound Results:')\n print('-' * 13)\n for hand_idx in range(len(game.players)):\n print('Your Hand %s with %s points. Your wager was %d' % (player['hand'], game.player_hand_value(hand_idx), player['wager']))\n print(\"Dealer's Final Hand %s is of value %d\" % (game.dealer_hand, game.dealer_hand_value()))\n print('Result: ', game.players[hand_idx]['result'])\n results.append(game.players[hand_idx]['result'])\n return results, game.wager_earned", "def main():\n uilist = {\n 'joyride':(\"Uses a joystick for steering and outputs console text\", joyride),\n 'curses':(\"A simple curses-based output UI with very basic arrow-key steering\", cursesui),\n 'framebuffer':(\"An output intenteded for the on-board computer, with no steering\", framebuffer),\n }\n\n parser = OptionParser()\n\n uigroup = OptionGroup(parser, \"UI options\")\n uigroup.add_option('-u', '--ui', action=\"store\", type=\"choice\", dest=\"ui\", default=\"joyride\", choices=uilist.keys(),\n help=\"Interact with this type of UI [Default: joyride]\")\n uigroup.add_option('-j', '--joystick', action=\"store\", type=\"string\", dest=\"joystick_device\", default=None,\n help=\"Path to the device file of the joystick (for joyride UI) [Default: None]\")\n uigroup.add_option('-s', '--disable-sound', action=\"store_false\", dest=\"sound\", default=True,\n help=\"Disable sound [Default: False]\")\n uigroup.add_option('-i', '--disable-input', action=\"store_false\", dest=\"allow_input\", default=True,\n help=\"Disable input [Default: False]\")\n uigroup.add_option('-c', '--become-controller', action=\"store_true\", dest=\"become_controller\", default=False,\n help=\"Become exclusive controlling connection [Default: False]\")\n uigroup.add_option('-n', '--no-control', action=\"store_false\", dest=\"allow_control\", default=True,\n help=\"Ignore all UI commands from this client [Default: False]\")\n uigroup.add_option(\"--list\", action=\"store_true\", dest=\"list\", default=False,\n help=\"List the available UIs and exit\")\n parser.add_option_group(uigroup)\n\n netgroup = OptionGroup(parser, \"Network options\")\n netgroup.add_option('-a', '--host', action=\"store\", type=\"string\", dest=\"host\", default=\"localhost\",\n help=\"Host/address to connect to [Default: localhost]\")\n netgroup.add_option('-p', '--port', action=\"store\", type=\"int\", dest=\"port\", default=9999,\n help=\"Port the server is listening on [Default: 9999]\")\n parser.add_option_group(netgroup)\n\n options, args = 
parser.parse_args()\n\n list_and_exit = False\n if options.list:\n list_and_exit = True\n\n if not options.ui or options.ui not in uilist:\n print \"You must pick one of the available UIs with --ui\"\n\n if list_and_exit:\n print \"Available UIs:\"\n for name, info in uilist.items():\n print \"%s %s\" % (name.ljust(30), info[0])\n return 0\n\n # create the robot\n robot = Robot(options.host, options.port)\n status = robot.get_status()\n\n # handle gracefully disconnecting the robot if anything else fails\n try:\n # create the ui\n uimod = uilist[options.ui][1]\n ui = uimod.get_ui(**vars(options))\n\n # create the steerer\n steerer = steering.SteeringModel(status)\n\n if options.sound:\n player = sound.SoundPlayer(status)\n player.play(player.SOUNDS['startup'])\n else:\n player = None\n\n # create the robot client\n client = RobotClient(robot, ui, steerer, player, options.allow_control, options.become_controller)\n\n # start up all the pieces in the right order\n if player: player.start()\n try:\n ui.init()\n ui.start()\n try:\n client.run()\n finally:\n ui.stop()\n finally:\n if player:\n player.stop(player.SOUNDS['crash'])\n finally:\n if not robot.disconnected:\n robot.disconnect()", "def _print_instructions(command, drink, flavor, amount):\n if(command == 'alter'):\n if(drink == 'none'):\n drink = 'that'\n if(flavor == 'bad' or flavor == 'good'):\n print drink, \"was\", flavor\n else:\n if(float(amount) > 0):\n print drink, \"wasn't\", flavor, 'enough'\n else:\n print drink, \"was too\", flavor\n else:\n if(drink == 'none'):\n drink = 'something'\n if(flavor == 'none'):\n flavor = ''\n print \"Make\", flavor, drink\n time.sleep(0.5)", "def battle_screen_character_1_card_display(screen,buttons, screen_status, button_status, card_database_filter, user):\n user.character_card.rect.x = 1050\n user.character_card.rect.y = 40\n screen.blit(user.character_card.image, user.character_card.rect)\n #\n for i in range(1,16):\n if int(user.character_card.level) >= 10 * i:\n user.character_under_card_by_level[str(10 * i)].bottom_rect.x = 1050\n user.character_under_card_by_level[str(10 * i)].bottom_rect.y = 220 + 23 * (i-1)\n screen.blit(user.character_under_card_by_level[str(10 * i)].bottom_image, user.character_under_card_by_level[str(10 * i)].bottom_rect)\n\n #", "def _postConnect(self):\n p = self.spawnProc\n msg = \"SessionManager._postConnect: failed to get prompt\"\n expList.append(self.prompt)\n match = p.expect(expList, self.sshTimeout)\n self._postCheck(match,msg,True)", "def __draw_lostscreen(self) -> None:\n self.__draw_background()\n line1 = LOOSE_FONT.render(\"You loose!! 
Highscore: \" + str(round(self.highscore)), 1, RED)\n line2 = LOOSE_FONT.render(\"Press enter to play again\", 1, RED)\n self.win.blit(line1, (round((WIDTH/2) - line1.get_width()/2), round(HEIGHT/2 - line1.get_height()/2)))\n self.win.blit(line2, (round((WIDTH/2) - line2.get_width()/2), round(HEIGHT/2 - line1.get_height()/2) + line1.get_height() + 5)) \n pygame.display.update()", "def display_cli(conversations, alt_speaker, human_speaker):\n for speaker, speech in conversations:\n if speaker == END_OF_CONVO:\n print(\"-\" * 20 + \"END OF CONVERSATION\" + \"-\" * 20)\n elif speaker == alt_speaker:\n print(\"%-15s: %s\" % (speaker[:15], speech))\n else:\n prBlueBG(\"%-15s: %s\" % (speaker[:15], speech))", "def main():\n\tprint(\"\"\"Welcome to 5 Card Stud!\"\"\")\n\tnames = determine_game_type()\n\thands = make_hands(names)\n\tvalues = []\n\tfor hand in hands:\n\t\tvalue = determine_what_is_in_a_hand(hand)\n\t\tvalues.append(value)\n\thand_values = []\n\tfor hand in values:\n\t\thand_value = determine_hand_value(hand)\n\t\thand_values.append(hand_value)\n\thigh_hand, high_card_involved, high_hands = compare_hand_values(hand_values)\n\tprint(f\"\\nHere are the hands for this game:\\n\")\n\tfor id, hand in enumerate(hands):\n\t\tprint(f\"{NAMES[id]} : {hand}\")\n\tprint()\n\tprint(\"high_hand: \", VALUES[high_hand], \" high_card_involved: \", FACES[high_card_involved])\n\tprint()\n\tfor idx, value in enumerate(hand_values):\n\t\tif value[0][high_hand] and value[1] == high_card_involved:\n\t\t\tprint(f\"{(NAMES[idx]).strip()} is the big winner with a {(VALUES[high_hand]).strip()}, {(FACES[high_card_involved]).strip()}s high\")", "def deal():\n \n # Update messages, score and the player's \"Hand\" status\n # as global variables.\n global outcome, outcome_plus, outcome_plus_plus, in_play, score, action \n outcome = outcome_plus = outcome_plus_plus = \"\"\n action = HIT_OR_STAND\n \n # If the \"Deal\" button is clicked during the middle of \n # a round the program reports that the \"Player\" lost \n # the round and updates the \"score\" appropriately.\n if in_play:\n outcome = PLAYER_LOSES \n outcome_plus = EARLY_DEAL_1\n outcome_plus_plus = EARLY_DEAL_2\n score -= SCORE_POINTS\n else:\n in_play = True\n \n # Create and shuffle the \"Deck\" (stored as a global \n # variable). Avoids the situation where the \"Deck\" \n # becomes empty during play.\n global deck_of_cards\n deck_of_cards = Deck()\n deck_of_cards.shuffle()\n \n # Create new \"Player\" and \"Dealer\" Hands (stored as \n # global variables). \n global player, dealer\n player = Hand()\n dealer = Hand()\n \n # Add two \"Cards\" to each \"Hand\". To transfer a \"Card\" \n # from the \"Deck\" to a \"Hand\", the \"deal_card()\" \n # method of the \"Deck\" class and the \"add_card()\" \n # method of \"Hand\" class are being used in \n # combination. 
\n player.add_card(deck_of_cards.deal_card())\n dealer.add_card(deck_of_cards.deal_card())\n player.add_card(deck_of_cards.deal_card())\n dealer.add_card(deck_of_cards.deal_card())\n \n # Print resulting \"Hands\" to the console with an \n # appropriate message indicating which \"Hand\" is which.\n # Remove comments if in DEBUG mode.\n #print \"Player: \" + str(player)\n #print \"Dealer: \" + str(dealer) \n \n return None", "def mode_crypt_cramershoup(stdscr, message=None):\n loop = True\n cursor = 0\n while loop:\n show_key_choices(stdscr, cursor, message)\n key = stdscr.getkey()\n loop = False\n cs = CramerShoup()\n if key == '1' or (key == '\\n' and cursor == 1):\n key_size = choose_keys_size(stdscr)# choose the size of key [256,512,1024]\n stdscr.clear()\n stdscr.addstr(\"Création des clés de chiffrement ...\\n\\n\")\n stdscr.refresh()\n cs.generate_keys(key_size)\n stdscr.addstr(\"Vos clés ont été générés dans keys/\\n\")\n stdscr.refresh()\n napms(2000)\n mode_crypt_cramershoup(stdscr, \"Les clés ont été générés\\n\")\n \n elif key == '2' or (key == '\\n' and cursor == 2):\n # chiffre avec la clé privé (la clé privé contient la clé publique)\n key_file_name = input_user(stdscr, \"Veuiller entrer l'enplacement de la clé public. Ctrl + G pour confirmer\")\n try:\n cs.read_key(key_file_name)\n except IOError:\n # cannot open the file\n mode_crypt_cramershoup(stdscr, \"Impossible de lire la clé dans le fichier {}\".format(key_file_name))\n return\n file_name = input_user(stdscr, \"Clé chargé avec succès.\\n Veuillez entrer le nom du fichier à chiffrer\")\n try:\n file = open(file_name)\n file.close()\n except IOError:\n mode_crypt_cramershoup(stdscr, \"Impossible d'ouvrir le fichier {}\".format(file_name))\n return\n # si le fichier est un pgm, on laisse le choix à l'utilisateur\n pgm = False\n if re.match('.+\\.pgm.*', file_name) is not None:\n pgm = choix_mode_PGM(stdscr)\n \n # on chiffre le fichier\n stdscr.clear()\n stdscr.addstr(\"En cours de chiffrement ...\\n\")\n stdscr.refresh()\n wrap = None\n if pgm:\n wrap = PGMEncrypter(file_name, cs, cs.bit_size//(2*8), file_name + \".crypted\", 4*cs.bit_size//8)\n else:\n wrap = BlockFileEncrypter(file_name, cs, cs.bit_size//(2*8), file_name + \".crypted\", 4*cs.bit_size//8)\n wrap.crypt_to_out()\n stdscr.addstr(\"Votre fichier {} a été chiffré :) !\".format(file_name), curses.color_pair(3))\n stdscr.refresh()\n napms(1000)\n menu(stdscr)\n elif key == 'm' or (key == '\\n' and cursor == 3):\n menu(stdscr)\n elif key == 'KEY_UP' and cursor > 1:\n cursor -= 1\n loop = True\n elif key == 'KEY_DOWN' and cursor < 3:\n cursor += 1\n loop = True\n else:\n loop = True", "def bandit_camp_alladin():\n\n alladin_hum = enemies.alladin_hum()\n os.system(\"clear\")\n print(\"YOU BLOODY BASTARD!! YOU KILLED MY COMRADES!!\")\n time.sleep(1)\n for i in range(2):\n print(\".\")\n time.sleep(1)\n print(\"You won't leave this cave alive! 
I am Alladin the Prince of Thieves!\")\n time.sleep(2)\n print(\"Prepare to die!!!!!!!!!!!\")\n print()\n input(\"Press Enter to continue...\")\n return combat.combat(alladin_hum)", "def handDecision(handIn):", "def main():\n\tcolorama.init()\n\n\n\n\tgrid = get_start_grid(*map(int,sys.argv[1:]))\n\tprint_grid(grid)\n\n\twhile True:\n\t\tgrid_copy = copy.deepcopy(grid)\n\t\tget_input = getch(\"Enter direction (w/a/s/d/n/r/q): \")\n\t\tif get_input in functions:\t\n\t\t\tfunctions[get_input](grid)\n\t\telif get_input == \"n\":\n\t\t\tif get_next_action(grid) == '':\n\t\t\t\tprint(\"Checkmate!\")\n\t\t\t\tbreak\n\t\t\tfunctions[get_next_action(grid)](grid)\n\t\telif get_input == \"r\":\n\t\t\tbreak\n\t\telif get_input == \"q\":\n\t\t\tbreak\n\t\telse:\n\t\t\tprint(\"\\nInvalid choice.\")\n\t\t\tcontinue\n\t\tif grid != grid_copy:\n\t\t\tif not prepare_next_turn(grid):\n\t\t\t\tprint_grid(grid)\n\t\t\t\tprint(\"Well played!\")\n\t\t\t\tbreak\n\t\tprint_grid(grid)\n\t\n\tif get_input == \"r\":\n\t\twhile True:\n\t\t\tgrid_copy = copy.deepcopy(grid)\n\n\t\t\tnext_action = get_next_action(grid)\n\t\t\tif next_action == '':\n\t\t\t\tprint(\"Checkmate!\")\n\t\t\t\tbreak\n\t\t\t\n\t\t\tfunctions[next_action](grid)\n\t\t\tif grid != grid_copy:\n\t\t\t\tif not prepare_next_turn(grid):\n\t\t\t\t\tprint_grid(grid)\n\t\t\t\t\tprint(\"Well played!\")\n\t\t\t\t\tbreak\n\t\t\tprint_grid(grid)\n\n\tprint(\"Thanks for playing.\")" ]
[ "0.54052615", "0.5379841", "0.53509325", "0.5323831", "0.5321609", "0.5311181", "0.53076744", "0.5279506", "0.5269653", "0.52644014", "0.52467823", "0.52465475", "0.5240277", "0.52396023", "0.521198", "0.52030087", "0.5173321", "0.51696783", "0.51504856", "0.51416767", "0.5134641", "0.5130811", "0.5128127", "0.51172966", "0.5104382", "0.50758237", "0.5070483", "0.50582737", "0.5058194", "0.5042515", "0.50399154", "0.5039073", "0.50247085", "0.5022867", "0.5007679", "0.5003908", "0.49984768", "0.4997175", "0.4987969", "0.49853915", "0.49825987", "0.4965874", "0.49585667", "0.49340138", "0.49306417", "0.4930196", "0.49272335", "0.49039868", "0.49006253", "0.489586", "0.4893623", "0.48930323", "0.4886512", "0.4886512", "0.4886345", "0.48856553", "0.48800424", "0.48755988", "0.4870038", "0.48663947", "0.4865801", "0.4854202", "0.48519754", "0.4850402", "0.4847923", "0.48462117", "0.4846137", "0.48442644", "0.48424828", "0.48386377", "0.48375848", "0.4834779", "0.48318753", "0.48271713", "0.48244128", "0.48188564", "0.4809262", "0.48064083", "0.48054248", "0.47952333", "0.4794118", "0.47930026", "0.47919664", "0.4791736", "0.47850317", "0.47804958", "0.47783917", "0.47746128", "0.47734028", "0.4772588", "0.47715077", "0.4771057", "0.47678283", "0.47650263", "0.47645846", "0.47639588", "0.47629288", "0.47560716", "0.4754127", "0.47532767" ]
0.521329
14
Html representation of Facets Overview for use in a Jupyter notebook.
def _repr_html_(self) -> str: protostr = base64.b64encode(self._proto.SerializeToString()).decode('utf-8') html_template = ''' <script src="{webcomponents_js}"></script> <link rel="import" href="{facets_html}"> <facets-overview id="overview_elem"></facets-overview> <script> document.querySelector("#overview_elem").protoInput = "{protostr}"; </script>''' html = html_template.format( facets_html=FACETS_DEPENDENCIES['facets_html'], webcomponents_js=FACETS_DEPENDENCIES['webcomponents_js'], protostr=protostr, ) return html
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _repr_html_(self) -> str:\n html_template = \"\"\"\n <script src=\"{webcomponents_js}\"></script>\n <link rel=\"import\" href=\"{facets_html}\">\n <facets-dive id=\"dive_elem\" height=\"{height}\"></facets-dive>\n <script>\n document.querySelector(\"#dive_elem\").data = {data};\n </script>\"\"\"\n html = html_template.format(\n facets_html=FACETS_DEPENDENCIES['facets_html'],\n webcomponents_js=FACETS_DEPENDENCIES['webcomponents_js'],\n data=self._data.to_json(orient='records'),\n height=self.height,\n )\n return html", "def _ipython_display_(self):\n spec, render_type = self._get_spec_info()\n\n id = uuid.uuid4()\n publish_display_data(\n {'text/html': self._generate_html(id)},\n metadata={'jupyter-vega3': '#{0}'.format(id)}\n )\n publish_display_data(\n {'application/javascript':\n self._generate_js(id, spec, render_type)},\n metadata={'jupyter-vega3': '#{0}'.format(id)}\n )", "def _repr_html_(self):\n return (\n f'<b>GalaxyCluster:</b> {self.unique_id} '\n f'(ra={self.ra}, dec={self.dec}) at z={self.z}'\n f'<br>> <b>with columns:</b> {self._str_colnames()}'\n f'<br>> {len(self.galcat)} source galaxies'\n f'<br>{self.galcat._html_table()}'\n )", "def features():\n\n return render_template('features.html')", "def display_feature(self):\n return ', '.join([feature.name for feature in self.features.all()])", "def get_overview():\n from app.core.api_views import Api\n from app.modules.overview import inc\n sar = inc.main()\n api = Api()\n return render_template(\"index.html\",\n sar=sar,\n )", "def graph_section():\n return html.Div(\n className = \"tab-container tab-two-section\",\n children = [\n html.Div(\n className = \"container-col\",\n children = [\n climate_profiles_title(), \n climate_profiles_graphs()\n ]\n )\n ]\n )", "def overview(data):\n\n printer.table(['Name', 'El', 'Invariom name', 'Model compound'], head=True)\n for atom in data.iter_atoms(True):\n printer.table([atom.name, atom.element, atom.invariom_name, atom.invariom.molecule.name])\n printer.table(done=True)", "def _repr_html_(self):\n params = OrderedDict()\n params[\"Name\"] = self.name\n params[\"Description\"] = self.description\n params[\"Ns\"] = self.Ns\n params[\"Ni\"] = self.Ni\n params[\"Kinetic Parameter\"] = self.kinetic_parameter_type\n params[\"Kinetic Parameter Value\"] = self.kinetic_parameter_value \n \n header = \"<table>\"\n footer = \"</table>\"\n html = \"\"\n\n for key, val in params.items():\n html += \"<tr><td>{0}</td><td>{1}</td></tr>\".format(key, val)\n\n return header + html + footer", "def viewVocab(self): \n mapping = []\n views = registration.getViews(IBrowserRequest)\n for view in views:\n if view.name and self.getRenderableView(view.name):\n mapping.append((view.name, view.name))\n return atapi.DisplayList(mapping)", "def display_facet(model_name, vertices, faces, plot_type, display_normals=False, scale=0.2):\n # Separate the coordinates of the vertices\n x = vertices[:, 0]\n y = vertices[:, 1]\n z = vertices[:, 2]\n\n # Display the model\n ax = Axes3D(plt.figure())\n if plot_type == 'Facet':\n ax.plot_trisurf(x, y, z, triangles=faces, color=(1, 1, 1, 1), edgecolor='gray')\n elif plot_type == 'Wireframe':\n ax.plot_trisurf(x, y, z, triangles=faces, color='none', edgecolor='black')\n ax.grid(True)\n set_equal(ax)\n\n ax.set_title(model_name, size='14')\n ax.set_xlabel('X', size='12')\n ax.set_ylabel('Y', size='12')\n ax.set_zlabel('Z', size='12')\n\n # Set the tick label size\n ax.tick_params(labelsize=12)\n\n if display_normals:\n\n # Vector from origin to vertices\n r = 
zeros([vertices.shape[0], 3])\n\n for i in range(vertices.shape[0]):\n r[i] = [vertices[i][0], vertices[i][1], vertices[i][2]]\n\n for i in range(faces.shape[0]):\n a = r[faces[i][1]] - r[faces[i][0]]\n b = r[faces[i][2]] - r[faces[i][1]]\n\n # Outward normal\n normal = cross(a, b) + 0.\n\n # Scale the size of the arrow to be displayed\n normal *= scale\n\n # Put the arrow at the center of the facet\n mean_r = (r[faces[i][0]] + r[faces[i][1]] + r[faces[i][2]]) / 3.0\n\n # Get the arrow for the normal\n arrow = Arrow3D([mean_r[0], mean_r[0] + normal[0]], [mean_r[1], mean_r[1] + normal[1]],\n [mean_r[2], mean_r[2] + normal[2]], mutation_scale=10, lw=1, arrowstyle=\"-|>\", color=\"r\")\n ax.add_artist(arrow)\n\n plt.show()", "def default_display_function(feature):\n # n_samples = min(n_samples, feature.shape[0])\n IPython.display.display(widgets.Box(layout=widgets.Layout(height=\"2.5%\")))\n IPython.display.display(feature)\n IPython.display.display(widgets.Box(layout=widgets.Layout(height=\"2.5%\")))", "def show_overview(self) -> None:\n print(f\"\\n\\nCluster overview:\")\n all_clusters = self.get_all_clusters()\n print(f\" - Total of {len(all_clusters)} clusters\")\n if all_clusters:\n cluster_lengths = [len(v) for v in all_clusters.values()]\n print(f\" - Average number of cluster-labels: {round(sum(cluster_lengths) / len(cluster_lengths), 2)}\")", "def _show_info(self):\n\n dataframe = self._cache.get_source(config.DATAFRAME_ARTISTS)\n dataframe.printSchema()", "def display(self):\n options = {\"ent_only_plot\": True,\n \"rel_only_plot\": not self.config.plot_entity_only,\n \"ent_and_rel_plot\": not self.config.plot_entity_only}\n\n if self.config.plot_embedding:\n viz = Visualization(model=self.model, vis_opts = options)\n\n viz.plot_embedding(resultpath=self.config.figures, algos=self.model.model_name, show_label=False)\n\n if self.config.plot_training_result:\n viz = Visualization(model=self.model)\n viz.plot_train_result()\n\n if self.config.plot_testing_result:\n viz = Visualization(model=self.model)\n viz.plot_test_result()", "def index() -> object:\n return render_template('ue_bootstrap.j2', title='UENERGO TAGS')", "def get_visualizations( self, dataset ):\n\n return [ 'phyloviz' ]", "def table_registered_habits(self, title='YOUR HABIT(S)'):\n self.analytics.display_table(\n ('ID', 'HABIT'),\n list(self.analytics.select_columns(\n self.analytics.habits_table(),\n stop=2)),\n title)", "def show_db_overview(self):\n\n models_list = sorted_models_list()\n apps = [p.app_label for p in settings.SITE.installed_plugins]\n s = \"%d apps: %s.\" % (len(apps), \", \".join(apps))\n s += \"\\n%d models:\\n\" % len(models_list)\n i = 0\n headers = [\n #~ \"No.\",\n \"Name\",\n \"Default table\",\n #~ \"M\",\n \"#fields\",\n \"#rows\",\n #~ ,\"first\",\"last\"\n ]\n rows = []\n for model in models_list:\n if True: # model._meta.managed:\n i += 1\n cells = []\n #~ cells.append(str(i))\n cells.append(fmn(model))\n cells.append(model.get_default_table())\n #~ cells.append(str(model))\n #~ if model._meta.managed:\n #~ cells.append('X')\n #~ else:\n #~ cells.append('')\n cells.append(str(len(model._meta.concrete_fields)))\n qs = model.objects.all()\n n = qs.count()\n cells.append(str(n))\n #~ if n:\n #~ cells.append(obj2str(qs[0]))\n #~ cells.append(obj2str(qs[n-1]))\n #~ else:\n #~ cells.append('')\n #~ cells.append('')\n\n rows.append(cells)\n s += rstgen.table(headers, rows)\n return s", "def __repr__(self):\n\n (sections, section_titles) = self._get_summary_struct()\n\n return 
_toolkit_repr_print(self, sections, section_titles, width=30)", "def _repr_html_(self):\n return (\n f'<b>{self.__class__.__name__}</b>'\n f'<br> <b>defined by:</b> {self._str_meta_()}'\n f'<br> <b>with columns:</b> {self._str_colnames()}'\n f'<br> {len(self)} objects'\n f'<br> {self._html_table()}'\n )", "def visualization(data):\n\t# preview top 5 row of data\n\tprint(\"\\n--------Data preview--------\\n{0}\"\n\t\t .format(data.head()))\n\tprint(\"\\nNull value status as follow:\\n{0}\".format(data.isnull().sum()))\n\tcols = [col for col in data.columns]\n\tprint(\"\\nNumber of original features: {0}\".format(len(cols)))\n\tprint(\"\\nFeatures types:\\n{0}\".format(data[cols].dtypes.value_counts()))\n\n\tcounts = [[], [], []]\n\tfor col in cols:\n\t\t# the data type of each feature\n\t\ttyp = data[col].dtype\n\t\t# the number of differents value in each feature\n\t\tuniq = len(np.unique(data[col]))\n\t\t# constant value feature\n\t\tif uniq == 1:\n\t\t\tcounts[0].append(col)\n\t\t# binary value feature\n\t\telif uniq == 2 and typ == np.int64:\n\t\t\tcounts[1].append(col)\n\t\t# multiple value feature\n\t\telse:\n\t\t\tcounts[2].append(col)\n\n\tprint('\\nConstant features: {}\\nBinary features: {} \\nCategorical features: {}\\n'.format(*[len(c) for c in counts]))\n\tprint('Constant features:', counts[0])\n\tprint('Binary features:', counts[1])\n\tprint('Categorical features:', counts[2])\n\n\tfig, axes = plt.subplots(2,2)\n\tfig.set_size_inches(12, 10)\n\tsn.boxplot(data=data,y=\"count\",orient=\"v\",ax=axes[0][0])\n\tsn.boxplot(data=data,y=\"count\",x=\"season\",orient=\"v\",ax=axes[0][1])\n\tsn.boxplot(data=data,y=\"count\",x=\"hour\",orient=\"v\",ax=axes[1][0])\n\tsn.boxplot(data=data,y=\"count\",x=\"workingday\",orient=\"v\",ax=axes[1][1])\n\n\taxes[0][0].set(ylabel='Count',title=\"Box Plot On Count\")\n\taxes[0][1].set(xlabel='Season', ylabel='Count',title=\"Box Plot On Count Across Season\")\n\taxes[1][0].set(xlabel='Hour Of The Day', ylabel='Count',title=\"Box Plot On Count Across Hour Of The Day\")\n\taxes[1][1].set(xlabel='Working Day', ylabel='Count',title=\"Box Plot On Count Across Working Day\")\n\tplt.show()\n\n\tfig,(ax1,ax2,ax3,ax4)= plt.subplots(nrows=4)\n\tfig.set_size_inches(12,20)\n\tsortOrder = [1,2,3,4,5,6,7,8,9,10,11,12]\n\thueOrder = [\"Sunday\",\"Monday\",\"Tuesday\",\"Wednesday\",\"Thursday\",\"Friday\",\"Saturday\"]\n\n\tmonthAggregated = pd.DataFrame(data.groupby(\"month\")[\"count\"].mean()).reset_index()\n\tmonthSorted = monthAggregated.sort_values(by=\"count\",ascending=False)\n\tsn.barplot(data=monthSorted,x=\"month\",y=\"count\",ax=ax1,order=sortOrder)\n\tax1.set(xlabel='Month', ylabel='Avearage Count',title=\"Average Count By Month\")\n\n\thourAggregated = pd.DataFrame(data.groupby([\"hour\",\"season\"],sort=True)[\"count\"].mean()).reset_index()\n\tsn.pointplot(x=hourAggregated[\"hour\"], y=hourAggregated[\"count\"],hue=hourAggregated[\"season\"],\n\t data=hourAggregated, join=True,ax=ax2)\n\tax2.set(xlabel='Hour Of The Day', ylabel='Users Count',\n\t title=\"Average Users Count By Hour Of The Day Across Season\",label='big')\n\n\thourAggregated = pd.DataFrame(data.groupby([\"hour\",\"weekday\"],sort=True)[\"count\"].mean()).reset_index()\n\tsn.pointplot(x=hourAggregated[\"hour\"], y=hourAggregated[\"count\"],hue=hourAggregated[\"weekday\"],hue_order=hueOrder,\n\t data=hourAggregated, join=True,ax=ax3)\n\tax3.set(xlabel='Hour Of The Day', ylabel='Users Count',\n\t title=\"Average Users Count By Hour Of The Day Across 
Weekdays\",label='big')\n\n\thourTransformed = pd.melt(data[[\"hour\",\"casual\",\"registered\"]], id_vars=['hour'], value_vars=['casual', 'registered'])\n\thourAggregated = pd.DataFrame(hourTransformed.groupby([\"hour\",\"variable\"],sort=True)[\"value\"].mean()).reset_index()\n\tsn.pointplot(x=hourAggregated[\"hour\"], y=hourAggregated[\"value\"],hue=hourAggregated[\"variable\"],\n\t hue_order=[\"casual\",\"registered\"], data=hourAggregated, join=True,ax=ax4)\n\tax4.set(xlabel='Hour Of The Day', ylabel='Users Count',\n\t title=\"Average Users Count By Hour Of The Day Across User Type\",label='big')\n\tplt.show()", "def vggface2_labels(self):\n id_meta = pd.read_csv(\"loki/static/models/vggface2/identity_meta.csv\",\n sep=\"\\n\")\n id_meta = id_meta[\n 'Class_ID, Name, Sample_Num, Flag, Gender'].str\\\n .split(',', expand=True)\n\n id_meta.columns = [\n 'Class_ID', 'Name', 'Sample_Num', 'Flag', 'Gender', 'None']\n id_meta.drop(columns=['None'], inplace=True)\n\n vgg_names = id_meta.drop(columns=[\n 'Sample_Num', 'Flag', 'Gender']).set_index('Class_ID')\n\n return vgg_names", "def __repr__(self):\n (sections, section_titles) = self._get_summary_struct()\n return _toolkit_repr_print(self, sections, section_titles, width=30)", "def overview():\n return render_template('api/api.html', title='API Overview')", "def facets(self, *args, **kwargs) -> Any:\n pass", "def _create_info_div(self, summary_statistics, feature):\n feature_dict = summary_statistics[feature]\n\n # statistics using describe method of pandas.DataFrame\n text = self._info_div_html.format(\n feature_description_class=self.feature_description_class,\n info_div_content=self._info_div_content,\n type=feature_dict[\"type\"],\n description=feature_dict[\"description\"],\n mean=feature_dict[\"mean\"],\n median=feature_dict[\"50%\"],\n min=feature_dict[\"min\"],\n max=feature_dict[\"max\"],\n std=feature_dict[\"std\"],\n missing=feature_dict[\"missing\"]\n )\n d = Div(name=self._info_div_content, css_classes=[self._info_div_content], text=text)\n\n return d", "def show_table():\n\n title_list = ('ID', 'Platform', 'Producer', 'Year', 'Elements')\n \n return table, title_list", "def describe(self) -> str:", "def __repr__(self):\n\n (sections, section_titles) = self._get_summary_struct()\n return _tkutl._toolkit_repr_print(self, sections, section_titles, width=30)", "def display_html_snapshots_widget():\n if not get_ipython():\n print('The HTML snapshot widget cannot be display in environments other than IPython.')\n return\n\n # Configure notebook display preferences to better suit this UI. 
These display settings\n # will be in effect for all cells in the notebook run after this one is run.\n pd.set_option('display.max_colwidth', None)\n pd.set_option('display.max_rows', None)\n get_ipython().run_cell_magic(\n 'javascript',\n '',\n '''// Display cell outputs to full height (no vertical scroll bar)\n IPython.OutputArea.auto_scroll_threshold = 9999;''')\n\n # Retrieve the workspace metadata for the current user and environment.\n ws_meta = WorkspaceMetadata()\n workspace_names2id = collections.OrderedDict(sorted(\n ws_meta.get_workspace_name_to_id_mapping().items()))\n workspace_names2id_include_readonly = collections.OrderedDict(sorted(\n ws_meta.get_workspace_name_to_id_mapping(include_private_readonly=True).items()))\n workspace_ids2bucket_include_readonly = ws_meta.get_workspace_id_to_bucket_mapping(include_private_readonly=True)\n workspace_paths = {k: WorkspacePaths(workspace_bucket=v)\n for k, v in workspace_ids2bucket_include_readonly.items()}\n\n ui_output = widgets.Output()\n\n ui_tabs = widgets.Tab()\n ui_tabs.children = [create_html_snapshot_widget(ws_names2id=workspace_names2id,\n ws_paths=workspace_paths,\n output=ui_output),\n create_view_files_widget(ws_names2id=workspace_names2id_include_readonly,\n ws_paths=workspace_paths,\n output=ui_output),\n create_view_all_comments_widget(ws_names2id=workspace_names2id_include_readonly,\n ws_paths=workspace_paths,\n output=ui_output)]\n ui_tabs.set_title(title='Create', index=0)\n ui_tabs.set_title(title='View one', index=1)\n ui_tabs.set_title(title='View all', index=2)\n\n display(ui_tabs, ui_output)", "def get_html(self):\r\n context = {\r\n 'display_name': self.display_name_with_default,\r\n 'instructions_html': self.instructions,\r\n 'annotation_storage': self.annotation_storage_url,\r\n 'token': retrieve_token(self.user, self.annotation_token_secret),\r\n 'tag': self.instructor_tags,\r\n 'openseadragonjson': self.openseadragonjson,\r\n }\r\n\r\n return self.system.render_template('imageannotation.html', context)", "def get_component_html(self):\n return '\\n'.join([hunit.get_component_html() for hunit in self.harmonizationunit_set.all()])", "def show_toc():\n html(\"\"\"\\\n <script>\n $.getScript('https://kmahelona.github.io/ipython_notebook_goodies/ipython_notebook_toc.js')\n </script>\n <h2 id=\"tocheading\">Table Of Contents</h2>\n <div id=\"toc\"></div><hr>\"\"\")", "def subplot_1(self, Graph, n_tabs):\n # The code below walks does a pre-order traversal of the tree\n # For exact details about the structure of self.Graph refer description in init function.\n\n attr_name = list(Graph.keys())[0]\n print(\"\\t\"*(n_tabs),\"feature name :\",attr_name)\n for val in list(Graph[attr_name].keys()):\n print(\"\\t\"*(n_tabs+1),\"feature value :\",val)\n sub_graph = Graph[attr_name][val]\n if (type(sub_graph)==dict):\n self.subplot_1(sub_graph, n_tabs+2)\n else:\n print(\"\\t\"*(n_tabs+2),\"class :\", sub_graph)", "def show(self):\n\t\traise NotImplementedError()", "def _repr_html_(self): # pragma: no cover\n return Utils.render_html('extent.html', extent=self)", "def facets(self):\n return self._facets", "def get_html(self):\r\n context = {\r\n 'display_name': self.display_name_with_default,\r\n 'element_id': self.element_id,\r\n 'instructions_html': self.instructions,\r\n 'content_html': self._render_content()\r\n }\r\n\r\n return self.system.render_template('annotatable.html', context)", "def tags():\n tag = \"\"\n tag += \"Supported inflexions and appropriate keys\\n\\n\"\n for item in vkeys.keys():\n tag += (\"%s\\t 
- %s\\n\" %(item.ljust(10,' '), vkeys[item]))\n return tag", "def show_feature_summary(df, colname, display_uniques=False):\n\tprint('Details of feature:',colname)\n\tprint(' - datatype:',df[colname].dtypes)\n\tprint(' - col.size:',df[colname].shape)\n\tprint(' - NaN.vals:',df[colname].isnull().sum())\n\tif (display_uniques): print(' - uniqvals:',get_unique_values(df, colname))\n\tif (display_uniques): print(' - cnt.vals:',get_unique_counts(df, colname))\n\tprint(\"\\n\")", "def describe_notebook_instance(NotebookInstanceName=None):\n pass", "def summary(self, view):\n\t\tview.clear()\n\n\t\tname_item = QTreeWidgetItem(view)\n\t\tname_item.setText(0, 'Name')\n\t\tname_item.setText(1, self.name())\n\t\tname_item.setToolTip(1, self.name())\n\t\ttype_item = QTreeWidgetItem(view)\n\t\ttype_item.setText(0, 'Type')\n\t\ttype_item.setText(1, self.type())\n\t\tparams_item = QTreeWidgetItem(view)\n\t\tparams_item.setText(0, 'Parameters')\n\n\t\t# show classifier hyperparameters\n\t\tparams = self.params()\n\t\tfor param in params:\n\t\t\tparam_item = QTreeWidgetItem()\n\t\t\tparam_item.setText(0, param)\n\t\t\tparam_item.setToolTip(0, param)\n\t\t\tvalue = params[param]\n\t\t\tif value == None:\n\t\t\t\tparam_item.setText(1, 'None')\n\t\t\telse:\n\t\t\t\tparam_item.setText(1, str(value))\n\t\t\tparams_item.addChild(param_item)\n\n\t\tmetrics_item = QTreeWidgetItem(view)\n\t\tmetrics_item.setText(0, 'Performance')\n\n\t\t# show metrics on training data\n\t\ttrain_metrics_item = QTreeWidgetItem(metrics_item)\n\t\ttrain_metrics_item.setText(0, 'Training')\n\t\ttrain_metrics = self.metrics('train')\n\t\ttrain_metrics.summary(train_metrics_item)\n\n\t\t# show metrics on validation data\n\t\tval_metrics_item = QTreeWidgetItem(metrics_item)\n\t\tval_metrics_item.setText(0, 'Validation')\n\t\tval_metrics = self.metrics('val')\n\t\tif val_metrics is not None:\n\t\t\tval_metrics.summary(val_metrics_item)\n\n\t\t# show metrics on test data\n\t\ttest_metrics = self.metrics('test')\n\t\tif test_metrics is not None:\n\t\t\ttest_metrics_item = QTreeWidgetItem(metrics_item)\n\t\t\ttest_metrics_item.setText(0, 'Test')\n\t\t\ttest_metrics.summary(test_metrics_item)\n\n\t\tcomment_item = QTreeWidgetItem(view)\n\t\tcomment_item.setText(0, 'Comment')\n\t\tcomment_text_item = QTreeWidgetItem(comment_item)\n\t\tcomment_text_item.setFirstColumnSpanned(True)\n\t\tcomment_text_item.setText(0, self.comment())", "def render(self):\n print(self._get_grid_representations())", "def describe(self):\n\n ret = []\n ret.append(\"Functional ID: %s\" % self._number)\n ret.append(\"Functional Name: %s\" % self._xc_func_name)\n ret.append(\"Attributes:\")\n ret.append(\" Name: %s\" % self._name)\n ret.append(\" Kind: %d\" % self._kind)\n ret.append(\" Family: %d\" % self._family)\n ret.append(\"Citations:\")\n for x in self._refs:\n ret.append(\" \" + x)\n\n return \"\\n\".join(ret)", "def showtopologies():\n middleware.protocolObj.showTopologies()", "def renderPreview(self):\n html = \"\"\n html += u'<b>%s%s</b>' % (_(u\"Forum Name: \"),self.forum.forumName)\n html += u\"<br/>%s<br/>\" % self.forum.introduction\n html += self.discussionElement.renderPreview()\n html += self.lmsElement.renderView()\n html += u\"<br/><br/>\\n\"\n return html", "def inspect(self, axis_units='px', frontview=True):\n ax = super().inspect(axis_units=axis_units, frontview=frontview)\n scale = self._get_plot_scale_factor(axis_units)\n\n # Label modules and tiles\n for ch, module in enumerate(self.modules):\n s = 'Q{Q}M{M}'.format(Q=(ch // 4) + 1, M=(ch % 4) + 
1)\n cx, cy, _ = module[4].centre() * scale\n ax.text(cx, cy, s, fontweight='bold',\n verticalalignment='center',\n horizontalalignment='center')\n\n for t in [0, 7]:\n cx, cy, _ = module[t].centre() * scale\n ax.text(cx, cy, 'T{}'.format(t + 1),\n verticalalignment='center',\n horizontalalignment='center')\n\n ax.set_title('AGIPD-1M detector geometry ({})'.format(self.filename))\n return ax", "def inspect(self, axis_units='px', frontview=True):\n ax = super().inspect(axis_units=axis_units, frontview=frontview)\n scale = self._get_plot_scale_factor(axis_units)\n\n # Label modules and tiles\n for ch, module in enumerate(self.modules):\n s = 'Q{Q}M{M}'.format(Q=(ch // 4) + 1, M=(ch % 4) + 1)\n cx, cy, _ = module[0].centre() * scale\n ax.text(cx, cy, s, fontweight='bold',\n verticalalignment='center',\n horizontalalignment='center')\n\n for t in [1]:\n cx, cy, _ = module[t].centre() * scale\n ax.text(cx, cy, 'T{}'.format(t + 1),\n verticalalignment='center',\n horizontalalignment='center')\n\n ax.set_title('DSSC detector geometry ({})'.format(self.filename))\n return ax", "def plot_feature_violin(self):\n\n num_features = len(self.feature_names)\n fig, ax = plt.subplots(nrows=1, ncols=num_features, figsize=(50,2))\n fig.patch.set_facecolor('#E0E0E0')\n fig.suptitle('Violin plots of 19 features', fontsize=14)\n\n for i in range(num_features):\n ax[i].violinplot(self.scaled_train_data[self.train_data.columns[i]], widths=0.9,\n showmeans=False, showextrema=False, showmedians=False)\n ax[i].set_xlabel(self.train_data.columns[i])\n\n plt.savefig(r'data_analysis\\violins_' + self.file_name + '.png', \n facecolor=fig.get_facecolor(), bbox_inches='tight')", "def html_viewer(i):\n\n i['module_uoa']='experiment.tune.compiler.flags'\n i['module_cfg']=copy.deepcopy(cfg)\n i['module_work']=copy.deepcopy(work)\n return ck.access(i)", "def show(self):\n raise NotImplementedError", "def show(self):\n raise NotImplementedError", "def _repr_html_(self):\n\n import numpy as np\n import matplotlib.pyplot as plt\n from .._tier9 import imshow\n\n\n size_in_pixels = np.prod(self.shape)\n size_in_bytes = size_in_pixels * self.dtype.itemsize\n\n labels = (self.dtype == np.uint32)\n\n # In case the image is 2D, 3D and larger than 100 pixels, turn on fancy view\n if len(self.shape) in (2, 3) and size_in_pixels >= 100:\n import matplotlib.pyplot as plt\n imshow(self,\n labels=labels,\n continue_drawing=True,\n colorbar=not labels)\n image = self._png_to_html(self._plt_to_png())\n else:\n return \"<pre>cle.array(\" + str(np.asarray(self)) + \", dtype=\" + str(self.dtype) + \")</pre>\"\n\n\n if size_in_bytes > 1024:\n size_in_bytes = size_in_bytes / 1024\n if size_in_bytes > 1024:\n size_in_bytes = size_in_bytes / 1024\n if size_in_bytes > 1024:\n size_in_bytes = size_in_bytes / 1024\n size = \"{:.1f}\".format(size_in_bytes) + \" GB\"\n else:\n size = \"{:.1f}\".format(size_in_bytes) + \" MB\"\n else:\n size = \"{:.1f}\".format(size_in_bytes) + \" kB\"\n else:\n size = \"{:.1f}\".format(size_in_bytes) + \" B\"\n\n histogram = \"\"\n\n if size_in_bytes < 100 * 1024 * 1024:\n if not labels:\n\n import numpy as np\n from .._tier2 import minimum_of_all_pixels, maximum_of_all_pixels\n from .._tier3 import histogram\n\n num_bins = 32\n\n h = np.asarray(histogram(self, num_bins=num_bins))\n\n plt.figure(figsize=(1.8, 1.2))\n plt.bar(range(0, len(h)), h)\n\n # hide axis text\n # https://stackoverflow.com/questions/2176424/hiding-axis-text-in-matplotlib-plots\n # https://pythonguides.com/matplotlib-remove-tick-labels\n frame1 = 
plt.gca()\n frame1.axes.xaxis.set_ticklabels([])\n frame1.axes.yaxis.set_ticklabels([])\n plt.tick_params(left=False, bottom=False)\n\n histogram = self._png_to_html(self._plt_to_png())\n\n min_max = \"<tr><td>min</td><td>\" + str(self.min()) + \"</td></tr>\" + \\\n \"<tr><td>max</td><td>\" + str(self.max()) + \"</td></tr>\"\n\n else:\n\n min_max = \"\"\n\n all = [\n \"<table>\",\n \"<tr>\",\n \"<td>\",\n image,\n \"</td>\",\n \"<td style=\\\"text-align: center; vertical-align: top;\\\">\",\n \"<b><a href=\\\"https://github.com/clEsperanto/pyclesperanto_prototype\\\" target=\\\"_blank\\\">cle._</a> image</b><br/>\",\n \"<table>\",\n \"<tr><td>shape</td><td>\" + str(self.shape).replace(\" \", \"&nbsp;\") + \"</td></tr>\",\n \"<tr><td>dtype</td><td>\" + str(self.dtype) + \"</td></tr>\",\n \"<tr><td>size</td><td>\" + size + \"</td></tr>\",\n min_max,\n \"</table>\",\n histogram,\n \"</td>\",\n \"</tr>\",\n \"</table>\",\n ]\n\n return \"\\n\".join(all)", "def show(self):\n import IPython.display\n disp = IPython.display.HTML(self.render())\n return IPython.display.display(disp, display_id=str(id(self)))", "def exp_summary(habitat,temperature,species):\n plt.subplot(2,2,1)\n niches(species)\n plt.subplot(2,2,2)\n environment(habitat,temperature)\n plt.subplot(2,2,3)\n show_matrix(habitat,\"habitat\")\n plt.subplot(2,2,4)\n show_matrix(temperature,\"temperature\")", "def show_plot(self):\n label_1 = (self.own_name_1 + \"'s account\")\n label_2 = (self.own_name_2 + \"'s account\")\n clusters = 3\n counts_1 = (self.op_full_name_count_1, self.op_first_name_count_1, self.op_last_name_count_1)\n counts_2 = (self.op_full_name_count_2, self.op_first_name_count_2, self.op_last_name_count_2)\n fig, ax = plt.subplots()\n index = np.arange(clusters)\n bar_width = 0.2\n opacity = 0.5\n rects1 = plt.bar(index, counts_1, bar_width, alpha=opacity, color=\"b\", label=label_1)\n rects2 = plt.bar(index + bar_width, counts_2, bar_width, alpha=opacity, color=\"g\", label=label_2)\n #plt.xlabel(\"Name forms\")\n plt.ylabel(\"Number of references\")\n plt.title(\"Reference of opponents name\")\n plt.xticks(index + bar_width, (\"Opponent's Full Name\", \"Opponent's First Name only\", \"Opponent's Last name only\"))\n plt.legend()\n plt.tight_layout()\n plt.show()", "def show_flavors():\n return get_flavors()", "def display_library_info():\n print \"in display library info \\n\"\n library_list = model.get_libraries_info(model.db_session, session)\n return render_template('library.html', libraries=library_list)", "def __repr__(self):\n\n width = 40\n\n sections, section_titles = self._get_summary_struct()\n out = _tkutl._toolkit_repr_print(self, sections, section_titles,\n width=width)\n return out", "def plot_feature_histograms(self):\n\n num_features = len(self.feature_names)\n fig, ax = plt.subplots(nrows=1, ncols=num_features, figsize=(50, 2), tight_layout=True)\n fig.suptitle('Histograms of 19 features', fontsize=14)\n\n for i in range(num_features):\n ax[i].hist(self.train_data[self.train_data.columns[i]], bins=50)\n ax[i].set_xlabel(self.train_data.columns[i])\n\n plt.savefig(r'data_analysis\\histograms_' + self.file_name + '.png', \n facecolor=fig.get_facecolor(), bbox_inches='tight')", "def describe(self):\n raise NotImplementedError()", "def describe(self):\n raise NotImplementedError()", "def info(cls):\n return 'Xray plots'", "def get_component_html(self):\n study_list = '\\n'.join([study.get_name_link_html() for study in self.get_source_studies()])\n age_list = '\\n'.join([trait.get_name_link_html() for 
trait in self.component_age_traits.all()])\n component_html = '\\n'.join([\n trait.get_component_html(harmonization_unit=self) for trait in self.harmonizedtrait_set.all()])\n panel_body = []\n if len(study_list) > 0:\n study_html = INLINE_LIST_HTML.format(list_title='Included studies', list_elements=study_list)\n panel_body.append(study_html)\n if len(age_list) > 0:\n age_html = INLINE_LIST_HTML.format(list_title='Component age variables', list_elements=age_list)\n panel_body.append(age_html)\n panel_body.append(component_html)\n panel_body = '\\n'.join(panel_body)\n unit_panel = PANEL_HTML.format(panel_title='Harmonization unit: {}'.format(self.i_tag), panel_body=panel_body)\n return unit_panel", "def describe(self):\n print(\"Number of nodes: {0}\".format(self.nnodes))\n print(\"Number of interfaces: {0}\".format(self.ninterfaces))\n print(\"Number of elements: {0}\".format(self.nelements))", "def _repr_html_(self):\n import io\n import base64\n from PIL import Image\n\n library_name = \"vedo.assembly.Assembly\"\n help_url = \"https://vedo.embl.es/docs/vedo/assembly.html\"\n\n arr = self.thumbnail(zoom=1.1, elevation=-60)\n\n im = Image.fromarray(arr)\n buffered = io.BytesIO()\n im.save(buffered, format=\"PNG\", quality=100)\n encoded = base64.b64encode(buffered.getvalue()).decode(\"utf-8\")\n url = \"data:image/png;base64,\" + encoded\n image = f\"<img src='{url}'></img>\"\n\n # statisitics\n bounds = \"<br/>\".join(\n [\n vedo.utils.precision(min_x, 4) + \" ... \" + vedo.utils.precision(max_x, 4)\n for min_x, max_x in zip(self.bounds()[::2], self.bounds()[1::2])\n ]\n )\n\n help_text = \"\"\n if self.name:\n help_text += f\"<b> {self.name}: &nbsp&nbsp</b>\"\n help_text += '<b><a href=\"' + help_url + '\" target=\"_blank\">' + library_name + \"</a></b>\"\n if self.filename:\n dots = \"\"\n if len(self.filename) > 30:\n dots = \"...\"\n help_text += f\"<br/><code><i>({dots}{self.filename[-30:]})</i></code>\"\n\n allt = [\n \"<table>\",\n \"<tr>\",\n \"<td>\",\n image,\n \"</td>\",\n \"<td style='text-align: center; vertical-align: center;'><br/>\",\n help_text,\n \"<table>\",\n \"<tr><td><b> nr. 
of objects </b></td><td>\"\n + str(self.GetNumberOfPaths())\n + \"</td></tr>\",\n \"<tr><td><b> position </b></td><td>\" + str(self.GetPosition()) + \"</td></tr>\",\n \"<tr><td><b> diagonal size </b></td><td>\"\n + vedo.utils.precision(self.diagonal_size(), 5)\n + \"</td></tr>\",\n \"<tr><td><b> bounds </b> <br/> (x/y/z) </td><td>\" + str(bounds) + \"</td></tr>\",\n \"</table>\",\n \"</table>\",\n ]\n return \"\\n\".join(allt)", "def subplot_2(self, Graph, n_tabs):\n # The code below walks does a pre-order traversal of the tree\n # For exact details about the structure of self.Graph refer description in init function.\n\n attr_name = list(Graph.keys())[0]\n print(\"\\t\"*(n_tabs),\"feature name :\",attr_name)\n for val in list(Graph[attr_name].keys()):\n if (val[1]==1):\n des = \"greater\"\n else:\n des = \"lower\"\n print(\"\\t\"*(n_tabs+1),\"feature threashold :\", val[0],\" \",des)\n sub_graph = Graph[attr_name][val]\n if (type(sub_graph)==dict):\n self.subplot_2(sub_graph, n_tabs+2)\n else:\n print(\"\\t\"*(n_tabs+2), \"prediction :\",sub_graph)", "def show(self) -> None:", "def inspect(self, axis_units='px', frontview=True):\n ax = super().inspect(axis_units=axis_units, frontview=frontview)\n scale = self._get_plot_scale_factor(axis_units)\n\n # Label modules and tiles\n for ch, module in enumerate(self.modules):\n s = 'Q{Q}M{M}'.format(Q=(ch // 4) + 1, M=(ch % 4) + 1)\n cx, cy, _ = module[0].centre() * scale\n ax.text(cx, cy, s, fontweight='bold',\n verticalalignment='center',\n horizontalalignment='center')\n\n for t in [7, 8, 15]:\n cx, cy, _ = module[t].centre() * scale\n ax.text(cx, cy, 'T{}'.format(t + 1),\n verticalalignment='center',\n horizontalalignment='center')\n\n ax.set_title('LPD-1M detector geometry ({})'.format(self.filename))\n return ax", "def _title(profile):\n if profile['operation'] == 'differential':\n p1, p2 = profile['profiles']\n return 'differential ({}, {})'.format(_title(p1), _title(p2))\n elif profile['operation'] == 'local feature':\n p = profile['profile']\n return 'local feature {} ({})'.format(profile['function'], _title(p))\n else:\n return ' '.join([str(x) for x in profile.values()])", "def display_linear_model_features(model_name, coefs, save=False, prefix_name_fig=None, folder='Charts'):\r\n\r\n imp_coefs = coefs.sort_values()\r\n imp_coefs.plot(kind = \"barh\")\r\n plt.title(\"Feature importance using {} Model\".format(model_name))\r\n \r\n if save == True:\r\n prefix_name_fig = prefix_name_fig + '_' if prefix_name_fig is not None else ''\r\n plt.savefig(folder + '/' + prefix_name_fig + '.png')", "def overview():\n pages_list = g.db.pages.find().sort('name')\n return render_template('{}/index.html'.format(MODULE_DIR), **locals() )", "def _create_general_data_page(self, notebook):\r\n\r\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\r\n # Build-up the containers for the tab. #\r\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\r\n _fixed = gtk.Fixed()\r\n\r\n _scrollwindow = gtk.ScrolledWindow()\r\n _scrollwindow.set_policy(gtk.POLICY_AUTOMATIC,\r\n gtk.POLICY_AUTOMATIC)\r\n _scrollwindow.add_with_viewport(_fixed)\r\n\r\n _frame = Widgets.make_frame(label=_(u\"General Information\"))\r\n _frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)\r\n _frame.add(_scrollwindow)\r\n\r\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\r\n # Place the widgets used to display general information about #\r\n # the function. 
#\r\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\r\n _labels = [_(u\"Function Code:\"), _(u\"Function Name:\")]\r\n (_max1, _y_pos1) = Widgets.make_labels(_labels, _fixed, 5, 5)\r\n\r\n _labels = [_(u\"Total Cost:\"), _(u\"Total Mode Count:\"),\r\n _(u\"Total Part Count:\"), _(u\"Remarks:\")]\r\n _y_start = self.txtName.size_request()[1] + _y_pos1[1] + 5\r\n (_max2, _y_pos2) = Widgets.make_labels(_labels, _fixed, 5, _y_start)\r\n _x_pos = max(_max1, _max2) + 50\r\n\r\n # Set the tooltips.\r\n self.txtCode.set_tooltip_text(_(u\"Enter a unique code for the \"\r\n u\"selected function.\"))\r\n self.txtName.set_tooltip_text(_(u\"Enter the name of the selected \"\r\n u\"function.\"))\r\n self.txtTotalCost.set_tooltip_text(_(u\"Displays the total cost of \"\r\n u\"the selected function.\"))\r\n self.txtModeCount.set_tooltip_text(_(u\"Displays the total number \"\r\n u\"of failure modes \"\r\n u\"associated with the \"\r\n u\"selected function.\"))\r\n self.txtPartCount.set_tooltip_text(_(u\"Displays the total number \"\r\n u\"of components associated \"\r\n u\"with the selected \"\r\n u\"function.\"))\r\n self.txtRemarks.set_tooltip_text(_(u\"Enter any remarks related to \"\r\n u\"the selected function.\"))\r\n self.chkSafetyCritical.set_tooltip_text(_(u\"Indicates whether or \"\r\n u\"not the selected \"\r\n u\"function is safety \"\r\n u\"critical.\"))\r\n\r\n # Place the widgets.\r\n _fixed.put(self.txtCode, _x_pos, _y_pos1[0])\r\n _fixed.put(self.txtName, _x_pos, _y_pos1[1])\r\n _fixed.put(self.txtTotalCost, _x_pos, _y_pos2[0])\r\n _fixed.put(self.txtModeCount, _x_pos, _y_pos2[1])\r\n _fixed.put(self.txtPartCount, _x_pos, _y_pos2[2])\r\n _fixed.put(self.txtRemarks, _x_pos, _y_pos2[3])\r\n _fixed.put(self.chkSafetyCritical, 5, _y_pos2[3] + 110)\r\n\r\n # Connect to callback functions for editable gtk.Widgets().\r\n self._lst_handler_id.append(\r\n self.txtCode.connect('focus-out-event', self._on_focus_out, 4))\r\n _textview = self.txtName.get_child().get_child()\r\n self._lst_handler_id.append(\r\n _textview.connect('focus-out-event', self._on_focus_out, 14))\r\n _textview = self.txtRemarks.get_child().get_child()\r\n self._lst_handler_id.append(\r\n _textview.connect('focus-out-event', self._on_focus_out, 15))\r\n\r\n # Connect to callback functions for uneditable gtk.Widgets().\r\n self.txtTotalCost.connect('changed', self._on_changed, 5)\r\n self.txtModeCount.connect('changed', self._on_changed, 16)\r\n self.txtPartCount.connect('changed', self._on_changed, 17)\r\n\r\n _fixed.show_all()\r\n\r\n # Insert the tab.\r\n _label = gtk.Label()\r\n _label.set_markup(\"<span weight='bold'>\" + _(u\"General\\nData\") +\r\n \"</span>\")\r\n _label.set_alignment(xalign=0.5, yalign=0.5)\r\n _label.set_justify(gtk.JUSTIFY_CENTER)\r\n _label.set_tooltip_text(_(u\"Displays general information for the \"\r\n u\"selected function.\"))\r\n _label.show_all()\r\n notebook.insert_page(_frame, tab_label=_label, position=-1)\r\n\r\n return False", "def display_flavors(self):\r\n print(\"We have the following flavors\"\"\")\r\n for flavor in self.flavors:\r\n print(\" ...\" + str(flavor.title()))", "def __html__(self) -> str:\n components = [\n f'{self.name}' if self.name else '',\n f'{self.repository}',\n ]\n return ', '.join([component for component in components if component])", "def features(request):\n # Order features by amount of upvotes\n features_list = Feature.objects.all().order_by('-upvotes')\n \n # Pagination for features\n page = request.GET.get('page', 1)\n paginator = 
Paginator(features_list, 10)\n try:\n features = paginator.page(page)\n except PageNotAnInteger:\n features = paginator.page(1)\n except EmptyPage:\n features = paginator.page(paginator.num_pages)\n \n # Display graphs\n chart_total_feature = FeaturesTotalChart() \n chart_feature_daily = FeaturesDailyStatus()\n chart_feature_weekly = FeaturesWeeklyStatus()\n chart_feature_monthly = FeaturesMonthlyStatus()\n \n return render(request, \"features.html\", {\n \"features\": features,\n 'chart_total_feature': chart_total_feature,\n 'chart_feature_daily': chart_feature_daily,\n 'chart_feature_weekly': chart_feature_weekly,\n 'chart_feature_monthly': chart_feature_monthly\n })", "def visualize(model: Model, structural_part=True, measurement_part=False,\n view=True, filename=None, title=''):\n g = gv.Digraph(format='jpg', graph_attr={'label': title})\n if structural_part:\n g.node_attr.update(color='red', shape='box')\n for i, j in model.parameters['Beta']:\n lval, rval = model.beta_names[0][i], model.beta_names[0][j]\n g.edge(rval, lval)\n if measurement_part:\n g.node_attr.update(color='black', shape='circle')\n for i, j in model.parameters['Lambda']:\n lval, rval = model.lambda_names[0][i], model.lambda_names[0][j]\n g.edge(lval, rval)\n g.render(filename, view=view)", "def experiment_show_table_format(experiment):\n from msrestazure.tools import parse_resource_id\n row = OrderedDict()\n row['Name'] = experiment['name']\n row['Resource Group'] = experiment['resourceGroup']\n row['Workspace'] = parse_resource_id(experiment['id'])['name']\n row['State'] = experiment['provisioningState']\n return row", "def visualize(self):\n return \"https://neuroglancer.bossdb.io/#!{'layers':{'image':{'source':'boss://__replace_me__'}}}\".replace(\n \"__replace_me__\",\n f\"{self.volume_provider.boss._project._base_protocol}://{self.volume_provider.boss._project._base_url}/{self.collection_name}/{self.experiment_name}/{self.channel_name}\",\n )", "def __repr__(self):\r\n return f\"{self.name} {self.status_name} {self.window_start} {self.wiki_url} {self.pad_location} {self.image}\"", "def get_feature_names(self):\n ...", "def visualize_instance_html(self, exp, label, div_name, exp_object_name,\n text=True, opacity=True):\n raw_string = ' '.join(self.indexed_string.keys())\n keys = self.indexed_string.keys()\n if not text:\n return u''\n text = (raw_string.encode('utf-8', 'xmlcharrefreplace').decode('utf-8'))\n text = re.sub(r'[<>&]', '|', text)\n exp = [(x[0], keys.index(x[0]), x[1]) for x in exp]\n all_occurrences = list(itertools.chain.from_iterable(\n [itertools.product([x[0]], x[1], [x[2]]) for x in exp]))\n all_occurrences = [(x[0], int(x[1]), x[2]) for x in all_occurrences]\n ret = '''\n %s.show_raw_text(%s, %d, %s, %s, %s);\n ''' % (exp_object_name, json.dumps(all_occurrences), label,\n json.dumps(text), div_name, json.dumps(opacity))\n return ret", "def display_flavors(self):\n for flavor in self.flavors:\n print(f\"- {flavor}\")", "def get_overview_annotations() -> dict:\n return {}", "def summary(self):\n if _have_ipython:\n IPython.display.display(IPython.display.HTML(self._repr_html_()))\n else:\n print(self)", "def __repr__(self):\n return '<HomepageFeatures(title=%r, body=%r, img_path_xs=%r, img_path_sm=%r, img_path_md=%r, img_path_lg=%r, is_active=%r)>' % (self.title, self.body, self.img_path_xs, self.img_path_sm, self.img_path_md, self.img_path_lg, self.is_active)", "def displayInfo(self, model):\n\t\ttaglist = []\n\t\tfor tag in 
model.tags:\n\t\t\ttaglist.append(tag.tagname)\n\t\tself.infoText.SetPage(infoTemplate.render(model=model, tags= ','.join(taglist)))", "def project_overview(project_name):\n if not db_find_project(project_name):\n abort(404)\n\n _project = Project.objects(project_name=project_name).first()\n # _forks = ProjectFork.objects(project_name=project_name, file_list__ne=[], total_changed_line_number__ne=0)\n _forks = ProjectFork.objects(project_name=project_name, total_changed_line_number__ne=0)\n\n # TODO _all_tags could be opted by AJAX\n _all_tags = {}\n if current_user.is_authenticated:\n _project_tags = ForkTag.objects(project_name=project_name, username=current_user.username)\n for tag in _project_tags:\n _all_tags[tag.fork_full_name] = tag.tags\n\n if current_user.is_authenticated:\n print('View: ', current_user.username, project_name)\n\n return render_template('project_overview.html', project=_project, forks=_forks, all_tags=_all_tags)", "def RenderAsHtml(self):\n html = '<table>'\n\n for p in FrontendJob._properties:\n if p == 'log' or p == 'clovis_task':\n continue\n value = getattr(self, p)\n if value:\n html += '<tr><td>' + p + '</td><td>' + str(value) + '</td></tr>'\n\n html += '</table>'\n return html", "def _repr_html_(self):\n\n return self._repr__base(rich_output=True)", "def _repr_html_(self):\n\n return self._repr__base(rich_output=True)", "def display_section(name):\n assert all((GENERAL, TRAINING, DETECTION, EVALUATION))\n section_frame = pd.DataFrame(eval(name)).T.fillna('-')\n section_frame['flags'] = section_frame.index.values\n section_frame['flags'] = section_frame['flags'].apply(lambda c: f'--{c}')\n section_frame = section_frame.reset_index(drop=True).set_index('flags')\n print(f'\\n{name.title()}\\n')\n print(\n section_frame[\n [\n column_name\n for column_name in ('help', 'required', 'default')\n if column_name in section_frame.columns\n ]\n ].to_markdown()\n )", "def intf_VIEWSHOW(E):\n out= \"View Properties\\n\"\n out+= \"---------------\\n\"\n out+= \"svgoutfile=%s\\n\" % OUT.outfile\n out+= \"camera=%s {camset}\\n\" % (','.join([str(x) for x in OUT.camera]))\n out+= \"target=%s {tarset}\\n\" % (','.join([str(x) for x in OUT.target]))\n out+= \"opacity=%s {hlr,hide}\\n\" % str(OUT.opacity)\n out+= \"facelines=%s {facelines}\\n\" % str(OUT.facelines)\n out+= \"vlinewidth=%0.2f {vlw,viewlinewidth}\\n\" % OUT.vlinewidth\n out+= \"vrefreshms=%d {refreshms,viewrefreshms}\\n\" % OUT.vrefreshms\n out+= \"vbox=(%d,%d) {viewbox[xy]}\\n\" % (OUT.vboxX,OUT.vboxY)\n out+= \"vtran=(%d,%d) {vtran[xy],viewtran[xy]}\\n\" % (OUT.vtranX,OUT.vtranY)\n out+= \"vscale=(%d,%d) {vscale[xy],viewscale[xy]}\\n\" % (OUT.vscaleX,OUT.vscaleY)\n print(out)", "def backend_description(self) -> str:", "def vtk_viewer(request):\n try:\n data = _refresh(request)\n except Exception:\n data = {}\n data['main'] = 'main'\n data['error'] = 'error'\n data['search'] = {\n 'help': ''\n }\n options = {\n 'resizable': True\n }\n data['options'] = mark_safe(json.dumps(options))\n return render(\n request,\n 'vtk_view/cdat_viewer.html',\n data\n )", "def show_help():\n\n url = (\n r\"https://agcloud.sharepoint.com/:p:/r/sites/\"\n r\"O365-UG-2HEngineeringSoftware/Shared%20Documents/2H%20Datalab/\"\n r\"DataLab%20Guidance.pptx?d=wcabe347939784784b8d7270cdf7938e7&csf=1&e=9LJsCD\"\n )\n webbrowser.open(url)", "def ShowScopes(scopes):\n columns = None\n if columns:\n headers = []\n data_list = []\n else:\n headers = ['Scope ID', 'Name', 'Parent Scope', 'VRF', 'Policy Priority']\n data_list = [[x['id'],\n 
x['name'],\n x['parent_app_scope_id'],\n x['vrf_id'], x['policy_priority']] for x in scopes ]\n table = columnar(data_list, headers, no_borders=False)\n print(table)", "def _create_analyses_input_page(self, notebook): # pylint: disable=R0914\r\n\r\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\r\n # Build-up the containers for the tab. #\r\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\r\n _hbox = gtk.HPaned()\r\n\r\n _fixed = gtk.Fixed()\r\n\r\n _frame = Widgets.make_frame(label=_(u\"Analysis Inputs\"))\r\n _frame.set_shadow_type(gtk.SHADOW_ETCHED_IN)\r\n _frame.add(_fixed)\r\n\r\n _hbox.pack1(_frame, True, True)\r\n\r\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\r\n # Place the widgets used to display analysis input information. #\r\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\r\n # Load the gtk.ComboBox() widgets.\r\n _results = [[u\"MCF\"], [u\"Kaplan-Meier\"], [_(u\"NHPP - Power Law\")],\r\n [u\"NHPP - Loglinear\"], [_(u\"Exponential\")],\r\n [_(u\"Lognormal\")], [_(u\"Normal\")], [u\"Weibull\"],\r\n [\"WeiBayes\"]]\r\n Widgets.load_combo(self.cmbDistribution, _results)\r\n _results = [[_(u\"Lower One-Sided\")], [_(u\"Upper One-Sided\")],\r\n [_(u\"Two-Sided\")]]\r\n Widgets.load_combo(self.cmbConfType, _results)\r\n _results = [[_(u\"Crow (NHPP Only)\")], [_(u\"Duane (NHPP Only)\")],\r\n [_(u\"Fisher Matrix\")], [_(u\"Likelihood\")],\r\n [_(u\"Bootstrap\")]]\r\n Widgets.load_combo(self.cmbConfMethod, _results)\r\n _results = [[\"MLE\"], [_(u\"Regression\")]]\r\n Widgets.load_combo(self.cmbFitMethod, _results)\r\n\r\n # Create the labels for the left half of the right side.\r\n _labels = [_(u\"Assembly:\"), _(u\"Description:\"), _(u\"Distribution:\"),\r\n _(\"Fit Method:\"), _(u\"Confidence:\"), _(u\"Confidence Type:\"),\r\n _(\"Confidence Method:\")]\r\n (_x_pos1, _y_pos1) = Widgets.make_labels(_labels, _fixed, 5, 5)\r\n _x_pos1 += 55\r\n\r\n # Create the labels for the right half of the right side.\r\n _labels = [_(u\"Start Time:\"), _(u\"End Time:\"), _(u\"Step Interval:\"),\r\n _(u\"Start Date:\"), _(u\"End Date:\")]\r\n (_x_pos2,\r\n _y_pos2) = Widgets.make_labels(_labels, _fixed, _x_pos1 + 215, 5)\r\n _x_pos2 += _x_pos1\r\n _x_pos2 += 275\r\n\r\n # Place widgets on the left side.\r\n _fixed.put(self.cmbAssembly, _x_pos1, _y_pos1[0])\r\n _fixed.put(self.txtDescription, _x_pos1, _y_pos1[1])\r\n _fixed.put(self.cmbDistribution, _x_pos1, _y_pos1[2])\r\n _fixed.put(self.cmbFitMethod, _x_pos1, _y_pos1[3])\r\n _fixed.put(self.txtConfidence, _x_pos1, _y_pos1[4])\r\n _fixed.put(self.cmbConfType, _x_pos1, _y_pos1[5])\r\n _fixed.put(self.cmbConfMethod, _x_pos1, _y_pos1[6])\r\n\r\n # Place widgets on the right side.\r\n _fixed.put(self.txtStartTime, _x_pos2, _y_pos2[0])\r\n _fixed.put(self.txtEndTime, _x_pos2, _y_pos2[1])\r\n _fixed.put(self.txtRelPoints, _x_pos2, _y_pos2[2])\r\n _fixed.put(self.txtStartDate, _x_pos2, _y_pos2[3])\r\n _fixed.put(self.btnStartDate, _x_pos2 + 105, _y_pos2[3])\r\n _fixed.put(self.txtEndDate, _x_pos2, _y_pos2[4])\r\n _fixed.put(self.btnEndDate, _x_pos2 + 105, _y_pos2[4])\r\n _fixed.put(self.chkGroup, _x_pos2, _y_pos2[4] + 30)\r\n _fixed.put(self.chkParts, _x_pos2, _y_pos2[4] + 60)\r\n\r\n _fixed.show_all()\r\n\r\n # Insert the tab.\r\n _label = gtk.Label()\r\n _label.set_markup(\"<span weight='bold'>\" +\r\n _(u\"Analysis\\nInputs\") + \"</span>\")\r\n _label.set_alignment(xalign=0.5, yalign=0.5)\r\n _label.set_justify(gtk.JUSTIFY_CENTER)\r\n _label.show_all()\r\n 
_label.set_tooltip_text(_(u\"Displays analysis inputs for the selected \"\r\n u\"dataset.\"))\r\n notebook.insert_page(_hbox, tab_label=_label, position=-1)\r\n\r\n return False", "def show_custom_graph(self):\n pass" ]
[ "0.5600265", "0.5545297", "0.5515862", "0.52539235", "0.52248704", "0.51869327", "0.518335", "0.5182476", "0.51630205", "0.5079258", "0.5056503", "0.50485706", "0.50290495", "0.50201535", "0.5011314", "0.49888295", "0.496754", "0.49586928", "0.4941969", "0.49364", "0.4936073", "0.49315485", "0.491883", "0.4916707", "0.49138457", "0.48921895", "0.48902094", "0.48876578", "0.4886318", "0.4873332", "0.48618656", "0.48545423", "0.484576", "0.4844518", "0.4831105", "0.48290142", "0.48205924", "0.4812263", "0.48036614", "0.4799779", "0.4797972", "0.4779778", "0.47784328", "0.4773944", "0.4772931", "0.4771327", "0.47697738", "0.47623995", "0.47614673", "0.47549337", "0.47476733", "0.47447973", "0.47447973", "0.474222", "0.4741777", "0.4741567", "0.47392935", "0.47323236", "0.4729139", "0.47282428", "0.47282404", "0.47208405", "0.47208405", "0.47201848", "0.471685", "0.47127885", "0.47028443", "0.46887404", "0.46795717", "0.46762392", "0.46718958", "0.4671472", "0.467137", "0.466042", "0.46601376", "0.46592137", "0.46570465", "0.46550435", "0.46490192", "0.46478882", "0.46458316", "0.46419477", "0.4639903", "0.46380997", "0.46331692", "0.46314195", "0.46285456", "0.46232796", "0.4616135", "0.46146053", "0.46137816", "0.46137816", "0.4612807", "0.461191", "0.4611382", "0.46052566", "0.46023226", "0.46000195", "0.4599772", "0.45976764" ]
0.577709
0
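The "document" field of the record above builds the Facets Overview widget HTML from a base64-encoded feature-statistics proto. Below is a minimal, self-contained sketch of how that `_repr_html_` pattern could be packaged and exercised in a notebook. The `FacetsOverview` wrapper name, the placeholder `FACETS_DEPENDENCIES` paths, and the proto-construction step in the trailing comment (which follows the facets_overview package's documented `GenericFeatureStatisticsGenerator.ProtoFromDataFrames` usage) are illustrative assumptions, not part of the dataset record.

import base64

# Placeholder asset locations -- substitute the facets-dist files you actually serve.
FACETS_DEPENDENCIES = {
    'webcomponents_js': 'webcomponents-lite.js',
    'facets_html': 'facets-jupyter.html',
}


class FacetsOverview:
    """Wraps a serialized feature-statistics proto for notebook display."""

    def __init__(self, proto):
        # `proto` is expected to be a DatasetFeatureStatisticsList protobuf message.
        self._proto = proto

    def _repr_html_(self) -> str:
        # Same pattern as the record's document: base64-encode the proto and
        # hand it to the <facets-overview> web component.
        protostr = base64.b64encode(self._proto.SerializeToString()).decode('utf-8')
        html_template = '''
        <script src="{webcomponents_js}"></script>
        <link rel="import" href="{facets_html}">
        <facets-overview id="overview_elem"></facets-overview>
        <script>
          document.querySelector("#overview_elem").protoInput = "{protostr}";
        </script>'''
        return html_template.format(
            webcomponents_js=FACETS_DEPENDENCIES['webcomponents_js'],
            facets_html=FACETS_DEPENDENCIES['facets_html'],
            protostr=protostr,
        )


# In a notebook cell, the proto would typically come from the facets_overview
# package, and returning the wrapper object lets Jupyter render _repr_html_:
#
#   from facets_overview.generic_feature_statistics_generator import (
#       GenericFeatureStatisticsGenerator)
#   proto = GenericFeatureStatisticsGenerator().ProtoFromDataFrames(
#       [{'name': 'train', 'table': df}])
#   FacetsOverview(proto)   # rendered by Jupyter via _repr_html_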
Html representation of Facets Dive for use in a Jupyter notebook.
def _repr_html_(self) -> str: html_template = """ <script src="{webcomponents_js}"></script> <link rel="import" href="{facets_html}"> <facets-dive id="dive_elem" height="{height}"></facets-dive> <script> document.querySelector("#dive_elem").data = {data}; </script>""" html = html_template.format( facets_html=FACETS_DEPENDENCIES['facets_html'], webcomponents_js=FACETS_DEPENDENCIES['webcomponents_js'], data=self._data.to_json(orient='records'), height=self.height, ) return html
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _repr_html_(self) -> str:\n protostr = base64.b64encode(self._proto.SerializeToString()).decode('utf-8')\n html_template = '''\n <script src=\"{webcomponents_js}\"></script>\n <link rel=\"import\" href=\"{facets_html}\">\n <facets-overview id=\"overview_elem\"></facets-overview>\n <script>\n document.querySelector(\"#overview_elem\").protoInput = \"{protostr}\";\n </script>'''\n html = html_template.format(\n facets_html=FACETS_DEPENDENCIES['facets_html'],\n webcomponents_js=FACETS_DEPENDENCIES['webcomponents_js'],\n protostr=protostr,\n )\n return html", "def vggface2_labels(self):\n id_meta = pd.read_csv(\"loki/static/models/vggface2/identity_meta.csv\",\n sep=\"\\n\")\n id_meta = id_meta[\n 'Class_ID, Name, Sample_Num, Flag, Gender'].str\\\n .split(',', expand=True)\n\n id_meta.columns = [\n 'Class_ID', 'Name', 'Sample_Num', 'Flag', 'Gender', 'None']\n id_meta.drop(columns=['None'], inplace=True)\n\n vgg_names = id_meta.drop(columns=[\n 'Sample_Num', 'Flag', 'Gender']).set_index('Class_ID')\n\n return vgg_names", "def _repr_html_(self):\n return (\n f'<b>GalaxyCluster:</b> {self.unique_id} '\n f'(ra={self.ra}, dec={self.dec}) at z={self.z}'\n f'<br>> <b>with columns:</b> {self._str_colnames()}'\n f'<br>> {len(self.galcat)} source galaxies'\n f'<br>{self.galcat._html_table()}'\n )", "def display_feature(self):\n return ', '.join([feature.name for feature in self.features.all()])", "def export_face(ind, face):\n isplane, center, radius, face_size = get_sphere_info(face)\n if isplane:\n macro = \"FlatFace({}, {}, array[{}]{{{}}}, {}, {})\\n\"\n return macro.format(ind, len(face), len(face), pov_vector_list(face),\n pov_vector(center), face_size)\n else:\n macro = \"BubbleFace({}, {}, array[{}]{{{}}}, {}, {}, {})\\n\"\n return macro.format(ind, len(face), len(face), pov_vector_list(face),\n pov_vector(center), radius, face_size)", "def _repr_html_(self): # pragma: no cover\n return Utils.render_html('extent.html', extent=self)", "def __dxf__(self):\n return tags2str(self)", "def _linked_feature_label(linked_feature):\n\treturn \"\"\"<\n <B>{name}</B><BR />\n F={num_features} D={projected_dim}<BR />\n {fml}<BR />\n <U>{source_translator}</U><BR />\n <I>{source_layer}</I>\n >\"\"\".format(\n\t\tname=linked_feature.name, num_features=linked_feature.size, projected_dim=linked_feature.embedding_dim, fml=linked_feature.fml, source_translator=linked_feature.source_translator, source_layer=linked_feature.source_layer\n\t)", "def _ipython_display_(self):\n spec, render_type = self._get_spec_info()\n\n id = uuid.uuid4()\n publish_display_data(\n {'text/html': self._generate_html(id)},\n metadata={'jupyter-vega3': '#{0}'.format(id)}\n )\n publish_display_data(\n {'application/javascript':\n self._generate_js(id, spec, render_type)},\n metadata={'jupyter-vega3': '#{0}'.format(id)}\n )", "def tags():\n tag = \"\"\n tag += \"Supported inflexions and appropriate keys\\n\\n\"\n for item in vkeys.keys():\n tag += (\"%s\\t - %s\\n\" %(item.ljust(10,' '), vkeys[item]))\n return tag", "def features():\n\n return render_template('features.html')", "def __repr__(self):\n return '{}({})'.format(type(self).__name__, ', '.join(repr(self[feat]) for feat in self.features))", "def _repr_html_(self):\n return (\n f'<b>{self.__class__.__name__}</b>'\n f'<br> <b>defined by:</b> {self._str_meta_()}'\n f'<br> <b>with columns:</b> {self._str_colnames()}'\n f'<br> {len(self)} objects'\n f'<br> {self._html_table()}'\n )", "def tags():\r\n section = document.add_section()\r\n new_width, new_height = section.page_height, 
section.page_width\r\n section.orientation = WD_ORIENT.LANDSCAPE\r\n section.page_width = 7772400\r\n section.page_height = 10058400\r\n document.add_heading('Tags', level=1)\r\n tags = get_qlik_sense.get_tag()\r\n num_of_tags = len(tags)\r\n table = document.add_table(rows=num_of_tags+1, cols=1)\r\n table.style = 'Grid Table 1 Light Accent 1'\r\n row = table.rows[0]\r\n row.cells[0].text = 'name'\r\n for tag in range(num_of_tags):\r\n row = table.rows[tag+1]\r\n row.cells[0].text = str(tags[tag])", "def view_ballot_entities_svg(self, request):\n\n layout = VoteLayout(\n self.vote, request, tab='{}-entities'.format(self.type)\n )\n return {\n 'path': layout.svg_path,\n 'name': layout.svg_name\n }", "def get_html(self):\r\n context = {\r\n 'display_name': self.display_name_with_default,\r\n 'instructions_html': self.instructions,\r\n 'annotation_storage': self.annotation_storage_url,\r\n 'token': retrieve_token(self.user, self.annotation_token_secret),\r\n 'tag': self.instructor_tags,\r\n 'openseadragonjson': self.openseadragonjson,\r\n }\r\n\r\n return self.system.render_template('imageannotation.html', context)", "def repr_figure(self):\n\n default_kwargs = {'placement': self.placement,\n 'caption': self.caption,\n 'label': self.label,\n 'figure_env_name': self.figure_env_name}\n\n myfig = self.extension_mapping[self.extension]()\n\n return self.fig_str.format(myfig=myfig, **default_kwargs)", "def get_feature_names(self):\n ...", "def _repr_html_(self):\n\n return self._repr__base(rich_output=True)", "def _repr_html_(self):\n\n return self._repr__base(rich_output=True)", "def __repr__(self: GtinFormat) -> str:\n return f\"GtinFormat.{self.name}\"", "def describe(self):\n\n ret = []\n ret.append(\"Functional ID: %s\" % self._number)\n ret.append(\"Functional Name: %s\" % self._xc_func_name)\n ret.append(\"Attributes:\")\n ret.append(\" Name: %s\" % self._name)\n ret.append(\" Kind: %d\" % self._kind)\n ret.append(\" Family: %d\" % self._family)\n ret.append(\"Citations:\")\n for x in self._refs:\n ret.append(\" \" + x)\n\n return \"\\n\".join(ret)", "def show_features_datatypes(df):\n\tfor inum,icol in enumerate(df.columns):\n\t\tprint('Column id: {0:3d} \\tName: {1:12s} \\tDataType: {2}'.format(inum, icol, df[icol].dtypes))", "def _repr_html_(self) -> str:\n return self.all(pandas=True)._repr_html_() # type: ignore", "def vgg19(tensorized, **kwargs):\n return _vgg('vgg19', 'E', False, tensorized, **kwargs)", "def _repr_html_(self):\n params = OrderedDict()\n params[\"Name\"] = self.name\n params[\"Description\"] = self.description\n params[\"Ns\"] = self.Ns\n params[\"Ni\"] = self.Ni\n params[\"Kinetic Parameter\"] = self.kinetic_parameter_type\n params[\"Kinetic Parameter Value\"] = self.kinetic_parameter_value \n \n header = \"<table>\"\n footer = \"</table>\"\n html = \"\"\n\n for key, val in params.items():\n html += \"<tr><td>{0}</td><td>{1}</td></tr>\".format(key, val)\n\n return header + html + footer", "def paint_faces_data(frame, faces_data):\n for face in faces_data:\n (top, right, bottom, left) = face['location']\n\n if face['identity'] is None:\n name = 'Unknown'\n color = (0, 0, 255) # red\n else:\n name = face['identity']\n color = (0, 128, 0) # dark green\n\n # Draw a box around the face\n cv2.rectangle(frame, (left, top), (right, bottom), color, 2)\n\n # Draw a label with a name below the face\n cv2.rectangle(frame, (left, bottom - 35), (right, bottom), color, cv2.FILLED)\n cv2.putText(frame, name, (left + 6, bottom - 6), cv2.FONT_HERSHEY_DUPLEX, 1.0, (255, 255, 255), 1)", 
"def __repr__(self):\n\n p = self\n\n return f\"<tag_name = {p.tag_name}\"", "def _features_of(entry: _LexiconEntry) -> str:\n return entry[\"features\"]", "def _repr_html_(self):\n # pylint: disable=protected-access\n return self.folium_map._repr_html_()\n # pylint: enable=protected-access", "def render_flotplot(self, ctx, data):\n\t\tplotOptions = base.getMetaText(self.service, \"_plotOptions\")\n\t\tif plotOptions is not None:\n\t\t\targs = \", %s\"%plotOptions\n\t\telse:\n\t\t\targs = \"\"\n\t\treturn ctx.tag(onclick=\"openFlotPlot($('table.results')%s)\"%args)", "def default_display_function(feature):\n # n_samples = min(n_samples, feature.shape[0])\n IPython.display.display(widgets.Box(layout=widgets.Layout(height=\"2.5%\")))\n IPython.display.display(feature)\n IPython.display.display(widgets.Box(layout=widgets.Layout(height=\"2.5%\")))", "def __html__(self, tags:defaultdict) -> str:\n html = \"\"\n\n # Lens detail\n if tags['EXIF LensModel']:\n html += f\"<p class='lens'>{tags['EXIF LensModel']}</p>\\n\"\n \n # Focal length\n if tags['EXIF FocalLengthIn35mmFilm']:\n if tags['EXIF FocalLengthIn35mmFilm'] != tags['EXIF FocalLength']:\n html += f\"<p class='focal-length'>{tags['EXIF FocalLengthIn35mmFilm']}mm (full frame equivalent)</p>\\n\"\n else:\n html += f\"<p class='focal-length'>{tags['EXIF FocalLengthIn35mmFilm']}mm</p>\\n\"\n else:\n if tags['EXIF FocalLength']:\n html += f\"<p class='focal-length'>{tags['EXIF FocalLength']}mm</p>\\n\"\n\n # ISO, Shutter speed, Apperture\n if tags['EXIF ISOSpeedRatings']:\n html += f\"<p class='iso'>ISO {tags['EXIF ISOSpeedRatings']}</p>\\n\"\n if tags['EXIF ExposureTime']:\n html += f\"<p class='shutter-speed'>{tags['EXIF ExposureTime']} Second(s)</p>\\n\"\n if tags['EXIF FNumber']:\n from fractions import Fraction\n tags['EXIF FNumber'] = str(float(Fraction(str(tags['EXIF FNumber'])))) # Convert aperture to str i.e. 
6.3\n html += f\"<p class='aperture'>f{tags['EXIF FNumber']}</p>\\n\"\n\n # Camera body details\n if tags['Image Make'] and tags['Image Model']:\n html += f\"<p class='camera-type'>{tags['Image Make']} {tags['Image Model']}</p>\\n\"\n elif tags['Image Make']:\n html += f\"<p class='camera-type'>{tags['Image Make']}</p>\\n\"\n elif tags[\"Image Model\"]:\n html += f\"<p class='camera-type'>{tags['Image Model']}</p>\\n\"\n else:\n ...\n return html", "def tags():", "def describe_element(name, df):\n property_formats = {'f': 'float', 'u': 'uchar', 'i': 'int'}\n element = ['element ' + name + ' ' + str(len(df))]\n\n if name == 'face':\n element.append(\"property list uchar int vertex_indices\")\n\n else:\n for i in range(len(df.columns)):\n # get first letter of dtype to infer format\n f = property_formats[str(df.dtypes[i])[0]]\n element.append('property ' + f + ' ' + str(df.columns.values[i]))\n\n return element", "def export_html(self):\n self._svg_warning_displayed = False\n super(RichJupyterWidget, self).export_html()", "def get_template_tag(self):\n return \"{% dataset \" + self.cleantitle + \" %}\"", "def get_term_representations(self):\n return self.transformer.components_", "def code(self):\n t = ''\n for k, v in self.traits.iteritems():\n t += '{k}={v},'.format(k=k, v=v)\n\n return '{0}({1})'.format(self.__class__.__name__, t[:-1])", "def _repr_html_(self):\n return self.__repr__()", "def _repr_html_(self):\n return self.__repr__()", "def _repr_html_(self):\n return self.data.to_html()", "def get_html(self):\r\n context = {\r\n 'display_name': self.display_name_with_default,\r\n 'element_id': self.element_id,\r\n 'instructions_html': self.instructions,\r\n 'content_html': self._render_content()\r\n }\r\n\r\n return self.system.render_template('annotatable.html', context)", "def visualize_instance_html(self, exp, label, div_name, exp_object_name,\n text=True, opacity=True):\n raw_string = ' '.join(self.indexed_string.keys())\n keys = self.indexed_string.keys()\n if not text:\n return u''\n text = (raw_string.encode('utf-8', 'xmlcharrefreplace').decode('utf-8'))\n text = re.sub(r'[<>&]', '|', text)\n exp = [(x[0], keys.index(x[0]), x[1]) for x in exp]\n all_occurrences = list(itertools.chain.from_iterable(\n [itertools.product([x[0]], x[1], [x[2]]) for x in exp]))\n all_occurrences = [(x[0], int(x[1]), x[2]) for x in all_occurrences]\n ret = '''\n %s.show_raw_text(%s, %d, %s, %s, %s);\n ''' % (exp_object_name, json.dumps(all_occurrences), label,\n json.dumps(text), div_name, json.dumps(opacity))\n return ret", "def featurePropertiesOutput(feat):\n info = feat.getInfo()\n properties = info['properties']\n theid = info.get('id')\n if theid:\n stdout = '<h3>ID {}</h3></br>'.format(theid)\n else:\n stdout = '<h3>Feature has no ID</h3></br>'\n\n if properties:\n for prop, value in properties.items():\n stdout += '<b>{}</b>: {}</br>'.format(prop, value)\n else:\n stdout += '<b>Feature has no properties</b>'\n return stdout", "def __str__(self):\n return \"{}\".format(self.eTrait_)", "def __repr__(self):\n return super().__repr__().replace(\"<\", \"<CF \", 1)", "def visualize(fd, pos_tags=None):\n if pos_tags is not None:\n fd = {t: f for t, f in fd.items() if t.pos in pos_tags}\n color = {pos.tag: color.hex for pos, color in COLOR.items()}\n frequencies = sorted(fd.values())\n font_size = rescale(frequencies, range(75, 351))\n html = '\\n'.join(\n f'''<font\n color=\"{color[t.pos]}\"\n title=\"{t.lemma}/{t.pos} ({f})\"\n style=\"font-size: {font_size(f)}%\"\n >\n {t.lemma}\n </font>''' for t, 
f in fd.items()\n )\n return html", "def __repr__(self):\n info = ('class {}\\n'.format(self.__class__.__name__) +\n 'TFReccord {}\\n'.format(self.tffile) +\n 'Embeddings [{}, {}]\\n'.format(self.embeddings.shape[0], self.embeddings.shape[1]))\n return info", "def __repr__(self):\n return f\"Fact-Sheet: '{self.title}'\"", "def __repr__(self) -> str:\n return \"<Twilio.Supersim.V1.FleetPage>\"", "def __repr__(self):\n return (\n '<DCEL ('\n 'vertices:\\n {obj.vertices},\\n'\n 'edges:\\n {obj.edges},\\n'\n 'faces:\\n {obj.faces}>'.format(obj=self)\n )", "def _title(profile):\n if profile['operation'] == 'differential':\n p1, p2 = profile['profiles']\n return 'differential ({}, {})'.format(_title(p1), _title(p2))\n elif profile['operation'] == 'local feature':\n p = profile['profile']\n return 'local feature {} ({})'.format(profile['function'], _title(p))\n else:\n return ' '.join([str(x) for x in profile.values()])", "def _repr_(self):\n return \"Newform abelian subvariety %s of dimension %s of %s\" % (\n self.newform_label(), self.dimension(), self._ambient_repr())", "def depart_ltb_html(self, node):\n # Add close div\n self.depart_admonition(node)", "def get_feature_names():\n return ['UserID', 'SessionID', 'TaskName', 'Orientation', 'TapType'] + get_numerical_feature_names()", "def __repr__(self):\n\n return self._repr__base(rich_output=False)", "def __repr__(self):\n\n return self._repr__base(rich_output=False)", "def dftb_geom(name): \n dftb_geom = \"\"\"Geometry = GenFormat {\n <<< \"{{ title }}\"\n }\n \"\"\"\n return Environment().from_string(dftb_geom).render(title=name)", "def wrap_in_html(self,svgofmodel):\n html= '''<html>\\n%s\\n%s\\n%s\\n</g></g></g></svg></body></html>\\n'''\n svgbody= '''<body onload=\"javascript:setTimeout(&quot;location.reload(true);&quot;,%d);\">\\n''' % self.vrefreshms\n svgbody += \"<h4>GeoGad</h4>\"\n svghead= '<svg xmlns=\"http://www.w3.org/2000/svg\" version=\"1.2\" baseProfile=\"tiny\" width=\"%dpx\" height=\"%dpx\">\\n'\n svghead= svghead % (self.vboxX,self.vboxY)\n svghead+= '<rect x=\"1\" y=\"1\" width=\"%d\" height=\"%d\" fill=\"none\" stroke=\"blue\" stroke-width=\"4\"/>\\n'% (self.vboxX,self.vboxY)\n svghead+= '<g fill=\"none\" stroke=\"black\" stroke-width=\"%0.2f\">\\n' % self.vlinewidth\n svghead+= '<g transform=\"scale(%0.2f,%0.2f)\">\\n' % (self.vscaleX,self.vscaleY)\n svghead+= '<g transform=\"translate(%0.2f,%0.2f)\">\\n' % (self.vtranX,self.vtranY)\n return html % (svgbody,svghead,svgofmodel)", "def _repr_svg_(self):\n pass", "def display_data_features(self, data_obj: DataObject):\n data_title = f'CLASS: {data_obj.classID}\\nFILENAME: {data_obj.fname}\\nHAS JOINTS: {data_obj.has_joints}'\n data_title_x = 0\n data_title_y = 10 # mess with this\n labels_column_x = 20\n self.stdscr.addstr(data_title_y, data_title_x, data_title)\n\n feature_highlighted = None\n for i, feature in enumerate(self.feature_label_dict):\n feature_text = f'{feature}: {data_obj[feature]}'\n if self.feature_cursor_pos == i:\n feature_highlighted = feature\n self.stdscr.attron(curses.color_pair(1))\n self.stdscr.addstr(data_title_y + i + 2, 0, feature_text)\n self.stdscr.attroff(curses.color_pair(1))\n else:\n self.stdscr.addstr(data_title_y + i + 2, 0, feature_text)\n if self.feature_selected is True and self.label_selected is False:\n feature_labels = self.feature_label_dict[feature_highlighted]\n for j, label in enumerate(feature_labels):\n if self.label_cursor_pos == j:\n self.stdscr.attron(curses.color_pair(1))\n self.stdscr.addstr(data_title_y + j + 2, 
labels_column_x, label)\n self.stdscr.attroff(curses.color_pair(1))\n else:\n self.stdscr.addstr(data_title_y + j + 2, labels_column_x, label)\n return feature_highlighted", "def render(self):\n print(self._get_grid_representations())", "def _repr_html_(self):\n return self._frame._repr_html_()", "def graphs_kelly():\n return render_template(\"graphs-Kelly.html\")", "def get_family_repr(self):\r\n return \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n\" % (self.trf_id,\r\n self.trf_period,\r\n self.trf_array_length,\r\n self.trf_array_gc,\r\n self.trf_pvar,\r\n self.trf_gi,\r\n self.trf_l_ind,\r\n self.trf_r_ind,\r\n self.trf_chr,\r\n self.trf_repbase,\r\n self.trf_superfamily,\r\n self.trf_family,\r\n self.trf_subfamily)", "def svg(self) -> str:\n data = {\n 'x': self.x,\n 'y': self.y,\n 'width': self.width,\n 'height': self.height,\n 'text_x': self.x + 30,\n 'text_y': self.y + 20,\n 'name': self.person.name\n }\n return PERSON_BOX_TEMPLATE.format(**data)", "def __html__(self):\n return str(self)", "def _repr_html_(self):\n\n import numpy as np\n import matplotlib.pyplot as plt\n from .._tier9 import imshow\n\n\n size_in_pixels = np.prod(self.shape)\n size_in_bytes = size_in_pixels * self.dtype.itemsize\n\n labels = (self.dtype == np.uint32)\n\n # In case the image is 2D, 3D and larger than 100 pixels, turn on fancy view\n if len(self.shape) in (2, 3) and size_in_pixels >= 100:\n import matplotlib.pyplot as plt\n imshow(self,\n labels=labels,\n continue_drawing=True,\n colorbar=not labels)\n image = self._png_to_html(self._plt_to_png())\n else:\n return \"<pre>cle.array(\" + str(np.asarray(self)) + \", dtype=\" + str(self.dtype) + \")</pre>\"\n\n\n if size_in_bytes > 1024:\n size_in_bytes = size_in_bytes / 1024\n if size_in_bytes > 1024:\n size_in_bytes = size_in_bytes / 1024\n if size_in_bytes > 1024:\n size_in_bytes = size_in_bytes / 1024\n size = \"{:.1f}\".format(size_in_bytes) + \" GB\"\n else:\n size = \"{:.1f}\".format(size_in_bytes) + \" MB\"\n else:\n size = \"{:.1f}\".format(size_in_bytes) + \" kB\"\n else:\n size = \"{:.1f}\".format(size_in_bytes) + \" B\"\n\n histogram = \"\"\n\n if size_in_bytes < 100 * 1024 * 1024:\n if not labels:\n\n import numpy as np\n from .._tier2 import minimum_of_all_pixels, maximum_of_all_pixels\n from .._tier3 import histogram\n\n num_bins = 32\n\n h = np.asarray(histogram(self, num_bins=num_bins))\n\n plt.figure(figsize=(1.8, 1.2))\n plt.bar(range(0, len(h)), h)\n\n # hide axis text\n # https://stackoverflow.com/questions/2176424/hiding-axis-text-in-matplotlib-plots\n # https://pythonguides.com/matplotlib-remove-tick-labels\n frame1 = plt.gca()\n frame1.axes.xaxis.set_ticklabels([])\n frame1.axes.yaxis.set_ticklabels([])\n plt.tick_params(left=False, bottom=False)\n\n histogram = self._png_to_html(self._plt_to_png())\n\n min_max = \"<tr><td>min</td><td>\" + str(self.min()) + \"</td></tr>\" + \\\n \"<tr><td>max</td><td>\" + str(self.max()) + \"</td></tr>\"\n\n else:\n\n min_max = \"\"\n\n all = [\n \"<table>\",\n \"<tr>\",\n \"<td>\",\n image,\n \"</td>\",\n \"<td style=\\\"text-align: center; vertical-align: top;\\\">\",\n \"<b><a href=\\\"https://github.com/clEsperanto/pyclesperanto_prototype\\\" target=\\\"_blank\\\">cle._</a> image</b><br/>\",\n \"<table>\",\n \"<tr><td>shape</td><td>\" + str(self.shape).replace(\" \", \"&nbsp;\") + \"</td></tr>\",\n \"<tr><td>dtype</td><td>\" + str(self.dtype) + \"</td></tr>\",\n \"<tr><td>size</td><td>\" + size + \"</td></tr>\",\n min_max,\n \"</table>\",\n histogram,\n \"</td>\",\n 
\"</tr>\",\n \"</table>\",\n ]\n\n return \"\\n\".join(all)", "def _repr_(self):\n return \"Projective hypersurface defined by %s in %s\"%(\n self.defining_polynomial(), self.ambient_space())", "def show_df_by_tags(df, tags):\n return st.dataframe(filter_df(df, tags)) if not 'Expert' in df.columns else st.dataframe(filter_df(df, tags), height=150, width=450)", "def __repr__(self):\r\n return f\"{self.name} {self.status_name} {self.window_start} {self.wiki_url} {self.pad_location} {self.image}\"", "def render(self, mode='human'):", "def highlight_faces(image, faces, output_filename, terminal_print=True):\n im = Image.open(image)\n draw = ImageDraw.Draw(im)\n\n for (face_ind, face) in enumerate(faces):\n\n # compute emotions\n list_emotion_scores = [face.sorrow_likelihood,\n face.joy_likelihood,\n face.anger_likelihood,\n face.surprise_likelihood]\n\n list_emotions = [\"SORROW\",\n \"JOY\",\n \"ANGER\",\n \"SURPRISE\"]\n\n string_label = generate_string_label(list_emotions, list_emotion_scores)\n\n if terminal_print:\n # print emotions on terminal\n print(\"\\n\")\n print(\"-----------------------\")\n print(\"Face {}\".format(face_ind))\n\n for (crrt_emotion, crrt_score) in zip(list_emotions, list_emotion_scores):\n print(\"{}: {}\".format(crrt_emotion, crrt_score))\n\n print(string_label)\n\n print(\"-----------------------\")\n\n # draw box around face\n box = [(vertex.x, vertex.y)\n for vertex in face.bounding_poly.vertices]\n draw.line(box + [box[0]], width=5, fill='#00ff00')\n\n # add legend in the face box\n fontsize = 35\n font = ImageFont.truetype(\"/usr/share/fonts/truetype/freefont/FreeMono.ttf\", fontsize)\n\n offset = 5\n heigth_text = 40\n length_text = box[1][0] - box[0][0] - 2 * offset\n draw.rectangle(((box[0][0] + offset, box[0][1] + offset), (box[0][0] + length_text + offset, box[0][1] + heigth_text + offset)), fill=\"black\")\n draw.text((box[0][0] + offset, box[0][1] + offset), string_label, font=font, fill=(255, 255, 255, 255))\n\n # highlight significant points\n point_nbr = 0\n half_width_sqare = 2\n\n list_point_coords = []\n\n for point in face.landmarks:\n x = point.position.x\n y = point.position.y\n\n list_point_coords.append((x, y))\n\n draw.rectangle(((x - half_width_sqare, y - half_width_sqare), (x + half_width_sqare, y + half_width_sqare)), fill=\"red\")\n\n # fontsize = 15\n # font = ImageFont.truetype(\"/usr/share/fonts/truetype/freefont/FreeMono.ttf\", fontsize)\n # draw.text((x, y), str(point_nbr), font=font, fill=(255, 255, 0, 0))\n\n point_nbr += 1\n\n all_lists_points = [\n [10, 11, 9],\n [10, 12, 11],\n [14, 7, 13, 15],\n [7, 6],\n [14, 6, 13, 7, 14],\n [16, 17, 18, 19],\n [21, 22, 23, 24],\n [30, 6],\n ]\n\n for crrt_list_points in all_lists_points:\n draw_line_list_points(draw, crrt_list_points, list_point_coords)\n\n draw_line_list_points(draw, [2, 26, 3], list_point_coords, close=False)\n draw_line_list_points(draw, [4, 27, 5], list_point_coords, close=False)\n draw_line_list_points(draw, [10, 8, 11], list_point_coords, close=False)\n\n im.save(output_filename)", "def createFeatureFrame(mode):\r\n \r\n text = textFeature(mode)\r\n sentiment = clfFeature('sentiment', mode)\r\n actors = clfFeature('actors', mode)\r\n directors = clfFeature('directors', mode)\r\n genre = clfFeature('genre', mode)\r\n titles = clfFeature('titles', mode)\r\n featureframe = pd.concat([text, sentiment, actors, directors, genre, titles], axis=1)\r\n \r\n return featureframe", "def get_features(data_frame, n_components):\n n_features = 3\n if n_components == 2:\n 
n_features = 2\n col = data_frame.columns.tolist()[:-1]\n com = 1\n components = []\n index = []\n for n_iter in range(n_features):\n print(\"Choose the {}-feature that you want to plot: \".format(n_iter + 1))\n for i in range(len(col)):\n if col[i] == -1:\n print('\\033[91m'\" {}-> SELECTED \\033[0m\".format(i + 1))\n else:\n print(\"{} -> {}\".format(i + 1, col[i]))\n\n try:\n com = int(input(\"write the left index of the feature: \"))\n except:\n print('Invalid age, please enter a number')\n components.append(col[com - 1])\n index.append(com - 1)\n col[com - 1] = -1\n\n return components, index", "def get_visualizations( self, dataset ):\n\n return [ 'phyloviz' ]", "def __str__(self):\n tag = []\n for key in self.tags:\n if key == 'label':\n self.type = self.tags[key]\n else:\n try:\n tag.append(\"%s=%0.3f\" % (str(key), self.tags[key]))\n except TypeError:\n tag.append(\"%s=%s\" % (str(key), str(self.tags[key])))\n \n \n tag = \";\".join(tag)\n dat = [self.chrom, self.out_start, self.start, self.in_start, \\\n self.in_end, self.end, self.out_end, self.type, self.size, \\\n tag]\n\n return \"{0}\\t{1}\\t{2}\\t{3}\\t{4}\\t{5}\\t{6}\\t{7}\\t{8}\\t{9}\".format(*dat) \\\n .replace(\"None\", \".\")", "def plugin_data_repr(self):", "def __repr__(self):\n return '<HomepageFeatures(title=%r, body=%r, img_path_xs=%r, img_path_sm=%r, img_path_md=%r, img_path_lg=%r, is_active=%r)>' % (self.title, self.body, self.img_path_xs, self.img_path_sm, self.img_path_md, self.img_path_lg, self.is_active)", "def _repr_(self):\n return \"Category of hyperbolic models of {}\".format(self.base())", "def _repr_(self):\n s = '<'\n s += ','.join([ str(v.index()) for v in self.Vrepresentation() ])\n s += '>'\n return s", "def repr_subfigure(self):\n default_kwargs = {'placement': self.subfig_placement,\n 'width': self.subfig_width,\n 'caption': self.caption,\n 'label': self.label}\n\n myfig = self.extension_mapping[self.extension]()\n\n return self.subfig_str.format(myfig=myfig, **default_kwargs)", "def __str__(self):\n return 'GradientAnisotropicDiffusion:\\n' \\\n ' time_step: {self.time_step}\\n' \\\n ' conductance: {self.conductance}\\n' \\\n ' conductance_scaling_update_interval: {self.conductance_scaling_update_interval}\\n' \\\n ' no_iterations: {self.no_iterations}\\n' \\\n .format(self=self)", "def show_feature_summary(df, colname, display_uniques=False):\n\tprint('Details of feature:',colname)\n\tprint(' - datatype:',df[colname].dtypes)\n\tprint(' - col.size:',df[colname].shape)\n\tprint(' - NaN.vals:',df[colname].isnull().sum())\n\tif (display_uniques): print(' - uniqvals:',get_unique_values(df, colname))\n\tif (display_uniques): print(' - cnt.vals:',get_unique_counts(df, colname))\n\tprint(\"\\n\")", "def __repr__(self):\n\n return f\"Ufd(\"\\\n f\"title=\\\"{self.title}\\\",\"\\\n f\" icon=\\\"{self.icon}\\\",\"\\\n f\" show_hidden={self.show_hidden},\"\\\n f\" include_files={self.include_files},\"\\\n f\" multiselect={self.multiselect},\"\\\n f\" select_dirs={self.select_dirs},\"\\\n f\" select_files={self.select_files},\"\\\n f\" unix_delimiter={self.unix_delimiter})\"\\\n f\" stdout={self.stdout})\"\\\n f\" @ {hex(id(self))}\"", "def __repr__(self) -> str:\n return_string = str()\n\n return_string += f\"Representation of dataset with {len(self.internal_types)} elements:\\n\"\n return_string += f\"List of categories:\\t{self.internal_types}\\n\"\n return_string += f\"First and last 5 features:\\n\"\n for i in range(5):\n return_string += f\"\\t{self.internal_data[i]}\\n\"\n return_string += 
f\"\\t...\\n\"\n for i in range(4, -1, -1):\n return_string += f\"\\t{self.internal_data[i]}\\n\"\n return_string += \"For more information, use debugger.\"\n\n return return_string", "def display_facet(model_name, vertices, faces, plot_type, display_normals=False, scale=0.2):\n # Separate the coordinates of the vertices\n x = vertices[:, 0]\n y = vertices[:, 1]\n z = vertices[:, 2]\n\n # Display the model\n ax = Axes3D(plt.figure())\n if plot_type == 'Facet':\n ax.plot_trisurf(x, y, z, triangles=faces, color=(1, 1, 1, 1), edgecolor='gray')\n elif plot_type == 'Wireframe':\n ax.plot_trisurf(x, y, z, triangles=faces, color='none', edgecolor='black')\n ax.grid(True)\n set_equal(ax)\n\n ax.set_title(model_name, size='14')\n ax.set_xlabel('X', size='12')\n ax.set_ylabel('Y', size='12')\n ax.set_zlabel('Z', size='12')\n\n # Set the tick label size\n ax.tick_params(labelsize=12)\n\n if display_normals:\n\n # Vector from origin to vertices\n r = zeros([vertices.shape[0], 3])\n\n for i in range(vertices.shape[0]):\n r[i] = [vertices[i][0], vertices[i][1], vertices[i][2]]\n\n for i in range(faces.shape[0]):\n a = r[faces[i][1]] - r[faces[i][0]]\n b = r[faces[i][2]] - r[faces[i][1]]\n\n # Outward normal\n normal = cross(a, b) + 0.\n\n # Scale the size of the arrow to be displayed\n normal *= scale\n\n # Put the arrow at the center of the facet\n mean_r = (r[faces[i][0]] + r[faces[i][1]] + r[faces[i][2]]) / 3.0\n\n # Get the arrow for the normal\n arrow = Arrow3D([mean_r[0], mean_r[0] + normal[0]], [mean_r[1], mean_r[1] + normal[1]],\n [mean_r[2], mean_r[2] + normal[2]], mutation_scale=10, lw=1, arrowstyle=\"-|>\", color=\"r\")\n ax.add_artist(arrow)\n\n plt.show()", "def __html__(self) -> str:\n components = [\n f'{self.name}' if self.name else '',\n f'{self.repository}',\n ]\n return ', '.join([component for component in components if component])", "def _repr_html_(self):\n return util.tree_sequence_html(self)", "def __str__(self):\n return '<TuebingenMEG: %i samples, %i timepoints, %i channels>' \\\n % (self.nsamples, self.ntimepoints, len(self.channelids))", "def __repr__(self):\n #if self.parent: self.print_siblings()\n self.env.render()\n #self._print_children()\n return ''", "def disp(df):\n display(HTML(df.to_html(index=False)))", "def info(cls):\n return 'Xray plots'", "def vgg16(tensorized, **kwargs):\n return _vgg('vgg16', 'D', False, tensorized, **kwargs)", "def show_add_tag():\n\n return render_template(\"tags/create_tag.html\")", "def __repr__(self):\n return \"<Glove_object num_tokens.{} vec_dim.{}>\".format(self.num_tokens, self.dimension)", "def __str__(self):\n return_string = self.name + \"\\n\" + str(self.traits)\n\n return return_string", "def tagviews(tab,text,x,y):\r\n font = cv2.FONT_HERSHEY_SIMPLEX\r\n viseg=cv2.putText(tab,text,(x, y), font,0.3,white,1)\r\n return viseg", "def facets(self, *args, **kwargs) -> Any:\n pass" ]
[ "0.5747352", "0.5307675", "0.5240231", "0.51319224", "0.5109449", "0.50896925", "0.50568515", "0.5052512", "0.49906644", "0.49706864", "0.49553403", "0.4881878", "0.48760134", "0.48600367", "0.48523626", "0.48476768", "0.48415962", "0.4828744", "0.48147592", "0.48147592", "0.47993487", "0.47962895", "0.47950158", "0.47798023", "0.4772765", "0.47498557", "0.47449854", "0.47425482", "0.47322235", "0.47120026", "0.47047177", "0.47033414", "0.46988562", "0.467399", "0.46711767", "0.46659702", "0.4656231", "0.46557105", "0.4650745", "0.4647869", "0.4647869", "0.46467742", "0.4640816", "0.46390423", "0.4613031", "0.46127227", "0.4609037", "0.46079636", "0.46077785", "0.4607198", "0.45955825", "0.45918816", "0.45888326", "0.45871332", "0.45803693", "0.45731953", "0.4566698", "0.4566698", "0.45615384", "0.45601913", "0.45555076", "0.45546812", "0.45500928", "0.45466152", "0.45459723", "0.45443428", "0.4537449", "0.45368305", "0.45361748", "0.45359376", "0.4531019", "0.45252082", "0.4524984", "0.45223972", "0.4518903", "0.4516684", "0.45141336", "0.45113143", "0.45086077", "0.45031878", "0.45016375", "0.45012504", "0.4500491", "0.44988868", "0.44962525", "0.4495155", "0.4488067", "0.44855222", "0.44847617", "0.44775084", "0.4469307", "0.4468035", "0.44662505", "0.44586495", "0.44563362", "0.44549912", "0.44520074", "0.44472295", "0.44357324", "0.44352126" ]
0.62843364
0
BCE summed over the voxel intensities.
def bce_on_intensities(x, recon_x, scale_b):
    bce = torch.sum(
        F.binary_cross_entropy(recon_x, x) / scale_b.exp() + 2 * scale_b)
    return bce
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _vce(self):\n sum = 0.0\n for sail in self.sails:\n cl2 = sail.cl(self.awa)**2\n cd2 = sail.cd(self.awa)**2\n sum += sail.area * sail.vce * sail.bk * np.sqrt(cl2+cd2)\n self._area()\n deltaCH = 0 if self.sails[1].up!=True else (1-self.ftj)*0.05*self.sails[1].IG\n Zce = sum/(self.area*np.sqrt(self.cl**2+self.cd**2)) - deltaCH\n return (Zce*(1-0.203*(1-self.flat)-0.451*(1-self.flat)*(1-self.fractionality)))", "def barycenter(self):\n _value = (sum((v[0] for v in self.objects.values())),sum((v[1] for v in self.objects.values())))\n if self.objects:\n _value = (_value[0]/len(self.objects), _value[1]/len(self.objects))\n self.bc=_value\n return _value", "def energy_due_to_vibrations(self, T, cf):\n E = 0.0\n for key, value in self.eci_per_kbT:\n E += value * cf[key] * kB * T\n return E", "def SumaryCompras(vj):\n\n vj.CompasCUC = vj.MontoPrecios = vj.GanancPrecios = 0.0\n\n for row in vj.tbCompras.rows.values():\n prec = vj.MD.Convert( row.precio, row.moneda, MD.Cuc ) # Siempre lleva el precio a CUC\n\n vj.MontoPrecios += ( prec * row.count )\n vj.CompasCUC += row.valCUC\n\n UpdateRecupIdx(vj)\n vj.GanancPrecios = vj.MontoPrecios - vj.MontoInvers", "def nE(self):\n return int(self.vnE.sum())", "def vol_rameaux(x): \r\n return sum([vol_rameau_cat(x, cat) for cat in ['small', 'medium', 'large']])", "def amorphous(x,nw,sizes):\n v = 0\n \n for i in range(1,nw+1):\n v = impurityBis(x,i,sizes[i-1])\n \n return v", "def sum(self):\n return self.vsum", "def nbetas(self):\n return sum(self.beta)", "def _vB0(self,x):\n return 2.4e3*x**0.5/(2+x**0.5)+8.e2", "def val_vec_repr_to_bivector(x):\n t_val = np.zeros(32)\n t_val[1] = x[0]\n t_val[2] = x[1]\n t_val[3] = x[2]\n B_val = gmt_func(t_val, ninf_val)\n B_val[6] += x[3]\n B_val[7] += x[4]\n B_val[10] += x[5]\n return B_val", "def elec_balance(index):\n t = index[0]\n return (\n pulp.lpSum([component_output[i, t] for i in index_elec_out])\n - pulp.lpSum([component_input[i, t] for i in index_elec_in])\n + elec_from_grid[t]\n - elec_to_grid[t]\n + pulp.lpSum([storage_disch[i, t] for i in elec_storage_names])\n - pulp.lpSum([storage_ch[i, t] for i in elec_storage_names])\n + elec_unserve[t]\n - elec_dump[t]\n == forecast[\"elec_load\"][t]\n )", "def woe_iv_continuous(self):\n df = self.predictors.copy()\n df['target'] = self.target.copy()\n IV_dict = {}\n woe_dict = {}\n\n for col in self.predictors.columns:\n # binning values\n bins = np.linspace(df[col].min()-0.1, df[col].max()+0.1, int(0.05* self.predictors.shape[0])) # each bin should have at least 5% of the observation\n groups = df.groupby(np.digitize(df[col], bins))\n df[col] = pd.cut(df[col], bins)\n\n # getting class counts for each bin\n count_series = df.groupby([col, 'target']).size()\n new_df = count_series.to_frame(name = 'size').reset_index()\n\n new_df['size'] = new_df['size'] + 0.5\n df1 = new_df[new_df['target']==0].reset_index(drop=True)\n df2 = new_df[new_df['target']==1].reset_index(drop=True)\n df1['size1'] = df2['size']\n new_df = df1.drop(columns=['target'])\n sum_ = new_df['size'].sum()\n sum1 = new_df['size1'].sum()\n # Calculate woe and IV\n new_df['woe'] = np.log((new_df['size']/sum_)/(new_df['size1']/sum1))\n new_df['IV'] = ((new_df['size']/sum_) - (new_df['size1']/sum1)) * new_df['woe']\n new_df = new_df.replace([np.inf, -np.inf], np.nan)\n new_df.dropna(inplace=True)\n woe_dict[col] = new_df.drop(columns=['size','size1'])\n IV_dict[col] = new_df['IV'].sum()\n return woe_dict, IV_dict", "def _etaE(self,x):\n return self._etaE_cool(x) + self._etaE_hot(x)", "def 
get_total_BMA_effect_size(self):\n \n if self.total_bma_es is None:\n # clean up these long expressions on Isle 2\n log_evidences = [self.results[kernel].summary(b=self.b)['evidence']['md'] \n for kernel in self.kernel_dict.keys()] + \\\n [self.results[kernel].summary(b=self.b)['evidence']['mc'] \n for kernel in self.kernel_dict.keys()]\n \n M = len(log_evidences)\n Z = logSumExp(log_evidences)\n evidences = np.exp(log_evidences - Z)\n disc_stats = [self.results[kernel].summary(b=self.b)['es_disc_stats'] \n for kernel in self.kernel_dict.keys()]\n nsamples = 50000\n samples = list() \n for i in range(int(M/2)):\n samples += list(np.random.normal(loc=disc_stats[i][0], \n scale=disc_stats[i][1], \n size=int(nsamples*evidences[i])))\n samples += list(np.zeros(nsamples - len(samples)))\n \n if np.sum(np.abs(samples))==0:\n xrange = np.linspace(-2, 2, 500)\n ix = np.argmin((xrange-self.b)**2)\n es_bma = np.zeros((500))\n es_bma[ix] = 1.0/ (xrange[1] - xrange[0])\n else: \n kde_fit = stats.gaussian_kde(samples, bw_method='silverman')\n xrange = np.linspace(np.min(samples), np.max(samples), 500)\n es_bma = kde_fit(xrange)\n self.total_bma_es = np.sum(xrange*es_bma) * (xrange[1]-xrange[0])\n self.total_bma_pdf = (xrange, es_bma)\n return self.total_bma_es", "def calculate_E(self):\n \n E = 0\n for i in xrange(self.size):\n Ei = self.h[i]\n Ei += 0.5*sum((1 if self.spins[j] else -1)*self.J[i,j] for j in self.adjacency[i])\n if not self.spins[i]:\n Ei *= -1\n E += Ei\n \n return E", "def bbxes_data(img):\n _, _, stats, centr = cv2.connectedComponentsWithStats(img)\n # (xCentr, yCentr, area, width, height, xStart, xEnd , yStart, yEnd )\n return sorted([(cent[0], cent[1], stat[4], stat[2], stat[3], stat[0], stat[0] + stat[2], stat[1], stat[1] + stat[3])\n for cent, stat in zip(centr[1:], stats[1:])])", "def computeCMBY(d0):\n # N.B. 
Reshaping operations required to go between 2D pixel arrays and \n # 1D vector (for linear system)\n d2 = 0\n for freq in range(nFreq):\n d1 = d0[freq].data.copy().reshape((ny,nx))\n d1 *= ninvs[freq]\n a_l = fft.fft(d1,axes=[-2,-1])\n a_l *= beams[freq]*precond_2d\n d1 = numpy.real(fft.ifft(a_l,axes=[-2,-1],normalize=True))\n d1 = numpy.reshape(d1,(nx*ny))\n d2 += d1\n return d2", "def b_plus_bstar(self):\n running_total = 0\n for i in range(self.prob.num):\n if self.alphas[i] > 1e-5 and self.prob.Y[i] == 1:\n ayxx = 0\n for j in range(self.prob.num):\n ayxx += (self.alphas[j] + self.etas[j]) * self.prob.Y[j] * self.prob.xkernel(self.prob.X[j],\n self.prob.X[i])\n abcxx = 0\n for j in range(self.prob.num):\n abcxx += (self.alphas[j] + self.deltas[j]) * self.prob.xkernel(self.prob.X[j], self.prob.X[i])\n abcxx *= (1 / self.prob.gamma)\n running_total += 1 - abcxx - ayxx\n return running_total", "def acc_b_v(self):\r\n return self._acc_b_v", "def cal_et(self):\r\n\r\n for ind in range(2**(4*self.k)):\r\n i=0\r\n num = int(bin(ind)[2:])\r\n aux = listarNum(num)\r\n list_num=np.array([])\r\n while i < 4*self.k:\r\n if len(aux) < 4*self.k-i:\r\n list_num=np.append(list_num, [0.])\r\n elif len(aux)==4*self.k-i:\r\n list_num=np.append(list_num, aux)\r\n i=i+1\r\n \"\"\"\r\n reversed_list_num = list_num[::-1]\r\n self.et[ind]=reversed_list_num\r\n \"\"\"\r\n self.et[ind]=list_num", "def wce(B):\n return eme*B", "def woe_iv_categ(self):\n df = self.predictors.copy()\n df['target'] = self.target.copy()\n IV_dict = {}\n woe_dict = {}\n\n for col in self.predictors.columns:\n # binning values\n bins = np.linspace(df[col].min()-0.1, df[col].max()+0.1, len(set(df[col]))) # each bin should have at least 5% of the observation\n groups = df.groupby(np.digitize(df[col], bins))\n df[col] = pd.cut(df[col], bins)\n\n # getting class counts for each bin\n count_series = df.groupby([col, 'target']).size()\n new_df = count_series.to_frame(name = 'size').reset_index()\n\n new_df['size'] = new_df['size'] + 0.5\n df1 = new_df[new_df['target']==0].reset_index(drop=True)\n df2 = new_df[new_df['target']==1].reset_index(drop=True)\n df1['size1'] = df2['size']\n new_df = df1.drop(columns=['target'])\n sum_ = new_df['size'].sum()\n sum1 = new_df['size1'].sum()\n # Calculate woe and IV\n new_df['woe'] = np.log((new_df['size']/sum_)/(new_df['size1']/sum1))\n new_df['IV'] = ((new_df['size']/sum_) - (new_df['size1']/sum1)) * new_df['woe']\n new_df = new_df.replace([np.inf, -np.inf], np.nan)\n new_df.dropna(inplace=True)\n woe_dict[col] = new_df.drop(columns=['size','size1'])\n IV_dict[col] = new_df['IV'].sum()\n return woe_dict, IV_dict", "def block_sum(i, bins, C, n_u):\n s= 0.0\n for j in range(bins[i], bins[i+1]):\n for k in range(bins[i], bins[i+1]):\n s+= C[j][k]*n_u[j]*n_u[k]\n return s", "def Ensemble_field(self,c_value):\n if (np.sum(self.cmask) != len(c_value)):\n raise ValueError(\"weights should have the same size as cmask\")\n \n Lbox,RG,xpk = self.attrs['Lbox'],self.RG,self.xpk\n H0,F = self.cosmo.H0,self.cosmo.F\n \n if self.xij_tensor_inv is None:\n print (\"Building Xij matrix ...\")\n self.build_Xij_inv_matrix() \n \n kgrid = initialize_kgrid(self.attrs['Nmesh'],Lbox)\n kmag_grid = np.linalg.norm(kgrid,axis=3)\n k2 = kmag_grid**2\n k2[0,0,0]=1\n \n # ----------------------------------------------------\n phase = -np.sum(kgrid*xpk,axis=3) \n ampl_field = self.cosmo.Pk_smoothed(kmag_grid,RG)*(1/Lbox**3)\n dk_field = np.complex128(np.zeros_like(ampl_field))\n \n weights = 
np.einsum('ij,j->i',self.xij_tensor_inv,c_value)\n \n cspace = np.arange(0,18)\n for w,i in zip(weights,cspace[self.cmask]):\n dk_field += w * Hhats[i](kgrid,k2,H0,F) * ampl_field * np.exp(1j*phase)\n\n dk_field[0,0,0] = 0.0 # Note that the mean overdensity of the box should be zero\n \n #---------------------------------------------------- \n dx_field = (self.attrs['Nmesh']**3)*np.fft.ifftn(dk_field).real\n\n return dx_field", "def nC(self):\n return int(self.vnC.prod())", "def energy_balance_func(self):\n residual = []\n T_in = T_mix_ph(self.inl[0].get_flow(), T0=self.inl[0].T.val_SI)\n for o in self.outl:\n residual += [T_in - T_mix_ph(o.get_flow(), T0=o.T.val_SI)]\n return residual", "def amount_5_conto_energia(self, production, verbose=False):\n en_autocons = round(production*self.perc_autocons,2) \n energia_immessa_in_rete = round(production - en_autocons, 2)\n tot_incent_autocons = round(en_autocons*self.incent_5_autocons,3)\n tot_incent_omnic = round(self.incent_5_omnic*energia_immessa_in_rete,3)\n tot_incent_EU = round(production*self.incent_5_EU,2)\n\n tot_5 = tot_incent_autocons + tot_incent_omnic + tot_incent_EU - self.spese_5\n if verbose:\n print( \"production\" , production)\n print( \"tot_incent_autocons\", tot_incent_autocons)\n print( \"tot_incent_omnic\" , tot_incent_omnic)\n print( \"tot_incent_EU\" , tot_incent_EU)\n print( \"spese\" , spese)\n return tot_5", "def bic(self, x):\n x = self.check_size(x)\n n = x.shape[0]\n\n # Free parameters for covariance, means and mixture components\n free_params = self.n_features * self.n_components + self.n_features + self.n_components - 1\n\n bic = -2. * self.__score(x, as_average=False).mean() * n + free_params * tf.math.log(n)\n\n return bic", "def _etaE_cool(self,x):\n return self._eta_sfr_scaling(x,'E_cool')", "def SumaryVentas(vj):\n\n vj.MontoVentas = vj.GanacVentas = 0.0 # Inicializa sumarios de ventas\n vj.MontoConsumo = vj.GanacConsumo = vj.MontoConsumoRecp = 0.0 # Inicializa sumarios de items de consumo\n vj.NumChgPrecio = vj.MontoChgPrecio = 0.0 # Inicializa sumarios de cambios de precio\n vj.NumDevoluc = vj.MontoDevoluc = 0.0 # Inicializa sumarios de devoluciones\n vj.NumSinPagar = vj.MontoSinPagar = 0.0 # Inicializa sumarios de Items sin pagar \n vj.NumSinVender = vj.MontoSinVender = 0.0 # Inicializa sumarios de Items sin vender \n\n GroupVentas = {} # Dicionario para contar las ventas por preductos\n\n for idVenta, row in vj.tbVentas.rows.items():\n Cant = row.count\n idProd = row.idProd\n\n if idProd in GroupVentas: GroupVentas[idProd] += Cant # Acumula la cantidad de ventas por producto\n else: GroupVentas[idProd] = Cant\n\n rowProd = vj.tbCompras.rows.get(idProd) # Busca datos de item asociado a la venta\n if not rowProd: continue\n\n montoProd = vj.Cnv( Cant*rowProd.precio, rowProd.moneda, MD.Cuc ) # Monto al precio del item en CUC\n\n if row.vendedor == vj.Vendedores[0]: # Item para consumo\n costo = Cant * rowProd.valCucItem\n costoRcp = costo * vj.RecupIdx\n\n vj.MontoConsumo += costo # Acumula costos de compra\n vj.MontoConsumoRecp += costoRcp # Acumula costos de recuperación\n vj.GanacConsumo += ( montoProd-costoRcp )\n continue # No hace más analisis para esa venta\n\n precioVenta = vj.Cnv( row.precio, row.moneda, MD.Cuc) # Lleva precio de la venta a CUC\n montoVenta = Cant * precioVenta # Calcula el monto de la venta en CUC\n\n vj.MontoVentas += montoVenta # Acumula todos los montos de las ventas\n\n if montoProd != montoVenta: # Cambio el precio del producto en la venta\n vj.NumChgPrecio += Cant # Acumula 
# de items que cambian de precio\n vj.MontoChgPrecio += (montoVenta-montoProd) * Cant # Acumula las diferencias de precio\n\n if len(row.comentario): # Si hay comentarios\n matches = reNDevuelto.findall( row.comentario ) # Busca la cantidad de items devueltos\n for match in matches: # Para cada devolución\n Num = int(match) # Convierte a entero la cantidad de devoluciones\n\n vj.NumDevoluc += Num # Acumula de cantidad de devoluciones\n vj.MontoDevoluc += ( Num*precioVenta ) # Acumula el precio de las devoluciones\n\n Pago = GetPagado( vj, idVenta, MD.Cuc ) # Determina la cantidad de la venta pagada\n SinPagar = montoVenta - Pago # Calcula lo que queda sin pagar\n\n if precioVenta!=0: # Si ya hay un precio establecido\n vj.NumSinPagar += SinPagar/precioVenta # Acumula el # de items sin pagar\n\n vj.MontoSinPagar += SinPagar # Acumula el monto sin pagar\n\n vj.GanacVentas = vj.MontoVentas - vj.MontoInvers # Calcula las ganancias totales por ventas\n\n for idProd, row in vj.tbCompras.rows.items(): # Recorre todos los productos\n Resto = row.count # Inicializa productos que quedan (todos)\n if idProd in GroupVentas: Resto -= GroupVentas[idProd] # Quita la cantidad de productos vendidos\n\n if Resto <= 0: continue # Si todos estan vendidos no hace mas nada\n\n Precio = vj.Cnv( row.precio, row.moneda, MD.Cuc ) # Lleva el precio del producto a cuc\n\n vj.NumSinVender += Resto # Acumula la cantidad de productos sin vender\n vj.MontoSinVender += ( Resto*Precio ) # Acumula el precio de los productos sin vender", "def _ebit(self):\n return self.net_income + self.tax_expense + self.interest_expense", "def energies_kev(self):\n\n if not self.is_calibrated:\n raise UncalibratedError('Spectrum is not calibrated')\n else:\n return bin_centers_from_edges(self.bin_edges_kev)", "def vbmstep(self):\n for k in range(self.k):\n self.beta_k[k] = self.beta_0 + self.counts[k]\n self.m_k[k] = (1 / self.beta_k[k]) * (self.beta_0 * self.m_0 +\n self.counts[k] * self.means[k])\n\n tmp = (self.beta_0 * self.counts[k]) / (self.beta_0 + self.counts[k])\n tmp2 = (self.means[k] - self.m_0)\n tmp = np.linalg.inv(self.W_0) + self.counts[k] * self.covars[k] + tmp * tmp2 @ tmp2.T\n self.w_k[k] = np.linalg.inv(tmp)\n self.nu_k[k] = self.nu_0 + self.counts[k]\n self.alpha_k[k] = self.alpha_0[k] + self.counts[k]", "def TB(t,init,rhoS,deltaSC,rhoC,deltaCB,rhoB):\n\n y=SCB(t,init,rhoS,deltaSC,rhoC,deltaCB,rhoB)\n T=np.sum(y,axis=0)\n Y=np.vstack((T,y[2]))\n return(Y)", "def tot_KE(V):\n KE = 0.0\n for i in range(len(V)):\n for j in range(3):\n KE += (V[i, j] * V[i, j]) / 2.0\n return KE", "def x(self):\n return np.sum(self.bbox, 0)[0] / 2", "def cost_b_v(self):\n return self._cost_b_v", "def get_exchanged_euros(model):\n exchanged_euros = np.sum([v.exchanged_euros for k, v in model.schedule.agents_by_type['Customer'].items()])\n return round(float(np.sum(exchanged_euros)), 2)", "def calculateElementBoundaryCoefficients(self):\n pass", "def compute_bias(ics, vbc):\n import os, time\n from seren3.array import SimArray\n \n # Compute size of grid and boxsize (for this patch)\n N = vbc.shape[0]\n boxsize = ics.boxsize.in_units(\"Mpc a h**-1\") * (float(N) / float(ics.header.N))\n\n # Compute vbc @ z=1000\n z = ics.z\n rms = vbc_rms(vbc)\n rms_recom = rms * (1001./z)\n\n # Check for PS and run CICsASS if needed\n fname_vbc0 = vbc_ps_fname(0., z, boxsize)\n if not os.path.isfile(fname_vbc0):\n exit_code = run_cicsass(boxsize, z, 0., fname_vbc0)\n\n fname_vbcrecom = vbc_ps_fname(rms_recom, z, boxsize)\n if not 
os.path.isfile(fname_vbcrecom):\n exit_code = run_cicsass(boxsize, z, rms_recom, fname_vbcrecom)\n\n # Load power spectra and compute bias\n ps_vbc0 = np.loadtxt(fname_vbc0, unpack=True)\n ps_vbcrecom = np.loadtxt(fname_vbcrecom, unpack=True)\n\n # Should have same lenghts if finished writing\n count = 0\n while len(ps_vbcrecom[1]) != len(ps_vbc0[1]):\n count += 1\n if count > 10:\n raise Exception(\"Reached sleep limit. Filesizes still differ\")\n time.sleep(5)\n ps_vbc0 = np.loadtxt(fname_vbc0, unpack=True)\n ps_vbcrecom = np.loadtxt(fname_vbcrecom, unpack=True)\n\n #CDM bias\n b_cdm = ps_vbcrecom[1] / ps_vbc0[1]\n # Baryon bias\n b_b = ps_vbcrecom[2] / ps_vbc0[2]\n # Wavenumber\n k_bias = SimArray(ps_vbcrecom[0] / ics.cosmo[\"h\"], \"h Mpc**-1\")\n\n return k_bias, b_cdm, b_b", "def tc(self):\n return np.sum(self.tcs)", "def summation(self):\n return sum(self.read_ints())", "def test00(self):\n N = self.N\n a = bcolz.fromiter(xrange(N), dtype=np.float64, count=N,\n rootdir=self.rootdir)\n l, s = 0, 0\n for block in bcolz.iterblocks(a):\n l += len(block)\n s += block.sum()\n self.assertEqual(l, N)\n # as per Gauss summation formula\n self.assertEqual(s, (N - 1) * (N / 2))", "def tot_neg_elbo(self):\n\n\n self.neg_elbo = tf.reduce_sum([-self.experts[i].elbo((self.X[self.partition[i]],self.Y[self.partition[i]])) for i in range(self.M)])\n \n return self.neg_elbo", "def bel(self, element):\n if element.is_empty():\n return 0\n\n if self.is_empty():\n return 0\n\n if not element.is_compatible(next(iter(self.focals))):\n return 0\n \n result = 0\n for focal, value in self.items():\n if not focal.is_empty() and focal.is_subset(element):\n result += value\n return round(result, 6)", "def SumaryGastos(vj):\n\n vj.GastosCUC = 0.0\n for row in vj.tbGastos.rows.values():\n vj.GastosCUC += row.valCuc\n\n UpdateRecupIdx(vj)", "def compute_mixing_coefficients_bot(self):\n [Ly,N] = self.b.shape\n z_u_w = self.grid_dict['z_u_w']\n\n v_upts = TTTW_func.v2u(self.v)\n\n self.sigma_bot = []\n self.Kv0 = np.zeros([Ly,N+1])\n self.Kt0 = np.zeros([Ly,N+1])\n for j in range(Ly):\n # turbulent velocity sclaes with buoyancy effects neglected\n ustar2 = self.r_D[j] * np.sqrt(self.u[j,0]**2 + v_upts[j,0]**2)\n wm = self.vonKar * np.sqrt(ustar2)\n ws = wm\n \n for k in range(1,N):\n k_w = k\n k_r = k - 1\n\n if k_w < self.kbl[j]: # NEED Zob\n sigma = np.min( [ ((z_u_w[j,k_w] - z_u_w[j,0] + self.Zob) / (self.hbbl[j] + self.Zob)),1.])\n if j ==1:\n self.sigma_bot.append(sigma)\n a1 = sigma - 2.\n a2 = 3. - 2.*sigma\n a3 = sigma - 1.\n\n self.Kv0[j,k_w] = wm * self.hbbl[j] * ( sigma * (1. + sigma * ( a1 + a2*self.Gm1_bot[j]+a3*self.dGm1_dS_bot[j]))) \n self.Kt0[j,k_w] = ws * self.hbbl[j] * ( sigma * (1. 
+ sigma * ( a1 + a2*self.Gt1_bot[j]+a3*self.dGt1_dS_bot[j])))", "def calculateAndAddComponents(img: ee.Image) -> ee.Image:\n img = img.select(coeffs[\"bands\"])\n components = [\n img.multiply(coeffs[comp]).reduce(ee.Reducer.sum()).rename(comp)\n for comp in [\"TCB\", \"TCG\", \"TCW\"]\n ]\n return img.addBands(components)", "def sebox(r=1):\n\n B = sesum(binary([[1,1,1],\n [1,1,1],\n [1,1,1]]),r)\n return B", "def gen_occupation_vector(state, states):\n\n # initialise vector of occupation numbers\n occ_vector = np.zeros([len(states[0])])\n\n # we square the weight because one contribution from bra and one from ket\n for i, istate in enumerate(states):\n occ_vector += (abs(state[i])**2)*istate\n\n return occ_vector", "def sumsquares(self):\n return np.dot((self.demeaned ** 2).T, self.weights)", "def aveEz2CC(self):\n if self.dim < 3:\n return None\n if getattr(self, '_aveEz2CC', None) is None:\n # The number of cell centers in each direction\n n = self.vnC\n if(self.dim == 3):\n self._aveEz2CC = kron3(speye(n[2]), av(n[1]), av(n[0]))\n return self._aveEz2CC", "def test01(self):\n a = np.arange(1e5)\n sa = a.sum(dtype='i8')\n ac = bcolz.carray(a)\n sac = ac.sum(dtype='i8')\n # print \"numpy sum-->\", sa\n # print \"carray sum-->\", sac\n self.assertTrue(sa.dtype == sac.dtype,\n \"sum() is not working correctly.\")\n self.assertTrue(sa == sac, \"sum() is not working correctly.\")", "def calc_Cinv_HEX(Q_design_W, gV):\n if Q_design_W > 0:\n InvC = 3000 # after A+W\n\n if Q_design_W >= 50000 and Q_design_W <= 80000:\n InvC = 3000 + 2.0/30 * (Q_design_W - 50000) # linear interpolation of A+W data\n\n if Q_design_W >= 80000 and Q_design_W < 100000:\n InvC = 5000.0\n #print \"A\"\n\n if Q_design_W > 100000:\n InvC = 80 * Q_design_W / 1000.0 - 3000\n #print \"B\"\n\n InvCa = InvC * gV.Subst_i * (1+ gV.Subst_i) ** gV.Subst_n / ((1+gV.Subst_i) ** gV.Subst_n - 1)\n\n else:\n InvCa = 0\n\n return InvCa", "def b_minus_bstar(self):\n running_total = 0\n for i in range(self.prob.num):\n if self.alphas[i] > 1e-5 and self.prob.Y[i] == -1:\n ayxx = 0\n for j in range(self.prob.num):\n ayxx += (self.alphas[j] + self.etas[j]) * self.prob.Y[j] * self.prob.xkernel(self.prob.X[j],\n self.prob.X[i])\n abcxx = 0\n for j in range(self.prob.num):\n abcxx += (self.alphas[j] + self.deltas[j]) * self.prob.xkernel(self.prob.X[j], self.prob.X[i])\n abcxx *= (1 / self.prob.gamma)\n running_total += -1 + abcxx - ayxx\n return running_total", "def zeta(self):\n r\"\"\"\n zeta_\\Lambda is defined as follow:\n for each fermionic box, count the number of bosonic boxes that\n are over it. 
Sum all these numbers, this is zeta_Lambda.\n \"\"\"\n fermionic_cells = self.fermionic_cells()\n bosonic_cells = self.bosonic_cells()\n zetas = [1\n for bos_box in bosonic_cells\n for fer_box in fermionic_cells\n if (bos_box[0] < fer_box[0] and bos_box[1] == fer_box[1])]\n return sum(zetas)", "def cumulative_energy_vertical(img):\n e = energy_map(img) # Total energy \n M = np.zeros((e.shape[0], e.shape[1]), dtype=type(e)) #To store cumulative minimum energy\n row,col = e.shape\n M[0] = e[0] #First row is same as energy_map first row\n for i in range(1,row):\n for j in range(0,col):\n if j == 0:\n M[i,j] = e[i,j] + min(M[i-1,j],M[i-1,j+1])\n elif j == col-1:\n M[i,j] = e[i,j] + min(M[i-1,j-1],M[i-1,j])\n else:\n M[i,j] = e[i,j] + min(M[i-1,j-1],M[i-1,j],M[i-1,j+1]) \n return M", "def cpskev(self):\n\n return self.cps / self.bin_widths", "def energy(self):\n sum_energy = 0.0\n for i in range(0,self.natoms-1):\n for j in range(i+1,self.natoms):\n rij = (self.atoms[i].xyz - self.atoms[j].xyz)\n rij = rij - self.pbc_correction(rij)\n mag_rij = la.norm(rij)\n sum_energy = sum_energy + self.pair_energy(self.epsilon, self.sigma, mag_rij) \n return sum_energy", "def NPV(B,C,BV,CV,d,pb,pc):\n b=[BV[0] if x=='L' else BV[1] for x in B] #decoding revenue\n c=[CV[0] if x=='L' else CV[1] for x in C] #decoding cost\n z=[b_i - c_i for b_i, c_i in zip(b, c)] #profit at each time\n npv=np.npv(d, z)\n pnpv=pb*pc\n return (npv,pnpv)", "def sum_over_energy(self):\n # Note that the array is using the opposite convention from WCS\n # so we sum over axis 0 in the array, but drop axis 2 in the WCS object\n return Map(np.sum(self.counts, axis=0), self.wcs.dropaxis(2))", "def calcAllIntensities(self, xc, yc):\n\n tp = 0.0\n ix = 0\n iy = 0\n h = 0\n ints = np.zeros([5, 5])\n ints_inner = np.zeros([5, 5])\n # ints = [[0.0] * 5] * 5\n # ints_inner = [[0.0] * 5] * 5\n x = 0.0\n y = 0.0\n xc1 = 0.0\n yc1 = 0.0\n xc1 = xc\n yc1 = yc\n \n for h in np.arange(1,5,1):\n for k in np.arange(1,5,1):\n ints[h][k] = 0.0\n ints_inner[h][k] = 0.0\n\n for k in np.arange(0, 2, 1):\n for h in np.arange(0, 2, 1):\n for ix in np.arange(0, self.stepp + 1, 1):\n for iy in np.arange(0, self.stepp + 1, 1):\n #print(k, h, ix, iy)\n if self.qc_format == 0 :\n x = -(1 + self.G) + h * (1 + 2 * self.G) + (ix * (1.0 / self.stepp))\n y = -(1 + self.G) + k * (1 + 2 * self.G) + (iy * (1.0 / self.stepp))\n if self.spot_radius == 0 or math.sqrt(math.pow((x - xc1),2) + math.pow((y - yc1),2)) == 0 :\n tp = 0.0\n else :\n tp = (math.sin((1 / self.spot_radius) * math.sqrt(math.pow((x - xc1),2) + math.pow((y - yc1),2)))) / ((1 / self.spot_radius) * math.sqrt(math.pow((x - xc1),2) + math.pow((y - yc1),2)))\n tp = math.pow(tp,2)\n #print(tp)\n elif self.qc_format == 1 :\n x = -1 + h + (ix * (1 / self.stepp))\n y = -1 + k + (iy * (1 / self.stepp))\n ints[h + 1][k + 1] += math.pow(math.exp((math.pow((x - xc1),2) + math.pow((y - yc1),2) ) / math.pow(self.spot_radius,2)), -1)\n if (self.spot_radius * self.spot_radius) == 0 or ((x - xc1) * (y - yc1) * np.pi * np.pi) == 0 :\n tp = 0.0\n else :\n tp = (math.sin((x - xc1) * np.pi / self.spot_radius) * math.sin((y - yc1) * np.pi / self.spot_radius)) / (((x - xc1) * (y - yc1) * np.pi * np.pi) / (self.spot_radius * self.spot_radius))\n\n if (math.pow(x,2) + math.pow(y,2)) <= math.pow(self.radius_inner,2):\n ints_inner[h + 1][k + 1] += tp\n else :\n if self.qc_format == 1 :\n if (math.pow(x,2) + math.pow(y,2)) <= math.pow(self.cell_qc, 2):\n ints[h + 1][k + 1] += tp\n if (math.pow(x,2) + math.pow(y,2)) <= 1 :\n 
#print(math.pow(x,2) + math.pow(y,2))\n ints[h + 1][k + 1] += tp\n # print(ints[h + 1][k + 1])\t\t\t\t\t\t\n tp = 0.0\n\n # print(ints)\n\n Aq = 0.0\n Bq = 0.0\n Cq = 0.0\n Dq = 0.0\n Ac_inner = 0.0\n Bc_inner = 0.0\n Cc_inner = 0.0\n Dc_inner = 0.0\n Ac = 0.0\n Bc = 0.0\n Cc = 0.0\n Dc = 0.0\n Ac = ints[1][2]\n Bc = ints[2][2]\n Cc = ints[2][1]\n Dc = ints[1][1]\n\n Ac_inner = ints_inner[1][2]\n Bc_inner = ints_inner[2][2]\n Cc_inner = ints_inner[2][1]\n Dc_inner = ints_inner[1][1]\n Ac *= self.QE\n Bc *= self.QE\n Cc *= self.QE\n Dc *= self.QE\n\n Ac_inner *= self.QE_inner\n Bc_inner *= self.QE_inner\n Cc_inner *= self.QE_inner\n Dc_inner *= self.QE_inner\n Ac += Ac_inner\n Bc += Bc_inner\n Cc += Cc_inner\n Dc += Dc_inner\n\n Aq = Ac\n Bq = Bc\n Cq = Cc\n Dq = Dc\n\n #tp/TP = cotribution percentage of the spot with respect to max (spot center)\n if self.smooth == 0 :\n if (Config.hplk_c0_e * self.TP) == 0 :\n cnst = 0\n else :\n cnst = ((Parameters.TPS / (self.n_ml * self.n_ml)) * self.lamb) / (Config.hplk_c0_e * self.TP) #Número de fótons efeticos\n if Config.flag_spice == 1 :\n Ac *= Parameters.TPS / (self.n_ml * self.n_ml * self.TP) #W\n Bc *= Parameters.TPS / (self.n_ml * self.n_ml * self.TP)\n Cc *= Parameters.TPS / (self.n_ml * self.n_ml * self.TP)\n Dc *= Parameters.TPS / (self.n_ml * self.n_ml * self.TP)\n Ac *= 1 / (math.pow(self.cell_qc * 1e-6,2)) #W/(m^2)\n Bc *= 1 / (math.pow(self.cell_qc * 1e-6,2))\n Cc *= 1 / (math.pow(self.cell_qc * 1e-6,2))\n Dc *= 1 / (math.pow(self.cell_qc * 1e-6,2))\n #Ac *= 1 / (self.lamb * 1e6); #Adequação da irradiância para a unidade W/m2micm conforme necessário no SPICE\n #Bc *= 1 / (self.lamb * 1e6);\n #Cc *= 1 / (self.lamb * 1e6);\n #Dc *= 1 / (self.lamb * 1e6);\n \n ############################## DOUBLE CHECK ##############################\n # self.grava_arquivos = 1\n # self.flag_V_QC = 0\n # grava_le_arquivos(0) # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n # self.flag_V_QC = 1\n # self.grava_arquivos = 0\n ############################## DOUBLE CHECK ##############################\n Aq *= cnst * 1e9\n Bq *= cnst * 1e9\n Cq *= cnst * 1e9\n Dq *= cnst * 1e9\n else :\n Aq *= cnst * 1e9\n Bq *= cnst * 1e9\n Cq *= cnst * 1e9\n Dq *= cnst * 1e9\n\n # 'returns' all the intensities\n self.A_intensity = Aq\n self.B_intensity = Bq\n self.C_intensity = Cq\n self.D_intensity = Dq", "def __etaBin(self,eta):\n if len(self._etabins)>0:\n return reduce(lambda x,y:x+y,map(lambda x:abs(eta)>x,self._etabins))\n else:\n return 0", "def bayes(j, x, p, q, r):\n tmp = []\n P = [0.653, 0.347]\n c = 2\n for k in range(c):\n res = conditional(x, k, p, q, r) * P[k]\n tmp.append(res)\n num = conditional(x, j, p, q, r) * P[j] * 1.0\n denom = sum(tmp)\n bt = num / denom\n return bt", "def _etap(self,x):\n return self._eta_sfr_scaling(x,'p_cool') + self._eta_sfr_scaling(x,'p_hot')", "def method1(self):\n cres=0. # Variable for storing Chern number.\n # The U matrices from Fukui's method; storage...\n Ux=np.zeros((self.kS.Nx+1,self.kS.Ny+1),dtype=complex)\n Uy=np.zeros((self.kS.Nx+1,self.kS.Ny+1),dtype=complex)\n \n # ... 
and calculation of U matrices\n for ix in range(self.kS.Nx+1):\n for iy in range(self.kS.Ny+1):\n mat1=self.alleigvecs[:,:,ix ,iy ]\n if ix<self.kS.Nx:\n mat2=self.alleigvecs[:,:,ix+1,iy ]\n else:\n mat2=self.alleigvecs[:,:,1 ,iy ]\n if iy<self.kS.Ny:\n mat3=self.alleigvecs[:,:,ix ,iy+1]\n else:\n mat3=self.alleigvecs[:,:,ix ,1 ]\n Ux[ix,iy]=np.linalg.det(np.dot(np.conj(mat1.T),mat2)[:self.NL,:self.NL])\n Uy[ix,iy]=np.linalg.det(np.dot(np.conj(mat1.T),mat3)[:self.NL,:self.NL])\n \n # Local estimates of Berry curvature; storage ...\n ftempall=np.zeros((self.kS.Nx,self.kS.Ny),complex)\n # ... and calculation\n for ix in range(self.kS.Nx):\n for iy in range(self.kS.Ny):\n ftemp=np.log(Ux[ix,iy]*Uy[ix+1,iy]/Ux[ix,iy+1]/Uy[ix,iy])\n ftempall[ix,iy]=ftemp # ... of local Berry curvature ...\n cres+=ftemp/2./pi/1j # ... and of Berry phase (Chern number).\n\n return cres.real, ftempall", "def boundary(self):\n answer = self.zero()\n for k, v in self.items():\n for idx, cube in enumerate(k):\n acc_dim = sum((cube_l.dimension for cube_l in k[:idx]))\n for i in range(cube.dimension):\n for epsilon in (0, 1):\n new_cube = cube.face(i, epsilon)\n new_k = k[:idx] + (new_cube,) + k[idx + 1:]\n sign_exp = (acc_dim + i + epsilon) % 2\n answer += answer.create({new_k: v * (-1)**sign_exp})\n return answer", "def E(x, y):\n # sum of products of neighboring paris {xi, yi}\n xxm = np.zeros_like(x)\n xxm[:-1, :] = x[1:, :] # down\n xxm[1:, :] += x[:-1, :] # up\n xxm[:, :-1] += x[:, 1:] # right\n xxm[:, 1:] += x[:, :-1] # left\n xx = np.sum(xxm * x)\n xy = np.sum(x * y)\n xsum = np.sum(x)\n return h * xsum - beta * xx - eta * xy", "def bla_ipr(x):\n phi = x / np.sqrt(np.sum(x**2))\n return np.sum(phi**4)\n # if s2 < MACH_EPSILON:\n # # Zero sum. Could happen for veeery small overall prevalence.\n # return 0.\n # else:\n # return np.sum(x2 * x2 / (s2 * s2))", "def calcEVals(self):\n self.eVals,self.eVecs = np.linalg.eigh(self.rhoOp)", "def _ss_tot(self):\n squares = np.square(self.y - np.expand_dims(self._ybar, axis=-2))\n if self.w is None:\n return np.sum(squares, axis=-2)\n else:\n return np.sum(np.matmul(self.w_diag, squares), axis=-2)", "def amount_4_conto_energia(self, production, verbose=False):\n en_autocons = round( production * self.perc_autocons) \n energia_immessa_in_rete = production - en_autocons\n tot_incentivo_all_prod = production * self.incentivo_all_prod\n if en_autocons < self.used_external_en:\n tot_incentivo_ssp = self.incentivo_ssp * energia_immessa_in_rete\n else:\n tot_incentivo_ssp = self.incentivo_ssp * energia_immessa_in_rete + \\\n self.eccedenze * (en_autocons - self.used_external_en )\n if verbose:\n print(\"\\nincentivo_ssp: \",self.incentivo_ssp)\n print(\"incentivo_all_prod: \",self.incentivo_all_prod)\n print(\"en_autocons \", en_autocons) \n print(\"energia_immessa_in_rete \", energia_immessa_in_rete)\n print(\"tot_incentivo_all_prod \", tot_incentivo_all_prod) \n print(\"tot_incentivo_ssp \", tot_incentivo_ssp)\n \n \n return tot_incentivo_all_prod + tot_incentivo_ssp - self.spese_4", "def kinetic_energy(self):\r\n position, velocity, escaped_particles,impact, wall_collision,mom = self.box_collision_info()\r\n for j in xrange(1,self.n):\r\n abs_velocity = np.sqrt(velocity[:,0]**2+velocity[:,1]**2\r\n + velocity[:,2]**2)\r\n KE = 0.5*self.m*abs_velocity**2\r\n total_KE = np.sum(KE)\r\n invid_KE = total_KE/self.Npart\r\n\r\n return total_KE, invid_KE", "def compute_final_values(self, x):\n \n values = np.zeros(len(x[0]))\n \n for i in range(len(x)):\n values = values + 
np.array(x.values[i] * self.weights[i])\n \n return values", "def _uism(x):\n # get image channels\n R = x[:, :, 0]\n G = x[:, :, 1]\n B = x[:, :, 2]\n\n # first apply Sobel edge detector to each RGB component\n Rs = sobel(R)\n Gs = sobel(G)\n Bs = sobel(B)\n\n # multiply the edges detected for each channel by the channel itself\n R_edge_map = np.multiply(Rs, R)\n G_edge_map = np.multiply(Gs, G)\n B_edge_map = np.multiply(Bs, B)\n\n # get eme for each channel\n r_eme = eme(R_edge_map, 8)\n g_eme = eme(G_edge_map, 8)\n b_eme = eme(B_edge_map, 8)\n\n # coefficients\n lambda_r = 0.299\n lambda_g = 0.587\n lambda_b = 0.144\n\n return (lambda_r * r_eme) + (lambda_g * g_eme) + (lambda_b * b_eme)", "def ComputeNrb(self):\r\n pass", "def emb(self, entity):\n fv = []\n # fv.append(self.sum_pw(entity))\n fv.append(self.avg_pw(entity))\n fv.append(self.min_pw(entity))\n fv.append(self.max_pw(entity))\n fv.append(self.my_pw(entity))\n fv.append(self.min_e_score(entity))\n fv.append(self.max_e_score(entity))\n fv.append(self.avg_e_score(entity))\n # fv.append(self.new_edges(entity)) # only comes into play for regress.\n return fv", "def effective_cluster_weights(self):\n weights = np.array(\n [\n np.sum(\n self._subspace.function_ordering_multiplicities[\n self._subspace.function_orbit_ids == i\n ]\n * self.eci[self.eci_orbit_ids == i] ** 2\n )\n for i in range(len(self._subspace.orbits) + 1)\n ]\n )\n return weights", "def test02b(self):\n a = np.arange(101)\n b = bcolz.carray(a, chunklen=2, rootdir=self.rootdir)\n # print \"sum iter->\", sum(b.iter(-24, -3))\n self.assertTrue(sum(a[-24:-3]) == sum(b.iter(-24, -3)),\n \"Sums are not equal\")", "def integral_elements_in_box(K, C):\n d = K.degree()\n Z_F = K.maximal_order()\n Foo = K.real_embeddings()\n B = K.reduced_basis()\n\n import numpy\n import numpy.linalg\n L = numpy.array([ [v(b) for b in B] for v in Foo])\n Linv = numpy.linalg.inv(L)\n Vi = [[C[0][0]],[C[0][1]]]\n for i in range(1,d):\n Vi = sum([ [v + [C[i][0]], v + [C[i][1]]] for v in Vi], [])\n V = numpy.matrix(Linv)*(numpy.matrix(Vi).transpose())\n j = 0\n while j < 2**d:\n for i in range(d):\n if V[i,j] < V[i,j+1]:\n V[i,j] = math.floor(V[i,j])\n V[i,j+1] = math.ceil(V[i,j+1])\n else:\n V[i,j] = math.ceil(V[i,j])\n V[i,j+1] = math.floor(V[i,j+1])\n j += 2\n W0 = (Linv*numpy.array([Vi[0]]*d)).transpose()\n W = (Linv*numpy.array([Vi[2**i] for i in range(d)])).transpose()\n for j in range(d):\n for i in range(d):\n if W[i,j] < W0[i,j]:\n W[i,j] = math.floor(W[i,j])\n W0[i,j] = math.ceil(W0[i,j])\n else:\n W[i,j] = math.ceil(W[i,j])\n W0[i,j] = math.floor(W0[i,j])\n M = [[int(V[i,j]) for i in range(V.shape[0])] for j in range(V.shape[1])]\n M += [[int(W0[i,j]) for j in range(W0.shape[0])] for i in range(W0.shape[0])]\n M += [[int(W[i,j]) for j in range(W.shape[1])] for i in range(W.shape[0])]\n\n from sage.matrix.constructor import matrix\n M = (matrix(IntegerRing(),len(M),len(M[0]), M).transpose()).columns()\n\n i = 0\n while i < len(M):\n j = i+1\n while j < len(M):\n if M[i] == M[j]:\n M.pop(j)\n else:\n j += 1\n i += 1\n\n from sage.geometry.lattice_polytope import LatticePolytope\n P = LatticePolytope(M)\n S = []\n\n try:\n pts = P.points()\n except ValueError:\n return []\n\n for p in pts:\n theta = sum([ p.list()[i]*B[i] for i in range(d)])\n inbounds = True\n for i in range(d):\n inbounds = inbounds and Foo[i](theta) >= C[i][0] and Foo[i](theta) <= C[i][1]\n\n if inbounds:\n S.append(theta)\n\n return S", "def feature_energy(wv):\n return np.sqrt(np.sum(wv ** 2, axis=0)).T", "def 
calculate_macs(self) -> None:\n for name, param in self.module.named_parameters():\n if name == \"weight\":\n # ignore N, C when calculate Mult-Adds in ConvNd\n if \"Conv\" in self.class_name:\n self.macs += int(param.nelement() * prod(self.output_size[2:]))\n else:\n self.macs += param.nelement()\n # RNN modules have inner weights such as weight_ih_l0\n elif \"weight\" in name:\n self.macs += param.nelement()", "def _get_box_sizes(self, image_info, cat):\n\n\n file_id=0\n impath=image_info['image_path'][file_id].strip()\n ext=image_info['image_ext'][file_id]\n wcs_data = fitsio.read_header(impath, ext=ext)\n wcs = eu.wcsutil.WCS(wcs_data)\n\n\n jacob = wcs.get_jacobian(100,100)\n dudcol, dudrow, dvdcol, dvdrow = jacob\n\n det = dvdrow*dudcol - dvdcol*dudrow\n pixel_scale = np.sqrt(abs(det))\n print('found pixel scale:',pixel_scale)\n box_size = cat['box_size_arcsec']/pixel_scale\n\n # clip to range\n box_size.clip(\n min=self['min_box_size'],\n max=self['max_box_size'],\n out=box_size,\n )\n box_size = box_size.astype('i4')\n\n w,=np.where( ( (box_size % 2) != 0 ) )\n if w.size > 0:\n box_size[w] += 1\n\n return box_size", "def biotite():\n\n rho = 2800.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 186.; C[0,1] = 32.4; C[0,2] = 11.6; C[0,3] = 0.; C[0,4] = 0.; C[0,5] = 0.\n C[1,0] = C[0,1]; C[1,1] = 186.; C[1,2] = 11.6; C[1,3] = 0.; C[1,4] = 0.; C[1,5] = 0.\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 54.; C[2,3] = 0.; C[2,4] = 0.; C[2,5] = 0.\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 5.8; C[3,4] = 0.; C[3,5] = 0.\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 5.8; C[4,5] = 0.\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 76.8\n\n return C, rho", "def test08b(self):\n a = np.arange(1e3)\n b = bcolz.carray(a, rootdir=self.rootdir)\n u = b.iter(3, 30, 3)\n w = b.iter(2, 20, 2)\n self.assertEqual(a.tolist(), list(b))\n self.assertEqual(sum(a[3:30:3]), sum(u))\n self.assertEqual(sum(a[2:20:2]), sum(w))", "def _ig_ksz(self, x, b):\n return self.P(x*self.r500) * (x / np.sqrt(x**2. - b**2.)) / self.Tloken(x)", "def compute_energy(img):\r\n # urmati urmatorii pasi:\r\n # 1. transformati imagine in grayscale\r\n # 2. folositi filtru sobel pentru a calcula gradientul in directia X si Y\r\n # 3. 
calculati magnitudinea imaginii\r\n\r\n img_gray_scale = cv.cvtColor(img, cv.COLOR_BGR2GRAY);\r\n\r\n #de cautat totusi si codul pt SOBEL pe net\r\n grad_x = cv.Sobel(img_gray_scale, ddepth = cv.CV_16S, dx = 1, dy = 0, borderType = cv.BORDER_CONSTANT)\r\n grad_y = cv.Sobel(img_gray_scale, ddepth = cv.CV_16S, dx = 0, dy = 1, borderType = cv.BORDER_CONSTANT)\r\n\r\n#E repr gradientii aka cat se sch un pixel de la unul la altul\r\n E = abs(grad_x) + abs(grad_y)\r\n # print(grad_y)\r\n # print(grad_x)\r\n\r\n cv.imwrite(\"poza.jpg\", E)\r\n return E", "def illuminator_of_elfes():\n\n\t# Alpha - simplified by taking out the i by multiplying the outerproduct by 2i\n\talpha1i = np.matrix([[0, 0, 0, 2], [0, 0, 2, 0], [0, -2, 0, 0], [-2, 0, 0, 0]])\n\talpha2i = np.matrix([[0, 2, 0, 0], [-2, 0, 0, 0], [0, 0, 0, 2], [0, 0, -2, 0]])\n\talpha3i = np.matrix([[0, 0, 2, 0], [0, 0, 0, -2], [-2, 0, 0, 0], [0, 2, 0, 0]])\n\n\t# Betas - simplified by taking out the i by multiplication of outerprod by 2i\n\tbeta1i = np.matrix([[0, 0, 0, 2], [0, 0, -2, 0], [0, 2, 0, 0], [-2, 0, 0, 0]])\n\tbeta2i = np.matrix([[0, 0, 2, 0], [0, 0, 0, 2], [-2, 0, 0, 0], [0, -2, 0, 0]])\n\tbeta3i = np.matrix([[0, 2, 0, 0], [-2, 0, 0, 0], [0, 0, 0, -2], [0, 0, 2, 0]])\n\n\t# print(\"alpha 1\")\n\t# print(alpha1i)\n\t# print(\"\")\n\t# print(\"alpha 2\")\n\t# print(alpha2i)\n\t# print(\"\")\n\t# print(\"alpha 3\")\n\t# print(alpha3i)\n\t# print(\"\")\n\t# print(\"beta 1\")\n\t# print(beta1i)\n\t# print(\"\")\n\t# print(\"beta 2\")\n\t# print(beta2i)\n\t# print(\"\")\n\t# print(\"beta 3\")\n\t# print(beta3i)\n\t# print(\"\")\n\n\t# abperm_comb = [ np.multiply(alpha1i,-1), np.multiply(alpha2i,-1), np.multiply(alpha3i,-1), np.multiply(beta1i,-1), np.multiply(beta2i,-1), np.multiply(beta3i,-1)]\n\n\tabperm_comb = [alpha1i, alpha2i, alpha3i, beta1i, beta2i, beta3i]\n\treturn abperm_comb", "def bse_re(self):\n p = self.model.exog.shape[1]\n return np.sqrt(self.scale * np.diag(self.cov_params())[p:])", "def sig(self, batch):\n ans = 0\n for t in [-1,-2,-3]:\n z = batch[0][t]\n ans += (z[:-1]*z[1:]).sum() \n return ans", "def bayes_cov_col(Y,X,cols,lm):\n\n #EM iterateit\n Yhat=pd.DataFrame(lm.predict(X))\n Yhat.index=Y.index\n Yhat.columns=Y.columns\n SSE_all=np.square(Y.subtract(Yhat))\n X_adjust=X.copy()\n\n\n df_SSE = []\n df_logit = []\n\n for curcov in cols:\n\n curcells=X[X[curcov]>0].index\n\n if len(curcells)>2:\n\n X_notcur=X.copy()\n X_notcur[curcov]=[0]*len(X_notcur)\n\n X_sub=X_notcur.loc[curcells]\n\n Y_sub=Y.loc[curcells]\n\n GENE_var=2.0*Y_sub.var(axis=0)\n vargenes=GENE_var[GENE_var>0].index\n\n Yhat_notcur=pd.DataFrame(lm.predict(X_sub))\n Yhat_notcur.index=Y_sub.index\n Yhat_notcur.columns=Y_sub.columns\n\n SSE_notcur=np.square(Y_sub.subtract(Yhat_notcur))\n SSE=SSE_all.loc[curcells].subtract(SSE_notcur)\n SSE_sum=SSE.sum(axis=1)\n\n SSE_transform=SSE.div(GENE_var+0.5)[vargenes].sum(axis=1)\n logitify=np.divide(1.0,1.0+np.exp(SSE_transform))#sum))\n\n df_SSE.append(SSE_sum)\n df_logit.append(logitify)\n\n X_adjust[curcov].loc[curcells]=logitify\n\n return X_adjust", "def test02(self):\n N, blen = self.N, 100\n a = bcolz.fromiter(xrange(N), dtype=np.float64, count=N,\n rootdir=self.rootdir)\n l, s = 0, 0\n for block in bcolz.iterblocks(a, blen, blen-1):\n l += len(block)\n s += block.sum()\n self.assertEqual(l, (N - (blen - 1)))\n self.assertEqual(s, np.arange(blen-1, N).sum())", "def total_KE(particles):\r\n return sum([particle.kinetic_energy() for particle in particles])", "def E_inc(self):\n\n\t\tmaxit = self.num_data + 
100\n\n\t\tfor i in range(maxit):\n\t\t\tsqB", "def bse_fe(self):\n p = self.model.exog.shape[1]\n return np.sqrt(np.diag(self.cov_params())[0:p])", "def ncusps(self):\n n = self.level()\n return sum([arith.euler_phi(arith.gcd(d,n//d)) for d in n.divisors()])", "def E0_sum(r, k, fiber_radius, eps_out, eps_in, E0_mod, nmin_sc, nmax_sc, case):\n\n # refractive index of the cylinder relative \n # to that of the surrounding medium\n m = np.sqrt(eps_in / eps_out)\n E0 = Mie_scat_cyl.Es(r[0], r[1], r[2], k, fiber_radius,\n m, E0_mod, nmin_sc, nmax_sc, case)\n\n r_car = pol2cart(r)\n kvec_car = np.array([-k, 0, 0]) # normal incidence\n exp_factor = np.exp(1j * np.dot(kvec_car, r_car))\n if case == 1:\n Einc_car = np.array([0, 0, E0_mod], dtype=complex) * exp_factor\n # Ez is the same in pol and in cart coordinates\n E0 += Einc_car\n elif case == 2:\n Einc_car = np.array([0, E0_mod, 0], dtype=complex) * exp_factor\n E0 += vec_cart2pol(r_car, Einc_car)\n\n return(E0)", "def feature_energy24(wv):\n return np.sqrt(np.sum(wv[2:22, :, :] ** 2, axis=0)).T", "def Exbc(self):\n if getattr(self, '_Exbc', None) is None:\n self._Exbc = np.r_[0., 1.]\n return self._Exbc" ]
[ "0.63061666", "0.5997975", "0.588063", "0.5665232", "0.564657", "0.56458056", "0.55187875", "0.5485349", "0.5484678", "0.5478399", "0.5470462", "0.5465121", "0.54145944", "0.54053366", "0.5383228", "0.53753585", "0.5371241", "0.5369686", "0.5359706", "0.5344592", "0.5335335", "0.53338045", "0.5322084", "0.53151476", "0.531045", "0.5307426", "0.53071153", "0.5288922", "0.526718", "0.5262797", "0.52623314", "0.5258967", "0.52486336", "0.5243307", "0.52412474", "0.52393913", "0.5232515", "0.5197629", "0.51976204", "0.51790136", "0.5170517", "0.5166521", "0.5165113", "0.5160205", "0.5157411", "0.5156969", "0.51419747", "0.51320285", "0.5126691", "0.5120897", "0.5116371", "0.51158816", "0.5110666", "0.51105356", "0.51093656", "0.510668", "0.5100848", "0.50995153", "0.5091328", "0.5075711", "0.50734514", "0.5071899", "0.5056518", "0.5055586", "0.5054107", "0.5043385", "0.5040745", "0.50402695", "0.5038795", "0.5037549", "0.50370896", "0.5034177", "0.5025879", "0.50249916", "0.5024899", "0.50171816", "0.5015518", "0.5008371", "0.5000514", "0.49990377", "0.4998512", "0.49982014", "0.4997418", "0.49962205", "0.49960527", "0.4990355", "0.49899283", "0.49853638", "0.49840182", "0.4978489", "0.49768963", "0.49746317", "0.4972092", "0.49715966", "0.4969636", "0.49630103", "0.49607772", "0.49570242", "0.4955274", "0.49488425" ]
0.55135
7
MSE summed over the voxels intensities.
def mse_on_intensities(x, recon_x, scale_b):\n print(min(recon_x))\n print(max(recon_x))\n print(min(x))\n print(max(x))\n mse = F.mse_loss(recon_x, x, reduction='sum') / scale_b\n return mse
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _mse(self):\n error = self._input * self._weights - self._label\n sum_ = 0.0\n for i in range(self._input.shape[0]):\n sum_ += error[i, 0]**2\n return sum_/self._input.shape[0]", "def _calc_msve(self):\n v = []\n for state in self._env.state_iterator():\n feature_vector = self._features.vector(state)\n v.append(utils.state_value(feature_vector, self.theta))\n\n self.msve.append(utils.rmse(v, self._true_values))", "def ivrmse(self):\n return (self.model_error_iv()**2).mean()**.5", "def mse(self, x_tensors=None):\n\n return self.se(x_tensors)/self.y.size", "def compute_MSE(e):\n\n return 1/2*np.mean(e**2)", "def calculate_mse(e):\n return 1/2*np.mean(e.dot(e))", "def _mse(self, weights):\n error = self._input * weights - self._label\n sum_ = 0.0\n for i in range(self._input.shape[0]):\n sum_ += error[i, 0]**2\n return sum_ / self._input.shape[0]", "def calculate_mse(e):\r\n return 1/2*np.mean(e**2)", "def mse(X, Y, W):\n\n # TODO\n mse = np.sum((X@W-Y)**2)/(2*X.shape[0])\n # END TODO\n\n return mse", "def MSE(actual, noisy):\n mean_squared_error(actual, noisy)", "def compute_mse(y, tx, w):\n print(\"y.shape = \", y.shape)\n print(\"tx.shape = \", tx.shape)\n e = y - tx.dot(w)\n print(\"e.shape = \", e.shape)\n mse = e.T.dot(e) / (2 * len(e))\n return mse", "def rmse(self):\n lam = self.lam()\n weights = lam / lam.sum()\n weighted_var = self.var() * weights\n rmse = np.sqrt(weighted_var.sum())\n return rmse", "def mse(self):\n xs, ys = self.R.nonzero()\n predicted = self.full_matrix()\n error = 0\n for x, y in zip(xs, ys):\n # print(predicted[x, y], self.R[x, y] )\n error += pow(self.R[x, y] - predicted[x, y], 2)\n return np.sqrt(error)", "def compute_mse(y, tx, w):\n e = y[:, np.newaxis] - tx @ w\n return (e * e).sum() / (2.0 * len(y))", "def mse(x, y):\n\n return (x - y).pow(2).sum(dim=1, keepdim=True).mean() / x.size(1)", "def MSE(X, y, w):\r\n n = X.shape[0]\r\n f = X @ w\r\n J = np.sum(np.power((y - f), 2)) / n\r\n return J", "def calculate_mse(in_img, out_img):\n error = 0\n for x in range(in_img.shape[0]):\n for y in range(in_img.shape[1]):\n error += (in_img[x, y] - out_img[x, y]) ** 2\n return error / (in_img.shape[0] * in_img.shape[1])", "def mse(self, X, Y):\n\t\treturn mse_k(X, to_1_of_K(Y))", "def sse(matrix,motif):\n return sum([site_error(matrix,site)**2\n for site in motif])", "def mse(datax,datay,w):\n return np.mean((datax.dot(w.T)-datay)**2)", "def rmse(self):\n return (self.model_error()**2).mean()**.5", "def calc_mse(data, ax=0):\n return ((data[:, 0] - data[:, 1]) ** 2).mean(axis=ax)", "def compute_mse(y, tx, w):\n e = y-tx@w\n return 1/(2*y.shape[0])*e.transpose()@e", "def compute_mse(y, tx, w):\n e = y - tx.dot(w)\n mse = e.dot(e) / (2 * len(e))\n return mse", "def mse ( target_array ):\n return np.mean ( ( target_array - np.mean ( target_array ) ) ** 2 )\n # End mse()", "def mse(self, data, *args, **kwargs):\n return self._mse(np.array(data), *args, **kwargs)", "def compute_mse(y, tx, w):\n e = y - tx@w\n mse = e.T.dot(e) /(2*len(e))\n return mse", "def d_mse(x, y):\n\n return 2 * (x - y) / x.size(0) / x.size(1)", "def mse(actual,expected):\n return np.mean(se(actual,expected))", "def mse_k(self, X, Y):\n\t\treturn np.power(Y - self.predict_soft(X), 2).sum(1).mean(0)", "def irmse(self) -> float:\n # Getting the gradient of the observed data\n obs_len = self.true.size\n obs_grad = self.true[1:obs_len] - self.true[0:obs_len - 1]\n\n # Standard deviation of the gradient\n obs_grad_std = np.std(obs_grad, ddof=1)\n\n # Divide RMSE by the standard deviation of the 
gradient of the observed data\n return float(self.rmse() / obs_grad_std)", "def sse(x, y):\n return sum(se(x, y))", "def MSE(self, imageA, imageB):\n return np.mean((imageA.astype(\"float\") - imageB.astype(\"float\")) ** 2)", "def calcSE(self):\n # Make sure the Variance is already calculated\n if not hasattr(self,'ZmAreaVar'):\n self.calcZmAreaVar()\n # Standard Error = Square Root of Variance\n self.SE = self.ZmAreaVar ** 0.5\n # Should I be returning the value also?\n # Or just setting the attribute?\n return self.SE", "def calcSE(self):\n # Make sure the Variance is already calculated\n if not hasattr(self,'ZmAreaVar'):\n self.calcZmAreaVar()\n # Standard Error = Square Root of Variance\n self.SE = self.ZmAreaVar ** 0.5\n # Should I be returning the value also?\n # Or just setting the attribute?\n return self.SE", "def mse(y, yhat):\n return 0.5 * jnp.mean((y - yhat)**2)", "def seToSE( x ):\n x = asarray(x,dtype=float)\n if x.shape != (6,):\n raise ValueError(\"shape must be (6,); got %s\" % str(x.shape))\n #\n return expM(screw(x))", "def evse(self, data, *args, **kwargs):\n darr = np.array(data)\n d = darr if len(darr.shape) == 1 else darr[0] / darr[1]\n return (d - self.evs(darr, *args, **kwargs))**2", "def loss_mse(self, coeffs, x_values, y_values):\n return np.mean(pow(self.f(x_values, coeffs) - y_values, 2))", "def calculate_mse(img0, img1):\n mse = skm.mean_squared_error(img0, img1)\n return mse", "def EST_NOISE(images):\n num = images.shape[0]\n m_e_bar = sum(images)/num\n m_sigma = np.sqrt(sum((images - m_e_bar)**2)/(num - 1))\n \n return m_sigma", "def rmse(x, y):\n return mse(x, y) ** .5", "def mse(result, expected):\n total_square_sum = 0\n for index1 in range(0, len(result)):\n total_square_sum += (result[index1] - expected[index1]) ** 2\n return total_square_sum / float(len(result))", "def MSE(y,yhat):\r\n #\r\n y = np.asarray(y)\r\n yhat = np.asarray(yhat)\r\n if y.size != yhat.size:\r\n raise(ValueError(\"y and yhat should be of same size now\\n\\\r\n size(y) = %d and size(yhat) = %d\"%(y.size,yhat.size)))\r\n N = yhat.size\r\n y = y.reshape(N,)\r\n yhat = yhat.reshape(N,)\r\n \r\n res = y - yhat\r\n sse = np.sum(res**2) #sum squared errors\r\n MSE = sse/N\r\n return(MSE)", "def sse(self, tmin=None, tmax=None):\n res = self.ml.residuals(tmin=tmin, tmax=tmax)\n return sum(res ** 2)", "def mse(self, image_a, image_b):\r\n data = numpy.sum((image_a.astype('float') - image_b.astype('float')) ** 2)\r\n data /= float(image_a.shape[0] * image_a.shape[1])\r\n return data", "def nE(self):\n return int(self.vnE.sum())", "def rmse(self):\n y_pred, y_true = self._finalize_labels_and_prediction()\n\n return np.sqrt(F.mse_loss(y_pred, y_true).cpu().item())", "def rmse(self):\n y_pred, y_true = self._finalize_labels_and_prediction()\n\n return np.sqrt(F.mse_loss(y_pred, y_true).cpu().item())", "def compute_RMSE(e):\n \"\"\"Corresponds to sqrt(2*MSE)\"\"\"\n \n return np.sqrt(2*compute_MSE(e))", "def mse(o, r):\r\n\r\n return np.mean(np.square((np.abs(o).astype(float) - np.abs(r).astype(float))))", "def mse (vec1, vec2):\n sum = 0.0 #Initializes sum to 0\n count = len(vec1) #Number of total elements in each vector\n for i in range(count):\n sum += (vec2[i]-vec1[i])**2 #Adds the square of the difference between the values at each position in the two vectors\n return sum/count", "def MSE(im, ref, reduce=True):\n return torch.mean((im - ref) ** 2)", "def MSE(ratings, range):\n\n def squared_err(pair):\n (r, rP) = pair\n return (r-rP)**2\n\n return (1/len(ratings)) * sum(map(squared_err, 
ratings))", "def compute_rmse(y, tx, w):\n return np.sqrt(2*compute_mse(y,tx,w))", "def mse(self, y):\n return T.mean((self.y_pred - y) ** 2)", "def calculate_E(self):\n \n E = 0\n for i in xrange(self.size):\n Ei = self.h[i]\n Ei += 0.5*sum((1 if self.spins[j] else -1)*self.J[i,j] for j in self.adjacency[i])\n if not self.spins[i]:\n Ei *= -1\n E += Ei\n \n return E", "def MSE(a,b,axis):\n return ((a-b)**2).mean(axis=axis)", "def computeMse(data_target, Y):\n if data_target.shape != Y.shape:\n print \"the shapes does not correspond\",\n print data_target.shape,\n print Y.shape\n exit(-1)\n return np.sum(np.square(data_target - Y) / Y.shape[0])", "def MeanSqError(self):\r\n\t\treturn self.mse", "def mse(predicted, actual):\n diff = predicted - actual\n return np.average(diff * diff, axis=0)", "def mse(response_vector, prediction_vector):\n return np.power(response_vector - prediction_vector, 2).mean()", "def _calc_Em(self):\n return (self.parameters.E0 +\n self.x * sqrt2 * self.parameters.sigma * self.mt)", "def calcEout(model_type): \n b, v = calcStatistics(model_type)\n return b + v", "def compute_rmse(y, tx, w):\n return np.sqrt(2 * compute_mse(y, tx, w))", "def calcEVals(self):\n self.eVals,self.eVecs = np.linalg.eigh(self.rhoOp)", "def mse(y_true: np.ndarray, y_pred: np.ndarray) -> float:\n return np.mean(np.power(y_true - y_pred, 2))", "def error_MSE(resid):\n if resid.ndim == 2:\n return (norm(np.asarray(resid).ravel())**2)/float(resid.shape[1])\n elif resid.ndim == 1:\n return (norm(np.asarray(resid).ravel())**2)\n else:\n raise Exception(\"array passed to error_MSE has incorrect shape\")", "def MeanSquaredError(y_data, y_model):\n\tn = np.size(y_model)\n\tMSE = (1/n)*np.sum((y_data-y_model)**2)\n\n\treturn MSE", "def mse(img1, img2):\n # TODO: implement this function.", "def mse(y, y_pred, verbose=True):\n\n mse_sum = 0\n\n for i in range(len(y)):\n mse_sum += mean_squared_error(y[i], y_pred[i])\n\n if verbose:\n print(f\"Mean MSE {mse_sum / len(y)}\")\n\n return mse_sum / len(y)", "def mse(x1, x2, axis=0):\n x1 = np.asanyarray(x1)\n x2 = np.asanyarray(x2)\n return np.mean((x1 - x2) ** 2, axis=axis)", "def mse(a, b):\n a = numpy(a)\n b = numpy(b)\n return ((a - b) ** 2).sum()", "def mse(mat1, mat2):\n\tmse = 0\n\tw, h = mat1.shape\n\tif mat1.shape != mat2.shape:\n\t\treturn -1\n\tprint(\"inside mse\")\n\tprint(mat1)\n\tprint(mat2)\n\tfor i in range(w):\n\t\tfor j in range(h):\n\t\t\tmse += \tpow((int(mat1[i,j]) - int(mat2[i,j])), 2)\n\treturn mse/ (w*h)", "def MSE(self, theta):\n V = np.array((theta * self.mu_phi).sum(axis=1))\n return np.mean((V - self.mu_accum_r) ** 2)", "def mse(image1: np.ndarray, image2: np.ndarray) -> np.ndarray:\n return np.sqrt(np.power((image1 - image2), 2).mean(axis=(-1, -2)))", "def _mse(self, trace, **inputs):\n phen_mse = []\n for idx in np.random.randint(0, len(trace), 500):\n step = self.trace[idx]\n exp_pred = np.dot(inputs['gwas_gen'],\n step['beta_med'].T).ravel()\n phen_pred = step['alpha'] * exp_pred\n phen_mse = np.mean((inputs['gwas_phen'] - phen_pred) ** 2)\n mean_mse = np.mean(phen_mse)\n return mean_mse", "def mse_g(datax,datay,w):\n n,d=datax.shape\n return (2*datax.T).dot((datax.dot((w.T)-datay)))\n #retrouner les adjustement des poids:donc taille d*1", "def bse_re(self):\n p = self.model.exog.shape[1]\n return np.sqrt(self.scale * np.diag(self.cov_params())[p:])", "def error(self, trainset: ([], [])):\n # MSE = Σ | d – y |^2 / n\n error_sum = 0.0\n for index, example in enumerate(trainset):\n # | d – y |^2\n output = 
self.activate(example[0])\n\n target = example[1][0]\n\n error = target - output\n error_sum += error ** 2\n\n # Σ |error_sum| / n\n error_sum = error_sum / len(trainset)\n return error_sum", "def get_test_rmse(self, test):\n nnz_user, nnz_item = test.nonzero()\n nnz_test = list(zip(nnz_user, nnz_item))\n rmse = 0.0\n for u, i in nnz_test:\n user = self.user_matrix[u, :]\n item = self.item_matrix[:, i]\n pred = user @ item\n if pred > 5:\n pred = 5\n if pred < 1:\n pred = 1\n rmse += (self.data_matrix[u, i] - pred) ** 2\n rmse = np.sqrt(rmse / len(nnz_test))\n return rmse", "def mse_on_features(feature, recon_feature, logvar):\n mse = F.mse_loss(recon_feature, feature) / (2 * logvar.exp())\n mse = torch.mean(mse)\n return mse", "def rmse_from_mse (mse):\n return np.sqrt(2*mse)", "def mse(observed, predicted):\n return np.sqrt(np.mean((observed - predicted)**2))", "def sigma(self):\n with ops.name_scope(self.name):\n return self._cov.to_dense()", "def calc_sigma(evs):\n\n sigma = zeros(vecLen-1, dtype='d')\n sigma[0] = absolute(evs[0] - evs[1])\n\n for j in range(1, vecLen-1):\n term1 = absolute(evs[j] - evs[j-1])\n term2 = absolute(evs[j+1] - evs[j])\n sigma[j] = 0.5 * (term1 + term2)\n if sigma[j] is 0.0:\n print \"Degenerate mode at: \", j\n del j\n return sigma", "def mse(A, B):\n return ((A - B) ** 2).mean(axis=0)", "def calculate_E0(self) -> float:\n noisy = self.kernel_eigenvectors_[-1].copy()\n np.random.shuffle(noisy)\n\n kernel_eigenvectors = self.kernel_eigenvectors_[:-1]\n kernel_eigenvectors.append(noisy)\n\n eigenvectors_matrix = scipy.sparse.csr_matrix(\n np.column_stack([eigenvector for eigenvector in kernel_eigenvectors])\n )\n\n if len(kernel_eigenvectors) == 2:\n ev0 = kernel_eigenvectors[0]\n ev1 = kernel_eigenvectors[1]\n _, Gamma, _ = scipy.sparse.linalg.svds(\n ev0.T @ ev1, k=self.n_jointly_smooth_functions, which=\"LM\"\n )\n else:\n _, Gamma, _ = scipy.sparse.linalg.svds(\n eigenvectors_matrix, k=self.n_jointly_smooth_functions, which=\"LM\"\n )\n\n Gamma.sort()\n gamma2 = Gamma[-2]\n E0 = (1 + gamma2) / 2\n return E0", "def rmse(model, ratings):\n predictions = model.predict_all().clip(1, 5)\n predictions = predictions[ratings.nonzero()]\n true_values = ratings[ratings.nonzero()]\n \n return np.sqrt(np.mean((predictions - true_values)**2))", "def decomposed_mse(self) -> float:\n e_std = np.std(self.true)\n s_std = np.std(self.predicted)\n\n bias_squared = self.bias() ** 2\n sdsd = (e_std - s_std) ** 2\n lcs = 2 * e_std * s_std * (1 - self.corr_coeff())\n\n decomposed_mse = bias_squared + sdsd + lcs\n\n return float(decomposed_mse)", "def inrse(self) -> float:\n return float(np.sqrt(np.sum(np.square(self._error())) / np.sum(np.square(self.true - np.mean(self.true)))))", "def pixel_wise_mse(target, output):\n eps = kb.epsilon()\n output = kb.clip(output, eps, 1. 
- eps)\n return kb.sum(kb.pow(target - output, 2), axis=-1) / (2 * input_roi_size)", "def save_mse_vox_img(result, imgfile, outname, verbose):\n\n if verbose:\n print(\"Saving voxel-wise mse image\")\n\n img = nib.load(imgfile)\n nib.save(nib.Nifti1Image(result, img.get_affine()), outname)", "def sse_optimized(matrix,motif):\n #Hoisted computation of K out of site_error\n K = 1/beta * sum([log(sum([exp(-beta*matrix[i][base_dict[b]])\n for b in \"ACGT\"]))\n for i in range(L)])\n return sum([(site_error_optimized(matrix,site)+K)**2\n for site in motif])", "def mse(gt, pred):\n return np.mean((gt - pred) ** 2)", "def mse(gt, pred):\n return np.mean((gt - pred) ** 2)", "def rmse(self, weights=None) -> float:\n return sqrt(np.average((self.true - self.predicted) ** 2, axis=0, weights=weights))", "def compute_mse_loss(y, tx, w):\n e = y - np.dot(tx, w)\n N = e.size\n return (1./(2*N)) * np.sum(np.square(e))", "def energy(self):\n self.E = - np.sum(self.phi) + 0.5 * self.mass * np.sqrt((self.v_x ** 2 + self.v_y **2))", "def feature_energy(wv):\n return np.sqrt(np.sum(wv ** 2, axis=0)).T" ]
[ "0.7202287", "0.71245104", "0.70636916", "0.70570135", "0.6970779", "0.69215125", "0.6889753", "0.6849584", "0.67434496", "0.67034423", "0.6664834", "0.6608378", "0.6576306", "0.6547451", "0.64824516", "0.6472968", "0.6437196", "0.6421605", "0.6401893", "0.6368984", "0.6364525", "0.63640106", "0.63418436", "0.6301877", "0.62790465", "0.6278311", "0.627612", "0.6252638", "0.6199785", "0.6171157", "0.6132005", "0.6102094", "0.6087707", "0.6078915", "0.6078915", "0.60686374", "0.6061568", "0.60588163", "0.60426444", "0.60221255", "0.6014481", "0.60063416", "0.59999853", "0.5988559", "0.59667194", "0.59638083", "0.5963625", "0.596306", "0.596306", "0.5961979", "0.5946088", "0.5939968", "0.59371656", "0.59368867", "0.59349626", "0.5924473", "0.59211206", "0.5905039", "0.5890336", "0.58796364", "0.5877603", "0.58714116", "0.587131", "0.5856769", "0.5848173", "0.5847158", "0.5846339", "0.58461314", "0.5830979", "0.5820437", "0.5813676", "0.5811108", "0.5809502", "0.5804195", "0.57999176", "0.5791114", "0.57863146", "0.5782109", "0.5769444", "0.5768497", "0.5761398", "0.5759486", "0.5754883", "0.5753383", "0.5742652", "0.5741575", "0.5739435", "0.5738354", "0.5735814", "0.5734046", "0.5733712", "0.5723368", "0.570044", "0.56920004", "0.5691189", "0.5691189", "0.5666838", "0.56645197", "0.5661972", "0.5660106" ]
0.567657
96
MSE over features of FC layer of Discriminator.
def mse_on_features(feature, recon_feature, logvar):\n mse = F.mse_loss(recon_feature, feature) / (2 * logvar.exp())\n mse = torch.mean(mse)\n return mse
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def discriminator_loss(discriminator, fake_images, real_images, fake_labels, real_labels, con_aug, stage):\n discriminator.train()\n criterion = nn.BCELoss()\n fake = fake_images.detach()\n condition = con_aug.detach()\n batch_size = real_images.size(0)\n \"\"\"\n ********************************************************************************\n The next two lines should be removed if we don't have a very powerful GPU.\n I cannot train the 256 x 256 image in stage 2 in my GPU(Tesla K80). So modified stage 2\n so that all processing are done for 64 x 64 and output is also 64 x 64 image.\n *********************************************************************************\n \"\"\"\n if (stage==2):\n real_images = F.interpolate(real_images, scale_factor = 4)\n real_dis_fea = discriminator(real_images)\n fake_dis_fea = discriminator(fake)\n\n \"\"\"\n Here we use three types of error and add them.\n real_error: error between real images and real labels.\n wrong_error: error between real images and wrong labels.\n fake_error: error between fake images and fake labels.\n \"\"\"\n real_logits = discriminator.conditioned_result(real_dis_fea, condition)\n real_error = criterion(real_logits, real_labels)\n\n wrong_logits = discriminator.conditioned_result(real_dis_fea[:(batch_size-1)], condition[1:])\n wrong_error = criterion(wrong_logits, fake_labels[1:])\n\n fake_logits = discriminator.conditioned_result(fake_dis_fea, condition)\n fake_error = criterion(fake_logits, fake_labels)\n\n if discriminator.unconditioned_result is not None:\n \"\"\"\n In case of stage 2 generator in addition to above errors we also\n use another error calculated from scores computed using the image features\n only without using the text features.\n \"\"\"\n real_logits1 = discriminator.unconditioned_result(real_dis_fea)\n uncond_real_error = criterion(real_logits1, real_labels)\n\n fake_logits1 = discriminator.unconditioned_result(fake_dis_fea)\n uncond_fake_error = criterion(fake_logits1, fake_labels)\n\n error = (real_error + uncond_real_error)/2.0 + (wrong_error+fake_error+uncond_fake_error)/3.0\n real_error = (real_error + uncond_real_error)/2.0\n fake_error = (fake_error + uncond_fake_error)/2.0\n\n else:\n error = real_error + (wrong_error * fake_error) * 0.5\n\n return error, real_error.item(), fake_error.item(), wrong_error.item()", "def _mse(self):\n error = self._input * self._weights - self._label\n sum_ = 0.0\n for i in range(self._input.shape[0]):\n sum_ += error[i, 0]**2\n return sum_/self._input.shape[0]", "def _calc_msve(self):\n v = []\n for state in self._env.state_iterator():\n feature_vector = self._features.vector(state)\n v.append(utils.state_value(feature_vector, self.theta))\n\n self.msve.append(utils.rmse(v, self._true_values))", "def bse_fe(self):\n p = self.model.exog.shape[1]\n return np.sqrt(np.diag(self.cov_params())[0:p])", "def loss(self):\n return 'mse'", "def model_e(input_shape=(None, 28, 28, 1), nb_classes=10):\n\n # Define a fully connected model (it's different than the black-box).\n layers = [Flatten(),\n Linear(200),\n ReLU(),\n Linear(200),\n ReLU(),\n Linear(nb_classes),\n Softmax()]\n\n return DefenseMLP(layers, input_shape)", "def __init__(self, image_size):\n super(SiameseDiscriminator, self).__init__()\n self.cnn1 = nn.Sequential(\n nn.ReflectionPad2d(1),\n nn.Conv2d(3, 4, kernel_size=3),\n nn.LeakyReLU(0.1, inplace=True),\n nn.BatchNorm2d(4),\n nn.Dropout2d(p=.2),\n\n nn.ReflectionPad2d(1),\n nn.Conv2d(4, 8, kernel_size=3),\n nn.LeakyReLU(0.1, inplace=True),\n 
nn.BatchNorm2d(8),\n nn.Dropout2d(p=.2),\n\n nn.ReflectionPad2d(1),\n nn.Conv2d(8, 8, kernel_size=3),\n nn.LeakyReLU(0.1, inplace=True),\n nn.BatchNorm2d(8),\n nn.Dropout2d(p=.2))\n\n self.fc1 = nn.Sequential(\n nn.Linear(8 * image_size * image_size, 500),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Linear(500, 500),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Linear(500, 15))", "def _multi_descriptors(self, features, mode):\n # Illumination variant, rotation variant\n illum_var_rot_var_head = self.relu(self.conv_illum_var_rot_var_1(\n features))\n illum_var_rot_var_head = self.bn_illum_var_rot_var_1(\n illum_var_rot_var_head)\n illum_var_rot_var_head = self.conv_illum_var_rot_var_2(\n illum_var_rot_var_head)\n\n # Illumination variant, rotation invariant\n illum_var_rot_invar_head = self.relu(self.conv_illum_var_rot_invar_1(\n features))\n illum_var_rot_invar_head = self.bn_illum_var_rot_invar_1(\n illum_var_rot_invar_head)\n illum_var_rot_invar_head = self.conv_illum_var_rot_invar_2(\n illum_var_rot_invar_head)\n\n # Illumination invariant, rotation variant\n illum_invar_rot_var_head = self.relu(self.conv_illum_invar_rot_var_1(\n features))\n illum_invar_rot_var_head = self.bn_illum_invar_rot_var_1(\n illum_invar_rot_var_head)\n illum_invar_rot_var_head = self.conv_illum_invar_rot_var_2(\n illum_invar_rot_var_head)\n\n # Illumination invariant, rotation invariant\n illum_invar_rot_invar_head = self.relu(\n self.conv_illum_invar_rot_invar_1(features))\n illum_invar_rot_invar_head = self.bn_illum_invar_rot_invar_1(\n illum_invar_rot_invar_head)\n illum_invar_rot_invar_head = self.conv_illum_invar_rot_invar_2(\n illum_invar_rot_invar_head)\n\n outputs = {'raw_rot_var_illum_var': illum_var_rot_var_head,\n 'raw_rot_invar_illum_var': illum_var_rot_invar_head,\n 'raw_rot_var_illum_invar': illum_invar_rot_var_head,\n 'raw_rot_invar_illum_invar': illum_invar_rot_invar_head}\n return outputs", "def generator_loss(discriminator, fake_images, real_labels, con_aug):\n\n discriminator.train()\n criterion = nn.BCELoss()\n condition = con_aug.detach()\n fake_img_fea = discriminator(fake_images)\n fake_logits = discriminator.conditioned_result(fake_img_fea, condition)\n fake_error = criterion(fake_logits, real_labels)\n\n if discriminator.unconditioned_result is not None:\n \"\"\"\n If it is a stage 2 discriminator then an additional error due to the\n score calculated from image features alone is added to the above error\n for loss calculation.\n \"\"\"\n fake_logits1 = discriminator.unconditioned_result(fake_img_fea)\n uncond_fake_error = criterion(fake_logits1, real_labels)\n fake_error += uncond_fake_error\n return fake_error", "def compute_MSE(e):\n\n return 1/2*np.mean(e**2)", "def discriminator_model():\n\n Discriminator = Sequential(name='Discriminator')\n\n # Downsampling : 32x32x3 --> 16x16x64\n Discriminator.add(Conv2D(filters=64, kernel_size=(5, 5), strides=2, padding='same', \n kernel_initializer=RandomNormal(stddev=GAUSS_SD), \n input_shape=DISCRIMINATOR_INPUT))\n Discriminator.add(LeakyReLU(ALPHA))\n\n # Downsampling : 16x16x64 --> 8x8x128\n Discriminator.add(Conv2D(filters=128, kernel_size=(5, 5), strides=2, padding='same'))\n Discriminator.add(BatchNormalization(momentum=MOMENTUM))\n Discriminator.add(LeakyReLU(ALPHA))\n\n # Downsampling : 8x8x128 --> 4x4x256\n Discriminator.add(Conv2D(filters=128, kernel_size=(5, 5), strides=2, padding='same'))\n Discriminator.add(BatchNormalization(momentum=MOMENTUM))\n Discriminator.add(LeakyReLU(ALPHA))\n\n # Downsampling : 4x4x256 --> 2x2x512\n 
Discriminator.add(Conv2D(filters=512, kernel_size=(5, 5), strides=2, padding='same'))\n Discriminator.add(BatchNormalization(momentum=MOMENTUM))\n Discriminator.add(LeakyReLU(ALPHA))\n\n # Fully Connected Layer (classifier) , 2x2x512 (2048) --> 1\n Discriminator.add(Flatten())\n Discriminator.add(Dropout(DROPOUT))\n Discriminator.add(Dense(1))\n\n return Discriminator", "def MSE(im, ref, reduce=True):\n return torch.mean((im - ref) ** 2)", "def loss(self, feats: Tuple[torch.Tensor], data_samples: List[DataSample],\n **kwargs) -> dict:\n if self.dist:\n raise NotImplementedError(\n \"MMPretrain doesn't support to train\"\n ' the distilled version EfficientFormer.')\n else:\n return super().loss(feats, data_samples, **kwargs)", "def rmse(y_true, y_pred): # -> Any:\n ...", "def compute_loss(self, features, mode, params, precomputed):\n raise NotImplementedError(\"Model does not implement loss.\")", "def decomposed_mse(self) -> float:\n e_std = np.std(self.true)\n s_std = np.std(self.predicted)\n\n bias_squared = self.bias() ** 2\n sdsd = (e_std - s_std) ** 2\n lcs = 2 * e_std * s_std * (1 - self.corr_coeff())\n\n decomposed_mse = bias_squared + sdsd + lcs\n\n return float(decomposed_mse)", "def rese_block(x, num_features, cfg, name):\n if num_features != x.shape[-1].value:\n shortcut = Conv1D(num_features, kernel_size=1, padding='same', use_bias=True, name=f'{name}_scut_conv',\n kernel_regularizer=l2(cfg.weight_decay), kernel_initializer='glorot_uniform')(x)\n shortcut = BatchNormalization(name=f'{name}_scut_norm')(shortcut)\n else:\n shortcut = x\n\n for i in range(cfg.num_convs):\n if i > 0:\n x = Activation('relu', name=f'{name}_relu{i-1}')(x)\n x = Dropout(0.2, name=f'{name}_drop{i-1}')(x)\n x = Conv1D(num_features, kernel_size=3, padding='same', use_bias=True,\n kernel_regularizer=l2(cfg.weight_decay), kernel_initializer=taejun_uniform(), name=f'{name}_conv{i}')(x)\n x = BatchNormalization(name=f'{name}_norm{i}')(x)\n\n # Add SE if it is ReSE block.\n if cfg.amplifying_ratio:\n x = Multiply(name=f'{name}_scale')([x, squeeze_excitation(x, cfg.amplifying_ratio, name)])\n\n x = Add(name=f'{name}_scut')([shortcut, x])\n x = Activation('relu', name=f'{name}_relu1')(x)\n x = MaxPool1D(pool_size=3, name=f'{name}_pool')(x)\n return x", "def MI_Net_with_DS(dataset):\n # load data and convert type\n train_bags = dataset['train']\n test_bags = dataset['test']\n\n # convert bag to batch\n train_set = convertToBatch(train_bags)\n test_set = convertToBatch(test_bags)\n dimension = train_set[0][0].shape[1]\n weight = [1.0, 1.0, 1.0, 0.0]\n\n # data: instance feature, n*d, n = number of training instance\n data_input = Input(shape=(dimension,), dtype='float32', name='input')\n\n # fully-connected\n fc1 = Dense(256, activation='relu', kernel_regularizer=l2(args.weight_decay))(data_input)\n fc2 = Dense(128, activation='relu', kernel_regularizer=l2(args.weight_decay))(fc1)\n fc3 = Dense(64, activation='relu', kernel_regularizer=l2(args.weight_decay))(fc2)\n\n # dropout\n dropout1 = Dropout(rate=0.5)(fc1)\n dropout2 = Dropout(rate=0.5)(fc2)\n dropout3 = Dropout(rate=0.5)(fc3)\n\n # features pooling\n fp1 = Feature_pooling(output_dim=1, kernel_regularizer=l2(args.weight_decay), pooling_mode=args.pooling_mode, name='fp1')(dropout1)\n fp2 = Feature_pooling(output_dim=1, kernel_regularizer=l2(args.weight_decay), pooling_mode=args.pooling_mode, name='fp2')(dropout2)\n fp3 = Feature_pooling(output_dim=1, kernel_regularizer=l2(args.weight_decay), pooling_mode=args.pooling_mode, name='fp3')(dropout3)\n\n # score 
average\n mg_ave =average([fp1,fp2,fp3], name='ave')\n\n model = Model(inputs=[data_input], outputs=[fp1, fp2, fp3, mg_ave])\n sgd = SGD(lr=args.init_lr, decay=1e-4, momentum=args.momentum, nesterov=True)\n model.compile(loss={'fp1':bag_loss, 'fp2':bag_loss, 'fp3':bag_loss, 'ave':bag_loss}, loss_weights={'fp1':weight[0], 'fp2':weight[1], 'fp3':weight[2], 'ave':weight[3]}, optimizer=sgd, metrics=[bag_accuracy])\n\n # train model\n t1 = time.time()\n num_batch = len(train_set)\n for epoch in range(args.max_epoch):\n train_loss, train_acc = train_eval(model, train_set)\n test_loss, test_acc = test_eval(model, test_set)\n print('epoch=', epoch, ' train_loss= {:.3f}'.format(train_loss), ' train_acc= {:.3f}'.format(train_acc), ' test_loss={:.3f}'.format(test_loss), ' test_acc= {:.3f}'.format(test_acc))\n t2 = time.time()\n print('run time:', (t2-t1) / 60, 'min')\n print('test_acc={:.3f}'.format(test_acc))\n\n return test_acc", "def calculate_feature_statistics(feats):\r\n mu = np.mean(feats, axis=0)\r\n sigma = np.cov(feats, rowvar=False)\r\n return mu, sigma", "def rmse(self):\n return (self.model_error()**2).mean()**.5", "def rmse(self):\n y_pred, y_true = self._finalize_labels_and_prediction()\n\n return np.sqrt(F.mse_loss(y_pred, y_true).cpu().item())", "def rmse(self):\n y_pred, y_true = self._finalize_labels_and_prediction()\n\n return np.sqrt(F.mse_loss(y_pred, y_true).cpu().item())", "def generator_loss_std(score_discriminator):\n labels = Variable(torch.ones(score_discriminator.size()), requires_grad=False).type(torch.FloatTensor)\n bce_loss = nn.BCEWithLogitsLoss()\n loss = bce_loss(score_discriminator, labels)\n return loss", "def multi_mse(true, pred):\n try:\n pred = [t.mean() for t in pred]\n except:\n pass\n\n pred = np.array([p.numpy() for p in pred]).squeeze()\n return tf.keras.metrics.mean_squared_error(true.to_numpy().T, pred)", "def mae(self):\n y_pred, y_true = self._finalize_labels_and_prediction()\n\n return F.l1_loss(y_true, y_pred).data.item()", "def mae(self):\n y_pred, y_true = self._finalize_labels_and_prediction()\n\n return F.l1_loss(y_true, y_pred).data.item()", "def _mse(self, weights):\n error = self._input * weights - self._label\n sum_ = 0.0\n for i in range(self._input.shape[0]):\n sum_ += error[i, 0]**2\n return sum_ / self._input.shape[0]", "def calculate_mse(e):\n return 1/2*np.mean(e.dot(e))", "def mse_loss1(y_true,y_pred):\n return ((tf.keras.losses.MSE(tf.expand_dims(y_true, axis=0),tf.expand_dims(y_pred, axis=0))))", "def mse(y_true: np.ndarray, y_pred: np.ndarray) -> float:\n return np.mean(np.power(y_true - y_pred, 2))", "def __init__(self, S=7, B=2, C=20): \n super().__init__()\n self.mse = nn.MSELoss(reduction=\"sum\")\n self.S = S\n self.B = B\n self.C = C\n self.l_noobl = 0.5\n self.l_coord = 5", "def CE():\n def CE_loss(input,target):\n return nn.CrossEntropyLoss()(input.squeeze(), target)\n\n return CE_loss", "def fc_discriminator(input_tensor, is_training=True):\n del is_training\n flattened = tf.layers.flatten(input_tensor)\n d1 = tf.layers.dense(flattened, 1000, activation=tf.nn.leaky_relu, name=\"d1\")\n d2 = tf.layers.dense(d1, 1000, activation=tf.nn.leaky_relu, name=\"d2\")\n d3 = tf.layers.dense(d2, 1000, activation=tf.nn.leaky_relu, name=\"d3\")\n d4 = tf.layers.dense(d3, 1000, activation=tf.nn.leaky_relu, name=\"d4\")\n d5 = tf.layers.dense(d4, 1000, activation=tf.nn.leaky_relu, name=\"d5\")\n d6 = tf.layers.dense(d5, 1000, activation=tf.nn.leaky_relu, name=\"d6\")\n logits = tf.layers.dense(d6, 2, activation=None, name=\"logits\")\n 
probs = tf.nn.softmax(logits)\n return logits, probs", "def TST_LCE_D(S,N1,N_per,alpha,discriminator,device,dtype):\r\n np.random.seed(seed=1102)\r\n torch.manual_seed(1102)\r\n torch.cuda.manual_seed(1102)\r\n N = S.shape[0]\r\n f = torch.nn.Softmax()\r\n output = discriminator(S)\r\n STAT = abs(output[:N1,0].type(torch.FloatTensor).mean() - output[N1:,0].type(torch.FloatTensor).mean())\r\n STAT_vector = np.zeros(N_per)\r\n for r in range(N_per):\r\n ind = np.random.choice(N, N, replace=False)\r\n # divide into new X, Y\r\n ind_X = ind[:N1]\r\n ind_Y = ind[N1:]\r\n # print(indx)\r\n STAT_vector[r] = abs(output[ind_X,0].type(torch.FloatTensor).mean() - output[ind_Y,0].type(torch.FloatTensor).mean())\r\n S_vector = np.sort(STAT_vector)\r\n threshold = S_vector[np.int(np.ceil(N_per * (1 - alpha)))]\r\n h = 0\r\n if STAT.item() > threshold:\r\n h = 1\r\n return h, threshold, STAT", "def test_cnn_fc_nodes(self):\n fc_hidden_nodes = 101\n model = modelgen.generate_CNN_model(\n (None, 20, 3), 2, [32, 32], fc_hidden_nodes)\n dense_layer = [l for l in model.layers if 'Dense' in str(l)][0]\n assert dense_layer.output_shape[1] == fc_hidden_nodes, \\\n 'Wrong number of fc nodes.'", "def sub_model_net(self):\r\n # define input\r\n x = keras.Input(shape=(960,), name='input')\r\n fc_2 = keras.layers.Dense(160, name='fc_2')(x)\r\n add_1 = keras.layers.Activation('relu')(fc_2)\r\n drop = keras.layers.Dropout(0.5)\r\n # output\r\n y_hat = keras.layers.Dense(1283, activation='softmax', name='output')(add_1)\r\n model = keras.Model(inputs=x, outputs=y_hat)\r\n\r\n return model", "def discriminator():\n\n # img = Input(shape=(28, 28, 1))\n # validity = ident(img)\n\n model = Model(img, validity)\n\n model.compile(loss=\"binary_crossentropy\", optimizer=op1,\n metrics=['accuracy'])\n\n # model.summary()\n\n return model", "def ensemble_perts(self):\n #emean = self.ensemble_mean()\n return self - self.ensemble_mean()\n #return self.state.values", "def softmax_mse(y_true, softmax_pred):\n return np.mean((y_true - softmax_pred)**2)", "def vae_loss(recon_x, x, mu_logvar):\n mu = mu_logvar[:, 0:int(mu_logvar.size()[1]/2)]\n logvar = mu_logvar[:, int(mu_logvar.size()[1]/2):]\n# KLD = -0.5 * torch.sum(1 + 2 * logvar - mu.pow(2) - (2 * logvar).exp())\n KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())\n# BCE = F.binary_cross_entropy(recon_x.squeeze(), x.squeeze(), reduction='sum')\n MSE = F.mse_loss(recon_x.squeeze(), x.squeeze(), reduction='sum')\n return {'loss': KLD + MSE,\n 'mse': MSE}", "def features_sigma(img,\n sigma,\n intensity=True,\n edges=True,\n texture=True):\n\n features = []\n\n gx,gy = np.meshgrid(np.arange(img.shape[1]), np.arange(img.shape[0]))\n # print(gx.shape)\n #features.append(gx)\n gx = filters.gaussian(gx, sigma)\n gy = filters.gaussian(gy, sigma)\n\n features.append(np.sqrt(gx**2 + gy**2)) #use polar radius of pixel locations as cartesian coordinates\n\n del gx, gy\n\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Location features extracted using sigma= %f' % (sigma))\n\n img_blur = filters.gaussian(img, sigma)\n\n if intensity:\n features.append(img_blur)\n\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Intensity features extracted using sigma= %f' % (sigma))\n\n if edges:\n features.append(filters.sobel(img_blur))\n\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Edge features extracted using sigma= %f' % (sigma))\n\n if texture:\n H_elems = [\n 
np.gradient(np.gradient(img_blur)[ax0], axis=ax1)\n for ax0, ax1 in itertools.combinations_with_replacement(range(img.ndim), 2)\n ]\n\n eigvals = feature.hessian_matrix_eigvals(H_elems)\n del H_elems\n\n for eigval_mat in eigvals:\n features.append(eigval_mat)\n del eigval_mat\n\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Texture features extracted using sigma= %f' % (sigma))\n\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Image features extracted using sigma= %f' % (sigma))\n\n return features", "def __init__(self, encoding_size=50, hidden_size=150, scene_size=100, scene_channels=6):\n\n super(ContextFusion, self).__init__()\n\n self.encoding_size = encoding_size\n self.mlp = nn.Sequential(\n nn.Linear(hidden_size + scene_channels, encoding_size),\n nn.Softplus(),\n nn.Linear(encoding_size, encoding_size),\n nn.Softplus(),\n )", "def test_forest_dml(self):\n\n Y, T, X, _ = ihdp_surface_B()\n est = AutomatedForestDML(model_y=automl_model_reg(),\n model_t=GradientBoostingClassifier(),\n discrete_treatment=True,\n n_estimators=1000,\n subsample_fr=.8,\n min_samples_leaf=10,\n min_impurity_decrease=0.001,\n verbose=0, min_weight_fraction_leaf=.01)\n est.fit(Y, T, X=X)\n _ = est.effect(X)", "def loss_fn(input_d, reconstructed, mean, logvar, beta=1, batch_size=1, input_size=1):\n\n # mse_criterion = nn.MSELoss() # reduction=sum ?\n # mse_loss = mse_criterion(input_d, reconstructed)\n\n # bce_criterion = nn.BCELoss(size_average=False) # reduction=sum ?\n bce_criterion = nn.BCELoss() # reduction=sum ?\n bce_loss = bce_criterion(input_d, reconstructed)\n\n # see Appendix B from VAE paper:\n # Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014\n # https://arxiv.org/abs/1312.6114\n # 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)\n\n # for gaussian distribution when\n # generated data passed to the encorder is z~ N(0,1) and generated data is x~N(m,var)\n\n kl_loss = -0.5 * torch.sum(1 + logvar - mean.pow(2) - logvar.exp())\n\n normalized_kl_loss = kl_loss / (batch_size * input_size)\n scaled_kl_loss = beta*normalized_kl_loss\n # scaled_kl_loss = beta*kl_loss\n\n # return bce_loss + kl_loss, bce_loss, kl_loss\n return bce_loss + scaled_kl_loss, bce_loss, normalized_kl_loss\n # return mse_loss + scaled_kl_loss, mse_loss, kl_loss", "def vae_loss(gen_images, input_images, mu_sigmas):\n # List to aggregate binary cross-entropy reconstruction losses\n # from all of the image outputs:\n BCEs = []\n # List to aggregate KL divergence losses from each of the mu/sigma\n # projections:\n KLDs = []\n\n # TODO Your code goes here.\n\n return BCEs, KLDs", "def MSE(actual, noisy):\n mean_squared_error(actual, noisy)", "def mse(self, x_tensors=None):\n\n return self.se(x_tensors)/self.y.size", "def createDiscriminator(imgShape):\n model = tf.keras.Sequential()\n\n model.add(layers.Conv2D(32, kernel_size = 3, strides = 2, input_shape = imgShape, padding = \"same\"))\n model.add(layers.LeakyReLU(alpha = 0.2))\n\n model.add(layers.Dropout(0.25))\n model.add(layers.Conv2D(64, kernel_size = 3, strides = 2, padding = \"same\"))\n model.add(layers.ZeroPadding2D(padding = ((0,1), (0,1))))\n model.add(layers.BatchNormalization(momentum = 0.8))\n model.add(layers.LeakyReLU(alpha = 0.2))\n\n model.add(layers.Dropout(0.25))\n model.add(layers.Conv2D(128, kernel_size = 3, strides = 2, padding = \"same\"))\n model.add(layers.BatchNormalization(momentum = 0.8))\n model.add(layers.LeakyReLU(alpha = 0.2))\n\n model.add(layers.Dropout(0.25))\n 
model.add(layers.Conv2D(256, kernel_size = 3, strides = 1, padding = \"same\"))\n model.add(layers.BatchNormalization(momentum = 0.8))\n model.add(layers.LeakyReLU(alpha = 0.2))\n\n model.add(layers.Dropout(0.25))\n model.add(layers.Conv2D(512, kernel_size = 3, strides = 1, padding = \"same\"))\n model.add(layers.BatchNormalization(momentum = 0.8))\n model.add(layers.LeakyReLU(alpha = 0.2))\n\n model.add(layers.Dropout(0.25))\n model.add(layers.Flatten())\n model.add(layers.Dense(1, activation = \"sigmoid\"))\n\n return model", "def calculate_mse(e):\r\n return 1/2*np.mean(e**2)", "def discriminator_loss(self, real_output, fake_output):\n real_loss = self.cross_entropy(ones_like(real_output), real_output)\n fake_loss = self.cross_entropy(zeros_like(fake_output), fake_output)\n total_loss = real_loss + fake_loss\n return total_loss", "def _forward_densepose(self, features: Dict[str, torch.Tensor], instances: List[Instances]):\n if not self.densepose_on:\n return {} if self.training else instances\n\n features = [features[f] for f in self.in_features]\n # pdb.set_trace()\n if self.training:\n proposals, _ = select_foreground_proposals(instances, self.num_classes)\n # pdb.set_trace()\n features, proposals = self.densepose_data_filter(features, proposals)\n if len(proposals) > 0:\n proposal_boxes = [x.proposal_boxes for x in proposals]\n\n if self.use_decoder:\n features = [self.decoder(features)]\n\n features_dp = self.densepose_pooler(features, proposal_boxes)\n densepose_head_outputs = self.densepose_head(features_dp)\n densepose_predictor_outputs = self.densepose_predictor(densepose_head_outputs)\n densepose_loss_dict = self.densepose_losses(\n proposals, densepose_predictor_outputs, embedder=self.embedder\n )\n return densepose_loss_dict\n else:\n pred_boxes = [x.pred_boxes for x in instances]\n\n if self.use_decoder:\n features = [self.decoder(features)]\n\n features_dp = self.densepose_pooler(features, pred_boxes)\n if len(features_dp) > 0:\n densepose_head_outputs = self.densepose_head(features_dp)\n densepose_predictor_outputs = self.densepose_predictor(densepose_head_outputs)\n else:\n densepose_predictor_outputs = None\n\n # pdb.set_trace()\n # import imageio\n # ss = densepose_predictor_outputs.coarse_segm[0]\n # imageio.imwrite(\"tmp/coarse_segm_dp_argmax.png\", torch.argmax(ss,dim=0).detach().cpu().numpy())\n # imageio.imwrite(\"tmp/coarse_segm_dp_ch0.png\", ss[0].detach().cpu().numpy())\n # imageio.imwrite(\"tmp/coarse_segm_dp_ch1.png\", ss[1].detach().cpu().numpy())\n\n densepose_inference(densepose_predictor_outputs, instances)\n return instances", "def get_reg(self):\n loss = 0\n for name, m in self.net.named_children():\n if name.startswith('wave'):\n loss += m[0].GainLayer.get_reg()\n elif name.startswith('conv'):\n loss += 0.5 * self.wd * torch.sum(m[0].weight**2)\n loss += 0.5 * self.wd * torch.sum(self.fc1.weight**2)\n return loss", "def discriminator_model_lungs():\n # Initialize the weights\n init = tf.random_normal_initializer(0.0, 0.02)\n\n img_shape = (400, 400, 1)\n\n # Source and target image input\n source_img = tf.keras.Input(shape=img_shape)\n target_img = tf.keras.Input(shape=img_shape)\n\n # Concatenate images channel-wise\n src_tgt_img = Concatenate()([source_img, target_img]) # L : 400 x 400 x 1 # G: 200 x 200 x 1\n\n # C128\n d1 = Conv2D(filters=128, kernel_size=(4, 4), strides=(2, 2), padding='same', kernel_initializer=init)(\n src_tgt_img) # L: 200 x 200 x 128 # G: 100 x 100 x 128 # RF: 4\n d1 = LeakyReLU(alpha=0.2)(d1)\n\n # C256\n d2 = 
Conv2D(filters=256, kernel_size=(4, 4), strides=(2, 2), padding='same', kernel_initializer=init)(\n d1) # G: 100 x 100 x 256 # L: 50 x 50 x 256 # RF: 10\n d2 = BatchNormalization()(d2)\n d2 = LeakyReLU(alpha=0.2)(d2)\n\n # C512\n d3 = Conv2D(filters=512, kernel_size=(4, 4), strides=(2, 2), padding='same', kernel_initializer=init)(\n d2) # G: 50 x 50 x 512 # L: 25 x 25 x 512 # RF: 22\n d3 = BatchNormalization()(d3)\n d3 = LeakyReLU(alpha=0.2)(d3)\n d3 = ZeroPadding2D()(d3) # G: 52 x 52 x 512 # L: 27 x 27 x 512\n\n # Patch output\n d4 = Conv2D(filters=1, kernel_size=(3, 3), strides=(1, 1), padding='valid', kernel_initializer=init)(\n d3) # G: 50 x 50 x 1 # L: 25 x 25 x 1 # RF: 38\n output_patch = Activation('sigmoid')(d4)\n\n # Define model\n discriminator_model = tf.keras.Model([source_img, target_img], output_patch)\n return discriminator_model", "def forward(self, images):\n features = self.resnet(images)\n features = Variable(features.data)\n features = self.pooling(features)\n # print(features)\n features = features.view(features.size(0),-1)\n # print(features)\n # print(resnet.fc.in_features)\n features = self.bn(self.linear(features))\n return features\n # with torch.no_grad():\n # features = self.resnet(images)\n # features = features.reshape(features.size(0), -1)\n # features = self.bn(self.linear(features))\n # return features", "def mse(self, X, Y):\n\t\treturn mse_k(X, to_1_of_K(Y))", "def _define_discriminator_loss(self):\n real_d_loss = tf.reduce_mean(self._real_discriminator_out)\n real_d_loss = tf.negative(real_d_loss, name='real_discriminator_loss')\n gen_d_loss = tf.reduce_mean(self._gen_discriminator_out,\n name='gen_discriminator_loss')\n return tf.add(real_d_loss, gen_d_loss, name='discrminator_loss')", "def recursive_feature_elimination(self):\n\t\tsvc = SVC(kernel=\"linear\")\n\t\tself.model = Pipeline([\n\t\t\t('feature_selection', RFE(estimator=svc, n_features_to_select=8, step=10)),\n\t\t\t('classification', self.model)\n\t\t\t])", "def get_reduced_features(features, n_dims_RFE=1):\n training_set = list()\n training_labels = list()\n for family in features:\n feature_mat = features.get(family)\n for j in range(len(feature_mat)):\n training_set.append(feature_mat[j])\n training_labels.append(family)\n\n clf = svm.SVC(kernel='linear')\n clf_reduced = RFE(clf, n_dims_RFE, step=1)\n clf_reduced = clf_reduced.fit(training_set, training_labels)\n X_new = clf_reduced.transform(training_set)\n X_mask = clf_reduced.get_support()\n return X_new, X_mask", "def mse_with_var_regularization(y_true, y_pred):\n return K.mean(y_true)", "def forward(self, images):\n # assuming that the precomputed features are not already l2-normalized\n #x = l2norm(images.view( images.size(0), -1))\n #print(images.shape, self.fc )\n resnet_feat=torch.empty(len(images),2048)\n vse_feat=torch.empty(len(images),256)\n\n for idx, feat_concat in enumerate(images):\n #print(\"check\", feat_concat[:2048].shape, feat_concat[2048:].shape)\n #resnet_feat[idx,:] = feat_concat[:2048]\n vse_feat[idx,:] = feat_concat[2048:]\n x = self.relu(self.fc1(vse_feat.cuda()))\n x = self.relu(self.fc2(x))\n x = self.fc3(x)\n\n return x", "def mutation(self, base_offsprings, model_features_count) :", "def yield_loss(self, outputs, targets):\n return torch.sqrt(nn.MSELoss()(outputs, targets))", "def feature_worth(model, train):\n error = cv(data=train, folds=5, model=model)\n print error\n model.fit(train)\n for var in model.variables:\n print var\n print model.fitted_models[var].feature_importances_", "def loss_mmse(y_true, 
y_pred, y_mask):\n y_shape = tf.shape(y_true)\n border = 3\n max_pixels_shifts = 2*border\n size_image = HR_SIZE\n size_croped_image = size_image - max_pixels_shifts\n clear_pixels = size_croped_image*size_croped_image\n cropped_predictions = y_pred[:, border:size_image -\n border, border:size_image-border]\n\n X = []\n for i in range(max_pixels_shifts+1): # range(7)\n for j in range(max_pixels_shifts+1): # range(7)\n cropped_labels = y_true[:, i:i+(size_image-max_pixels_shifts),\n j:j+(size_image-max_pixels_shifts)]\n cropped_y_mask = y_mask[:, i:i+(size_image-max_pixels_shifts),\n j:j+(size_image-max_pixels_shifts)]\n\n cropped_y_mask = tf.cast(cropped_y_mask, tf.float32)\n\n cropped_predictions_masked = cropped_predictions*cropped_y_mask\n cropped_labels_masked = cropped_labels*cropped_y_mask\n\n total_pixels_masked = tf.reduce_sum(cropped_y_mask, axis=[1, 2])\n\n # bias brightness\n b = (1.0/total_pixels_masked)*tf.reduce_sum(\n tf.subtract(cropped_labels_masked, cropped_predictions_masked),\n axis=[1, 2])\n\n b = tf.reshape(b, [y_shape[0], 1, 1, 1])\n\n corrected_cropped_predictions = cropped_predictions_masked+b\n corrected_cropped_predictions = corrected_cropped_predictions*cropped_y_mask\n\n corrected_mse = (1.0/total_pixels_masked)*tf.reduce_sum(\n tf.square(\n tf.subtract(cropped_labels_masked,\n corrected_cropped_predictions)\n ), axis=[1, 2])\n X.append(corrected_mse)\n\n X = tf.stack(X)\n minim = tf.reduce_min(X, axis=0)\n return minim", "def test(model, dataloader, j = 0):\n model.float()\n model.to(device)\n model.eval()\n criterion = nn.MSELoss()\n val_loss = 0\n num_batches = 0\n variances = np.array(0)\n for i, data in enumerate(dataloader):\n num_batches += 1\n data, target = data[0].to(device), data[1].to(device)\n output = model(data)\n loss = criterion(output, target)\n variances = np.append(variances, output.detach().cpu())\n val_loss += loss.item()\n mean_loss = val_loss / num_batches\n print('VAL LOSS for {} : {}'.format(j, mean_loss))\n print('VAL MEAN VARIANCE for {} : {}'.format(j, variances.mean()))\n \n return mean_loss", "def mean_tweedie_deviance(y_true, y_pred, *, sample_weight=..., power=...):\n ...", "def forward(self, images):\n # assuming that the precomputed features are not already l2-normalized\n #x = l2norm(images.view( images.size(0), -1))\n #print(images.shape, self.fc )\n resnet_feat=torch.empty(len(images),2048)\n vse_feat=torch.empty(len(images),256)\n\n for idx, feat_concat in enumerate(images):\n #print(\"check\", feat_concat[:2048].shape, feat_concat[2048:].shape)\n resnet_feat[idx,:] = feat_concat[:2048]\n vse_feat[idx,:] = feat_concat[2048:]\n x1 = self.fc1(resnet_feat.cuda())\n\n x2 = self.relu(self.fc2(vse_feat.cuda()))\n x2 = self.relu(self.fc3(x2))\n x2 = self.scale(self.fc4(x2))\n #print(x2.shape, x1.shape, x1[:,:5000].shape)\n x = x1[:,:5000]+x2\n \n\n return x", "def sigma(self):\n with ops.name_scope(self.name):\n return self._cov.to_dense()", "def forward(self, feats_S, feats_T):\n losses = 0.\n for s, t in zip(feats_S, feats_T): # B,C,1/16\n t = t.detach() # context path feature\n B, C, H, W = t.shape\n # patch_h, patch_w = H // 2, W // 2 # max_pool 到 2x2 计算\n patch_h, patch_w = H // 4, W // 4 # 控制输出 feature map 的大小\n # todo: 可以考虑调小 pool size\n maxpool = nn.MaxPool2d(kernel_size=(patch_w, patch_h), stride=(patch_w, patch_h),\n padding=0, ceil_mode=True)\n loss = self.criterion(maxpool(s), maxpool(t)) # 2x2\n losses += loss\n return losses", "def _discriminator_loss(self, y_real: tf.Tensor, y_fake: tf.Tensor) -> tf.Tensor:\n\n 
loss = self.loss(tf.ones_like(y_real), y_real - y_fake)\n\n return tf.reduce_mean(loss)", "def forward(self, input):\n self.loss = F.mse_loss(input, self.target)\n return input", "def test_edge_features(self):\n k = [4, 4, 4, 4, 4]\n mn = self.create_chain_model(k)\n\n d = 3\n\n for i in range(5):\n mn.set_edge_features((i, i+1), np.random.randn(d))\n\n mn.create_matrices()\n mn.set_unary_weight_matrix(np.random.randn(4, 4))\n mn.set_edge_weight_matrix(np.random.randn(d, 16))\n\n bp = MatrixBeliefPropagator(mn)\n\n bp.infer()\n bp.load_beliefs()\n\n unconditional_marginals = bp.var_beliefs[4]\n\n bp.condition(0, 2)\n bp.infer()\n bp.load_beliefs()\n\n conditional_marginals = bp.var_beliefs[4]\n\n assert not np.allclose(unconditional_marginals, conditional_marginals), \\\n \"Conditioning on variable 0 did not change marginal of variable 4\"\n\n mn.set_edge_features((2, 3), np.zeros(d))\n mn.create_matrices()\n mn.set_unary_weight_matrix(np.random.randn(4, 4))\n mn.set_edge_weight_matrix(np.random.randn(d, 16))\n\n bp.infer()\n bp.load_beliefs()\n\n unconditional_marginals = bp.var_beliefs[4]\n\n bp.condition(0, 2)\n bp.infer()\n bp.load_beliefs()\n\n conditional_marginals = bp.var_beliefs[4]\n\n assert np.allclose(unconditional_marginals, conditional_marginals), \\\n \"Conditioning on var 0 changed marginal of var 4, when the features should have made them independent\"", "def model_loss(self,input_real,input_z,out_channel_dim):\t\r\n label_smooth = 0.9 \r\n \r\n #get output of generator\r\n gen_img, gen_logits = self.generator(input_z,out_channel_dim,True)\r\n\r\n\t#pass real image to dicriminator\r\n disc_model_real, disc_logits_real = self.discriminator(input_real)\r\n\t\r\n\t#pass generated image to dicriminator\r\n disc_model_fake, disc_logits_fake = self.discriminator(gen_img,reuse=True)\r\n \r\n\t \t\r\n disc_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=disc_logits_real,labels=label_smooth*tf.ones_like(disc_model_real))) \r\n disc_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=disc_logits_fake,labels=tf.zeros_like(disc_model_fake)))\r\n \r\n\r\n\t\"\"\"\r\n\tLoss for discriminator is sum of loss for real image and fake image \r\n\t\"\"\"\t\r\n disc_loss = disc_loss_real + disc_loss_fake\r\n \r\n\r\n \"\"\"\r\n\tTo find loss for generator, fake image is passed with label= real (0.9)\r\n\t\"\"\"\r\n gen_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=disc_logits_fake,labels=label_smooth*tf.ones_like(disc_model_fake)))\r\n \r\n return disc_loss,gen_loss,gen_img", "def se_block(x, num_features, cfg, name):\n x = basic_block(x, num_features, cfg, name)\n x = Multiply(name=f'{name}_scale')([x, squeeze_excitation(x, cfg.amplifying_ratio, name)])\n return x", "def combine_high_level_instance_features(crawl_dir):\n crawl_name = basename(normpath(crawl_dir))\n hi_level_feats_csv = join(dirname(dirname((dirname(realpath(__file__))))),\n \"data\",\n \"%s_hi_level_feats.csv\" % crawl_name)\n fability_scores_csv = join(dirname(dirname((dirname(realpath(__file__))))),\n \"data\",\n \"%s_fp_regression_labels.csv\" % crawl_name)\n # headers: url avg_f1 max_f1 avg_tpr max_tpr\n fability_df = pd.read_csv(fability_scores_csv, sep=',')\n df = pd.read_csv(hi_level_feats_csv, sep='\\t')\n # list of unique .onion domains\n domains = fability_df.url.unique()\n aggreage_feats = defaultdict(list)\n for domain in domains:\n instance_feats = df[df.i_site_domain == domain]\n # print domain, \"fability\", fability_df[fability_df.url == 
domain]\n for feat_name in instance_feats.columns:\n # Ignore features that starts with i_\n if feat_name.startswith(\"i_\"):\n continue\n feat_var_name = feat_name.replace(\"mo_\", \"var_\").replace(\"med_\", \"var_\")\n feat_std_dev_name = feat_name.replace(\"mo_\", \"stddev_\").replace(\"med_\", \"stddev_\")\n # print feat_name, \"STDDEV\", domain, instance_feats[feat_name].std()\n # add the variance\n # aggreage_feats[feat_var_name].append(instance_feats[feat_name].var())\n aggreage_feats[feat_std_dev_name].append(instance_feats[feat_name].std())\n if feat_name.startswith(\"mo_\"): # mode of the feature\n feat_mode = stats.mode(instance_feats[feat_name])[0][0]\n aggreage_feats[feat_name].append(feat_mode)\n elif feat_name.startswith(\"med_\"): # median of the feature\n aggreage_feats[feat_name].append(instance_feats[feat_name].median())\n else:\n print \"ERROR: Unrecognized high level feature name\", feat_name\n sys.exit(1)\n # add aggregate features to fability dataframe\n for feat in sorted(aggreage_feats.keys()):\n assert len(aggreage_feats[feat]) == 482\n fability_df[feat] = aggreage_feats[feat]\n # write the _aggregated high level feature file csv\n fability_df.to_csv(hi_level_feats_csv.replace(\".csv\", \"_aggregated.csv\"),\n sep=\"\\t\", index=False, index_label=False)", "def _score_fn(self, unused_context_features, group_features, mode, unused_params, unused_config):\n with tf.compat.v1.name_scope(\"input_layer\"):\n group_input = [\n tf.compat.v1.layers.flatten(group_features[name])\n for name in sorted(self.example_feature_columns())\n ]\n\n # if self.sparse_features:\n # self.sparse_emb_inputlist = [\n # tf.compat.v1.layers.flatten(group_features[name])\n # for name in self.sparse_features\n # ]\n\n self.group_input = group_input\n input_layer = tf.concat(self.group_input, 1)\n tf.compat.v1.summary.scalar(\"input_sparsity\",\n tf.nn.zero_fraction(input_layer))\n tf.compat.v1.summary.scalar(\"input_max\",\n tf.reduce_max(input_tensor=input_layer))\n tf.compat.v1.summary.scalar(\"input_min\",\n tf.reduce_min(input_tensor=input_layer))\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n cur_layer = tf.compat.v1.layers.batch_normalization(\n input_layer, training=is_training)\n for i, layer_width in enumerate(int(d) for d in self.hidden_layer_dims):\n cur_layer = tf.compat.v1.layers.dense(cur_layer, units=layer_width)\n cur_layer = tf.compat.v1.layers.batch_normalization(\n cur_layer, training=is_training)\n cur_layer = tf.nn.relu(cur_layer)\n tf.compat.v1.summary.scalar(\"fully_connected_{}_sparsity\".format(i),\n tf.nn.zero_fraction(cur_layer))\n\n cur_layer = tf.compat.v1.layers.dropout(\n cur_layer, rate=self.dropout_rate, training=is_training)\n logits = tf.compat.v1.layers.dense(cur_layer, units=self.group_size)\n self.logits = logits\n\n if self._use_multi_head():\n # Duplicate the logits for both heads.\n return {_PRIMARY_HEAD: logits, _SECONDARY_HEAD: logits}\n else:\n return logits", "def _initLoss(self):\n\n return torch.nn.MSELoss()", "def sgd(iterations):\n for iteration in range(0,iterations):\n error = []\n for user_id in range(0,latent_user_preferences.shape[0]):\n for item_id in range(0,latent_item_features.shape[0]):\n rating = user_ratings[user_id][item_id]\n if rating != 99:\n err = train(user_id, item_id, rating)\n error.append(err)\n mse = (np.array(error) ** 2).mean() \n if(iteration%1 == 0):#000 == 0 ):\n print(mse)\n return error", "def feature_func(ims):\n # Set eval mode\n # Force all BN layers to use global mean and variance, also disable\n # 
dropout.\n utils.may_set_mode(self.modules_optims, 'eval')\n ims = TVT(Variable(torch.from_numpy(ims).float()))\n feats, _ = self.googlenet(ims)\n feats = feats.data.cpu().numpy()\n return feats", "def se_block(x, ratio=8):\n se_feature = layers.GlobalAveragePooling2D()(x)\n channel = x._keras_shape[-1]\n se_feature = layers.Reshape((1, 1, channel))(se_feature)\n se_feature = layers.Dense(channel // ratio,\n activation='relu',\n kernel_initializer='he_normal',\n use_bias=True,\n bias_initializer='zeros')(se_feature)\n se_feature = layers.Dense(channel,\n activation='sigmoid',\n kernel_initializer='he_normal',\n use_bias=True,\n bias_initializer='zeros')(se_feature)\n se_feature = layers.multiply([x, se_feature])\n\n return se_feature", "def pipeline_rfe():\n\n\n\n #cols = [c for c in bank_df if bank_df[c].dtype == 'int64' or 'float64']\n #X_train = bank_df[cols].drop(columns = ['primary_merchant_name'], axis = 1)\n #y_train = bank_df['primary_merchant_name']\n #X_test = bank_df[cols].drop(columns = ['primary_merchant_name'], axis = 1)\n #y_test = bank_df['primary_merchant_name']\n\n #build a logistic regression and use recursive feature elimination to exclude trivial features\n log_reg = LogisticRegression(C = 1.0, max_iter = 2000)\n # create the RFE model and select most striking attributes\n rfe = RFE(estimator = log_reg, n_features_to_select = 8, step = 1)\n rfe = rfe.fit(X_train, y_train)\n #selected attributes\n print('Selected features: %s' % list(X_train.columns[rfe.support_]))\n print(rfe.ranking_)\n #following df contains only significant features\n X_train_rfe = X_train[X_train.columns[rfe.support_]]\n X_test_rfe = X_test[X_test.columns[rfe.support_]]\n #log_reg_param = rfe.set_params(C = 0.01, max_iter = 200, tol = 0.001)\n return X_train_rfe, X_test_rfe", "def sgd_mse_optimizer(model, config):\n learning_rate = config.get(\"lr\", 0.01)\n criterion = nn.MSELoss()\n optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)\n return criterion, optimizer", "def get_features(self, test_case):\n hidden_values = []\n for i in xrange(self.n_layers):\n if i == 0:\n layer_input = test_case\n else:\n layer_input = hidden_values[-1]\n\n hidden_value = self.sigmoid_layers[i].get_output(layer_input)\n hidden_values.append(hidden_value)\n \n # return the final hidden values\n return hidden_values[-1]", "def get_features(self, test_case):\n hidden_values = []\n for i in xrange(self.n_layers):\n if i == 0:\n layer_input = test_case\n else:\n layer_input = hidden_values[-1]\n\n hidden_value = self.sigmoid_layers[i].get_output(layer_input)\n hidden_values.append(hidden_value)\n \n # return the final hidden values\n return hidden_values[-1]", "def feature_dim(self):\n raise NotImplementedError", "def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d):\n super(FCNDiscriminator, self).__init__()\n if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters\n use_bias = norm_layer.func == nn.InstanceNorm2d\n else:\n use_bias = norm_layer == nn.InstanceNorm2d\n\n kw = 4\n padw = 1\n sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]\n nf_mult = 1\n nf_mult_prev = 1\n for n in range(1, n_layers): # gradually increase the number of filters\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n, 8)\n sequence += [\n nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True)\n ]\n\n nf_mult_prev = 
nf_mult\n nf_mult = min(2 ** n_layers, 8)\n # sequence += [\n # nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),\n # norm_layer(ndf * nf_mult),\n # nn.LeakyReLU(0.2, True)\n # ]\n self.model = nn.Sequential(*sequence)\n # sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)] # output 1 channel prediction map\n self.fcn = nn.Sequential(nn.ConvTranspose2d(ndf*nf_mult, 1, kernel_size=32, stride=32, padding=0),\n nn.Sigmoid()\n )\n self.downConv = nn.Sequential(\n nn.Conv2d(ndf * nf_mult, ndf * nf_mult, kernel_size=4, stride=2, padding=1, bias=use_bias),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True),\n nn.Conv2d(ndf * nf_mult, ndf * nf_mult, kernel_size=4, stride=1, padding=0, bias=use_bias),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True),\n nn.Conv2d(ndf * nf_mult, 1, kernel_size=3, stride=1, padding=padw)\n )", "def __call__(self, features):\n normal_f = []\n for f in features:\n ss = 0\n for i in f:\n ss += i**2\n ss = ss**(1/2)\n temp = []\n for i in f:\n if ss != 0:\n i = i/ss\n temp.append(i)\n normal_f.append(temp)\n return normal_f\n raise NotImplementedError", "def my_mse(y_true, y_pred):\n return tf.reduce_mean(tf.square(y_true - y_pred), axis=-1)", "def extract_feat(self, img):\r\n _, _, x = self.pre_encoder(img)\r\n x = self.backbone(x)\r\n if self.with_neck:\r\n x = self.neck(x)\r\n return x", "def build_discriminator(self):\n img_shape = (self.img_size[0], self.img_size[1], self.channels)\n\n model = Sequential()\n ###############\n # Conv Stack 1:\n ###############\n model.add(\n Conv2D(128, kernel_size=5, strides=2, input_shape=img_shape, padding=\"same\")\n ) # 128x128 -> 64x64\n\n model.add(LeakyReLU(alpha=0.2))\n model.add(BatchNormalization(momentum=0.8))\n # model.add(Dropout(0.2))\n\n ###############\n # Conv Stack 2:\n ###############\n model.add(\n Conv2D(128, kernel_size=5, strides=2, padding=\"same\")\n ) # 64x64 -> 32x32\n # model.add(ZeroPadding2D(padding=((0, 1), (0, 1))))\n\n model.add(LeakyReLU(alpha=0.2))\n model.add(BatchNormalization(momentum=0.8))\n # model.add(Dropout(0.25))\n\n ###############\n # Conv Stack 3:\n ###############\n model.add(\n Conv2D(128, kernel_size=4, strides=2, padding=\"same\")\n ) # 32x32 -> 16x16\n\n model.add(LeakyReLU(alpha=0.2))\n model.add(BatchNormalization(momentum=0.8))\n # model.add(Dropout(0.25))\n\n ###############\n # Conv Stack 4:\n ###############\n model.add(Conv2D(128, kernel_size=4, strides=1, padding=\"same\")) # 16x16 -> 8x8\n model.add(LeakyReLU(alpha=0.2))\n model.add(BatchNormalization(momentum=0.8))\n # model.add(Dropout(0.25))\n\n ###############\n # Conv Stack 5:\n ###############\n model.add(Conv2D(128, kernel_size=3, strides=1, padding=\"same\")) # 8x8 -> 4x4\n model.add(LeakyReLU(alpha=0.2))\n model.add(BatchNormalization(momentum=0.8))\n model.add(Dropout(0.4))\n\n model.add(Flatten())\n model.add(Dense(1, activation=\"sigmoid\")) # important binary classification.\n\n model.summary()\n\n # Model require Pair.\n img = Input(shape=img_shape)\n validity = model(img)\n\n return Model(img, validity)", "def mse_k(self, X, Y):\n\t\treturn np.power(Y - self.predict_soft(X), 2).sum(1).mean(0)", "def mean_gamma_deviance(y_true, y_pred, *, sample_weight=...):\n ...", "def _score_fn(context_features, group_features, mode, unused_params,\n\t\t\t\t\t\t\t\tunused_config):\n\t\twith tf.name_scope(\"input_layer\"):\n\t\t\tgroup_input = [\n\t\t\t\t\ttf.layers.flatten(group_features[name])\n\t\t\t\t\tfor name in 
sorted(example_feature_columns)\n\t\t\t]\n\t\t\tprint(group_input[0].shape)\n\t\t\tprint(group_input[0].dtype)\n\t\t\tcontext_input = [\n\t\t\t\t\ttf.layers.flatten(context_features[name])\n\t\t\t\t\tfor name in sorted(context_feature_columns)\n\t\t\t]\n\t\t\tprint(context_input[0].shape)\n\t\t\tprint(context_input[0].dtype)\n\t\t\tfinal_input = context_input + group_input\n\t\t\tinput_layer = tf.concat(final_input, 1)\n\t\t\ttf.summary.scalar(\"input_sparsity\", tf.nn.zero_fraction(input_layer))\n\t\t\ttf.summary.scalar(\"input_max\", tf.reduce_max(input_layer))\n\t\t\ttf.summary.scalar(\"input_min\", tf.reduce_min(input_layer))\n\n\t\tis_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\t\tcur_layer = tf.layers.batch_normalization(input_layer, training=is_training)\n\t\tfor i, layer_width in enumerate(int(d) for d in FLAGS.hidden_layer_dims):\n\t\t\tcur_layer = tf.layers.dense(cur_layer, units=layer_width)\n\t\t\tcur_layer = tf.layers.batch_normalization(cur_layer, training=is_training)\n\t\t\tcur_layer = tf.nn.relu(cur_layer)\n\t\t\ttf.summary.scalar(\"fully_connected_{}_sparsity\".format(i),\n\t\t\t\t\t\t\t\t\t\t\t\ttf.nn.zero_fraction(cur_layer))\n\t\tcur_layer = tf.layers.dropout(\n\t\t\t\tcur_layer, rate=FLAGS.dropout_rate, training=is_training)\n\t\tlogits = tf.layers.dense(cur_layer, units=FLAGS.group_size)\n\t\treturn logits", "def topGenes(X,Y,feature_name,class_len, feature_len, method, nb_samples, device, net): \n \n input_x = torch.from_numpy(X).float().to(device)\n if method == 'Shap':\n print(\"Running Shap Model... (It may take a long time)\")\n nb_samples = nb_samples\n rand_index = np.random.choice(input_x.shape[0], nb_samples, replace=True)\n background = input_x[rand_index]\n Y_rand = Y[rand_index].reshape(-1,1)\n Y_unique,Y_counts = np.unique(Y_rand,return_counts=True)\n # Create object that can calculate shap values and explain predictions of the model\n explainer = shap.DeepExplainer(net.encoder, background)\n # Calculate Shap values, with dimension (y*N*x) y:number of labels, N number of background samples, x number of features\n shap_values = explainer.shap_values(background)\n if method =='Captum_ig':\n baseline = torch.zeros((X.shape)).to(device)\n ig = IntegratedGradients(net.encoder)\n attributions, delta = ig.attribute(input_x, baseline, target=0, return_convergence_delta=True)\n if method =='Captum_dl':\n baseline = torch.zeros((X.shape)).to(device)\n dl = DeepLift(net.encoder)\n attributions, delta = dl.attribute(input_x, baseline, target=0, return_convergence_delta=True) \n if method =='Captum_gs':\n baseline_dist = (torch.randn((X.shape))* 0.001).to(device)\n gs = GradientShap(net.encoder)\n attributions, delta = gs.attribute(input_x, stdevs=0.09, n_samples=10, \\\n baselines=baseline_dist, target=0, return_convergence_delta=True) \n \n # Use the weight differences to do rank\n if class_len ==2:\n class_len = 1\n feature_rank = np.empty((feature_len,2*class_len), dtype=object) #save ranked features and weights\n # one class vs others\n for class_index in range(class_len):\n attributions_mean_list =[]\n Y_i = Y.copy()\n Y_i[ Y_i != class_index ] = class_index+1 # change to 2 class\n Y_unique,Y_counts = np.unique(Y_i,return_counts=True)\n # repeat 2 times\n for i in Y_unique:\n if method =='Shap':\n attributions_i = torch.from_numpy(shap_values[i]).float().to(device)\n else:\n attributions_i = attributions[Y_i==i] # find all X of each class\n attributions_mean = torch.mean(attributions_i, dim =0) \n attributions_mean_list.append(attributions_mean)\n # 
class_weight differences \n class_weight = attributions_mean_list[0] - attributions_mean_list[1] \n attributions_weight, index_sorted = torch.sort(class_weight, descending= True)\n attributions_name = np.array([feature_name[x] for x in index_sorted])\n attributions_weight = attributions_weight.detach().cpu()\n feature_rank[:,class_index*2 ] = attributions_name\n feature_rank[:,class_index*2+1 ] = attributions_weight \n \n # Save results as DAtaFrame \n mat_head = np.array(['topGenes' if x%2==0 else 'Weights' for x in range(class_len*2)])\n mat_head = mat_head.reshape(1,-1)\n mat = np.r_[mat_head ,feature_rank ]\n mat[1:, 1] = mat[1:,1]/float(mat[1,1])\n columns = ['Class'+str(int(x/2)+1) for x in range(class_len*2)] \n ind_df = ['Attributes']+ [str(x) for x in range(feature_len)]\n res = pd.DataFrame(mat,index=ind_df,columns=columns)\n return res", "def fclayer(in_features, out_features):\n fc = nn.Linear(in_features, out_features)\n nn.init.kaiming_normal_(fc.weight)\n return fc", "def _featurize(self, img):\n self._classifier.predict(img)\n return self._classifier.get_features()", "def mse_loss1_rgb(y_true,y_pred):\n y_true = tensor_ycbcr2rgb(y_true)/255.\n return ((tf.keras.losses.MSE(tf.expand_dims(y_true, axis=0),tf.expand_dims(y_pred, axis=0))))", "def loss_mse(self, coeffs, x_values, y_values):\n return np.mean(pow(self.f(x_values, coeffs) - y_values, 2))", "def forward(self, image):\n with torch.no_grad():\n img_feature = self.model(image) # [batch_size, vgg16(19)_fc=4096]\n img_feature = self.fc(img_feature) # [batch_size, embed_size]\n\n l2_norm = img_feature.norm(p=2, dim=1, keepdim=True).detach()\n img_feature = img_feature.div(l2_norm) # l2-normalized feature vector\n\n return img_feature", "def discriminator_loss_std(logits_real, logits_fake):\n bce_loss = nn.BCEWithLogitsLoss()\n labels_real = Variable(torch.ones(logits_real.size()), requires_grad=False).type(torch.FloatTensor)\n labels_fake = Variable(torch.zeros(logits_fake.size()), requires_grad=False).type(torch.FloatTensor)\n loss = bce_loss(logits_real, labels_real) + bce_loss(logits_fake, labels_fake)\n return loss" ]
[ "0.58933586", "0.5687825", "0.5670145", "0.5515114", "0.5477524", "0.54765", "0.54389805", "0.5409658", "0.5407765", "0.53701", "0.5356318", "0.5349838", "0.5349151", "0.5302134", "0.52876115", "0.5284638", "0.5263813", "0.52621937", "0.5254285", "0.52520597", "0.5234978", "0.5234978", "0.523051", "0.5228554", "0.52148885", "0.52148885", "0.52075684", "0.52056706", "0.5199554", "0.519953", "0.5183724", "0.5183073", "0.51284826", "0.5124226", "0.51201797", "0.5105501", "0.51023906", "0.508807", "0.5086922", "0.50799984", "0.50734276", "0.5072875", "0.5072361", "0.50668967", "0.50610995", "0.5053196", "0.50499016", "0.5047314", "0.5046037", "0.50449944", "0.5038255", "0.50371933", "0.5036709", "0.50278544", "0.50268495", "0.50241494", "0.50232047", "0.50177485", "0.5012367", "0.50122607", "0.50109506", "0.50092256", "0.5006707", "0.500531", "0.5004439", "0.500325", "0.50001216", "0.4999324", "0.49992862", "0.49950737", "0.49879375", "0.4985413", "0.49788135", "0.49786633", "0.49779767", "0.4971868", "0.4960106", "0.4959344", "0.49583933", "0.495785", "0.49484393", "0.49398214", "0.4932666", "0.4932666", "0.49292153", "0.49266812", "0.49261117", "0.49259055", "0.49247727", "0.49120474", "0.4911813", "0.49117196", "0.4907644", "0.48957276", "0.4894942", "0.48868036", "0.48853618", "0.48844877", "0.48823476", "0.48819548" ]
0.59223557
0
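The TensorFlow loss that closes the document above scores a prediction against every label crop shifted by up to `border` pixels, removes the masked brightness bias `b` for each shift, and keeps the smallest corrected MSE. A minimal NumPy sketch of the same idea for a single H×W image pair, assuming square inputs and a boolean clearance mask (the function name and signature are illustrative, not taken from the original code):

import numpy as np

def shift_tolerant_mse(y_true, y_pred, y_mask, border=3):
    # Crop the prediction so every (i, j) shift of the label still fits.
    size = y_pred.shape[0]
    crop = y_pred[border:size - border, border:size - border]
    best = np.inf
    for i in range(2 * border + 1):
        for j in range(2 * border + 1):
            labels = y_true[i:i + size - 2 * border, j:j + size - 2 * border]
            mask = y_mask[i:i + size - 2 * border, j:j + size - 2 * border].astype(np.float64)
            n = mask.sum()
            if n == 0:
                continue  # nothing visible under this shift
            b = ((labels - crop) * mask).sum() / n  # per-shift brightness bias
            err = (((labels - (crop + b)) * mask) ** 2).sum() / n
            best = min(best, err)
    return best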
Send basic information of channel
async def channel_info(bot, message):
    if isinstance(CHANNELS, (int, str)):
        channels = [CHANNELS]
    elif isinstance(CHANNELS, list):
        channels = CHANNELS
    else:
        raise ValueError("Unexpected type of CHANNELS")

    text = '📑 **Indexed channels/groups**\n'
    for channel in channels:
        chat = await bot.get_chat(channel)
        if chat.username:
            text += '\n@' + chat.username
        else:
            # Private chats/groups may have no username; fall back to title or first name.
            text += '\n' + (chat.title or chat.first_name)

    text += f'\n\n**Total:** {len(channels)}'

    if len(text) < 4096:
        await message.reply(text)
    else:
        file = 'Indexed channels.txt'
        with open(file, 'w') as f:
            f.write(text)
        await message.reply_document(file)
        os.remove(file)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def send_channel_message(self, status, data1=None, data2=None, ch=None):\n msg = [(status & 0xF0) | ((ch if ch else self.channel) - 1 & 0xF)]\n\n if data1 is not None:\n msg.append(data1 & 0x7F)\n\n if data2 is not None:\n msg.append(data2 & 0x7F)\n\n self._midi.send_message(msg)", "async def channel(self, ctx):\n pass", "def main(connection, info, conf) :\r\n connection.rawsend(\"NOTICE %s :\u0001TIME %s\u0001\\n\" % (info[\"sender\"], time.strftime(\"%b %d %Y, %H:%M:%S %Z\")))", "async def channel(self, ctx: commands.Context, channel: discord.TextChannel):\n self.channel = str(channel.id)\n await self._update_db()\n\n await ctx.send(f\"Done! {channel.mention} is the Starboard Channel now!\")", "async def channel(self, ctx, channel: discord.TextChannel):\r\n server = ctx.guild\r\n self._logs[str(server.id)][\"channel\"] = str(channel.id)\r\n dataIO.save_json(self._logs_file, self._logs)\r\n await ctx.send(f\"<#{str(channel.id)}> has been set as the modlog channel {self.bot.get_emoji(470063310386233344)}\")", "def __bot_info(self):\n log.debug(\"Displaying __bot_info\")\n self.bot.send_message(self.chat.id, self.loc.get(\"bot_info\"))", "async def greeter_channel(self, ctx, *, channel: discord.TextChannel):\n await queries.update_setting(ctx, \"greeter_settings\", \"channel_id\", channel.id)\n await util.send_success(ctx, f\"Greeter channel is now {channel.mention}\")", "def msg_chan_send(channel, value, version = NATIVE_HEADER_VERSION, order=\"<\"):\n return message_no_reply(CHAN_SEND, channel, value, version, order)", "def send_part(self, channel) -> None:\n\n self.send_line('PART {}'.format(channel))", "async def channel_stats(self, ctx, channel: discord.TextChannel = None):\n channel = channel or ctx.channel\n embed = discord.Embed(\n title=f\"Stats for **{channel.name}**\",\n description=f\"{'Category: {}'.format(channel.category.name) if channel.category else 'This channel is not in a category'}\",\n color=discord.Color.blurple(),\n )\n embed.add_field(name=\"Channel Guild\",\n value=ctx.guild.name, inline=False)\n embed.add_field(name=\"Channel Id\", value=channel.id, inline=False)\n embed.add_field(\n name=\"Channel Topic\",\n value=f\"{channel.topic if channel.topic else 'No topic.'}\",\n inline=False,\n )\n embed.add_field(name=\"Channel Position\",\n value=channel.position, inline=False)\n embed.add_field(\n name=\"Channel Slowmode Delay\", value=channel.slowmode_delay, inline=False\n )\n embed.add_field(name=\"Channel is nsfw?\",\n value=channel.is_nsfw(), inline=False)\n embed.add_field(name=\"Channel is news?\",\n value=channel.is_news(), inline=False)\n embed.add_field(\n name=\"Channel Creation Time\", value=channel.created_at, inline=False\n )\n embed.add_field(\n name=\"Channel Permissions Synced\",\n value=channel.permissions_synced,\n inline=False,\n )\n embed.add_field(name=\"Channel Hash\", value=hash(channel), inline=False)\n\n await ctx.message.delete()\n await ctx.send(embed=embed)", "def send_chat_message(self, channel, message):\r\n self._send(\"PRIVMSG #{0} :{1}\".format(channel, message))", "def send(self, msg):\n pass", "def send(self, msg):\n pass", "def send(self, msg):\n pass", "def say(self, channel_name: str, text: str) -> None:\n self.connection.privmsg(channel_name, text)", "async def setwelcomechannel(self, ctx, *, channel : discord.TextChannel = None):\n\n isAdmin = ctx.message.author.permissions_in(ctx.message.channel).administrator\n if not isAdmin:\n checkAdmin = self.settings.getServerStat(ctx.message.guild, \"AdminArray\")\n for role in 
ctx.message.author.roles:\n for aRole in checkAdmin:\n # Get the role that corresponds to the id\n if str(aRole['ID']) == str(role.id):\n isAdmin = True\n\n # Only allow admins to change server stats\n if not isAdmin:\n await ctx.channel.send('You do not have sufficient privileges to access this command.')\n return\n\n if channel == None:\n self.settings.setServerStat(ctx.message.guild, \"WelcomeChannel\", \"\")\n if self._getDefault(ctx.guild):\n msg = 'Welcome and goodbye messages will be displayed in the default channel (**{}**).'.format(self._getDefault(ctx.guild).mention)\n else:\n msg = \"Welcome and goodbye messages will **not** be displayed.\"\n await ctx.channel.send(msg)\n return\n\n # If we made it this far - then we can add it\n self.settings.setServerStat(ctx.message.guild, \"WelcomeChannel\", channel.id)\n\n msg = 'Welcome and goodbye messages will be displayed in **{}**.'.format(channel.mention)\n await ctx.channel.send(msg)", "def send(self, msg):\n self.message('Me', msg)", "def send(self, message):\n pass", "async def _info(self, ctx: Context):\n\n embed = discord.Embed(colour=await ctx.embed_colour())\n\n perm_int = discord.Permissions(268494928)\n\n data = await self.bot.application_info()\n invite_url = discord.utils.oauth_url(data.id, permissions=perm_int)\n\n embed.description = (\n \"TvM Assistant is a Discord bot with utility commands to make hosting TvMs easier.\"\n \"\\n\\nSome of the bot features include:\"\n \"\\n\\n- Setup roles and channel creation\"\n \"\\n- Management of sign-ups, sign-outs, spectators and replacements\"\n \"\\n- In-built logging to detect and ignore private channels\"\n \"\\n- Quick creation of player, mafia and spectator chats\"\n \"\\n- Vote counts and time since day/night started\"\n )\n\n links = (\n f\"\\n- [Invite to your server]({invite_url})\"\n f\"\\n- [Quickstart]({QUICKSTART})\"\n f\"\\n- [Commands Reference]({COMMANDS_REFERENCE})\"\n f\"\\n- [Source Code]({SOURCE_CODE})\"\n )\n\n embed.add_field(name=\"\\u200b\\nQuick Links\", value=links)\n embed.set_author(name=f\"About {ctx.me.name}\", icon_url=ctx.me.avatar_url)\n\n await ctx.send(embed=embed)", "async def info(ctx):\n embed = discord.Embed(title=\"Zane Bot\", description=\"All hail the hypnotoad!\", color=0x0091C5)\n\n # give info about you here\n embed.add_field(name=\"Author\", value=\"Zanexius\")\n\n # Shows the number of servers the bot is member of.\n embed.add_field(name=\"Server count\", value=f\"{len(bot.guilds)}\")\n\n # give users a link to invite thsi bot to their server\n embed.add_field(name=\"Invite\", value=\"[Invite link](<insert your OAuth invitation link here>)\")\n\n await ctx.send(embed=embed)", "async def _cmdf_setchannel(self, substr, msg, privilege_level):\n ch_obj = None\n if len(substr) == 0:\n ch_obj = msg.channel\n else:\n ch_obj = self._client.search_for_channel(substr, enablenamesearch=True, serverrestriction=self._server)\n\n if ch_obj is None:\n buf = \"**Error:** Channel not found. 
No changes were made.\"\n else:\n self._ch_msg_channelid = ch_obj.id\n self._save_settings()\n buf = \"In-channel greeting messages will now be sent in \" + utils.ch_to_mention(ch_obj) + \".\"\n await self._client.send_msg(msg, buf)\n return", "def part(self, channel, message=\"\"):\n time.sleep(1)\n self.s.send(\"PART %s%s\\n\" % (channel, (message and (\" :\" + message))))\n logger.log(\"PART %s%s\" % (channel, (message and (\" :\" + message)))).LogSend()", "def set_channel(self, channel):\n self.l1.setText(\"Channel: \" + str(channel))", "def send_msg():\n\tmessage = \"%s %s %d\\n\" % (metric, activeDAHDIChannels, int(time.time()))\n\t# print 'sending message:\\n%s' % message\n\tcarbonSocket = socket.socket()\n\tcarbonSocket.connect((CARBON_HOST, CARBON_PORT))\n\tcarbonSocket.sendall(message)\n\tcarbonSocket.close()\n\tlast_send = int(time.time())", "def channel_help(message):\n message.reply(Strings['HELP'].format(config.HELP_URL))", "async def send_initial_message(self, ctx: Context, channel: discord.TextChannel) -> discord.Message:\n\n return await channel.send(embed=self.embed)", "def channel(self):\n raise NotImplementedError", "async def send(self):", "async def _cmdf_chdemo(self, substr, msg, privilege_level):\n buf = \"**- - - - - In-Channel Greeting Demo - - - - -**\\n\"\n buf += self._get_ch_greeting(msg.author)\n buf += \"\\n**- - - - - In-Channel Greeting Demo - - - - -**\"\n await self._client.send_msg(msg, buf)\n return", "def action(self, channel, txt, *args, **kwargs):\n pass", "def send(self, data):", "async def on_ready():\n # channel = client.get_channel(695669957891194952)\n # await channel.send(\"Who wants to play The Game of 99?\")\n print(\"Who wants to play The Game of 99?\")", "async def setup(self, ctx):\n self.report_channel = ctx.message.channel\n with open('data/report_channel.json', 'w') as f:\n json.dump({\"channel\": self.report_channel.id}, f)\n await ctx.send('This channel is now the report channel')", "def sendMessage(sock, message):\n messageTemp = \"PRIVMSG \" + channel +\" :\" +message\n sock.send((messageTemp+ \"\\n\").encode())", "async def info(self, ctx):\n\t\tembed = discord.Embed(\n\t\t\tdescription=\"Created By Seperoph#1399 and AkaBaka#4654\",\n\t\t\tcolor=config[\"success\"]\n\t\t)\n\t\tembed.set_author(\n\t\t\tname=\"Bot Information\"\n\t\t)\n\t\tembed.add_field(\n\t\t\tname=\"Head Programmers:\",\n\t\t\tvalue=\"Seperoph#1399 and AkaBaka#4654\",\n\t\t\tinline=True\n\t\t)\n\t\tembed.add_field(\n\t\t\tname=\"Python Version:\",\n\t\t\tvalue=f\"{platform.python_version()}\",\n\t\t\tinline=True\n\t\t)\n\t\tawait ctx.respond(embed=embed)", "def channel(self):\n\n self._channel = self._connection.channel()\n print(\"Channel opened...\")", "async def on_member_join(member: discord.Member):\n for channel in member.server.channels:\n print(channel)\n if channel == \"general\":\n await member.send(f\"\"\"Welcome to the server {member.mention}!\"\"\")", "def send(self):\n # Copy the base packet then add the channel array\n packet = self._base_packet[:]\n packet.extend(self._channels)\n self._socket.sendto(packet, (self._host, self._port))\n logging.debug(\"Sending Art-Net frame\")", "def sendMsg(self, channel, message, length=None):\n self.logger.info(\"Sending in %s: %s\" % (channel, message))\n self.msg(channel, message, length)", "def main(connection, info, args, conf) :\r\n connection.rawsend(\"KICK %s %s :%s\\n\" % (info[\"channel\"], args[1], \" \".join(args[2:])))", "def send(self):\n raise NotImplementedError()", "def send(self, msg):\n return 
self._channel_action(msg, 1)", "def addchan(channel):", "async def info(self, ctx):\n\n level = await self.get_player_level(ctx.author)\n embed = discord.Embed()\n embed.colour = discord.Colour.blurple()\n embed.set_author(name=str(ctx.author), icon_url=ctx.author.avatar_url)\n\n embed.title = f'Your current level : {level}'\n\n embed.add_field(name='Question', value=f'{self.enigmas[level][\"question\"]}')\n\n embed.set_footer(text='I love Ducks')\n\n await ctx.send(embed=embed)", "async def send(self, message):", "async def guild(ctx):\n print(ctx.channel)\n if ctx.channel.name.lower() in channels:\n await ctx.send(f\"\"\"guild: {ctx.guild.name}\"\"\")", "async def genericAnnounce(self, ctx, message):\n server = ctx.message.server\n server_dict = self.get_server_dict(ctx)\n\n try:\n channelId = server_dict['Transaction Channel']\n channel = server.get_channel(channelId)\n await self.bot.send_message(channel, message)\n await self.bot.say(\"Done\")\n except KeyError:\n await self.bot.say(\":x: Transaction log channel not set\")", "def __init__(self, channel, name):\n self._channel = channel\n self.name = name", "def cmd_channel_greet(self, c, e):\n c.privmsg(e.target(), 'Greetings %s!' % nm_to_n(e.source()))", "def _send_data(self):\n pass", "def send(self, x):\n print x", "def send_message(self, message):\n pass", "async def 서버(self, ctx):\n if isinstance(ctx.channel, discord.DMChannel) or ctx.guild.id != 749595288280498188:\n return await ctx.send(f\"**여기로! {ctx.author.name} 🍻\\n<{self.config.botserver}>**\")\n\n await ctx.send(f\"**{ctx.author.name}** 이게 제 집이잖아요~ :3\")", "def test_message_basic(self):\n transport = MockTransport(\"localhost\")\n rcomm = rc.RobotComm(transport)\n channel = rcomm.new_channel(\"testChannel\")\n remotenode = transport.new_remotenode(\"localhost\")\n channel.bind_to_remote_node(remotenode)\n try:\n rcomm.start_listening()\n\n testmessage = \"test message\"\n channel.start_receiving_messages()\n channel.send_message(\"MYTYPE\", testmessage)\n\n rmsg = None\n while not rmsg:\n rmsg = channel.poll_received_message()\n time.sleep(0.01)\n\n for stats in rcomm.get_channel_statistics():\n print(stats)\n\n channel.stop_receiving_messages()\n finally:\n rcomm.stop_listening()\n rcomm.close()\n self.assertTrue(rmsg)\n self.assertEqual(testmessage, rmsg.message)", "async def connect(self, channel_id: int):\n payload = {\n 'op': 4,\n 'd': {\n 'guild_id': self.guild_id,\n 'channel_id': str(channel_id),\n 'self_mute': False,\n 'self_deaf': False\n }\n }\n await self._bot._connection._get_websocket(int(self.guild_id)).send(json.dumps(payload))", "async def defchannel(self, ctx, channel: str):\n self.data_check(ctx)\n server = ctx.message.server\n\n self.riceCog2[server.id][\"defchannel\"] = channel\n dataIO.save_json(self.warning_settings,\n self.riceCog2)\n await self.bot.say(\"Log channel is now: **{}**\".format(channel))", "def _construct_message(self):\n self.message = {\"token\": self._auth, \"channel\": self.channel}\n super()._construct_message()", "def channels(message):\n for channel in message._client.channels:\n if 'is_member' in channel:\n message.reply(\"{} ({})\".format(channel['name'], channel['id']))\n elif 'is_im' in channel:\n #print(channel)\n friendlyname = channel['user']\n try:\n friendlyname = channel['user'][\"name\"]\n except (KeyError, AttributeError):\n pass\n message.reply(\"User channel: {} ({})\".format(friendlyname,\n channel['id']))", "def handle_message(self, data, channel):\n pass", "async def botinfo(ctx):\n colour = 
''.join([random.choice('0123456789ABCDEF') for x in range(6)])\n colour = int(colour, 16)\n embed = discord.Embed(colour = discord.Colour(value = colour), timestamp = datetime.datetime.utcnow())\n embed.add_field(name='Bot Info', value = \"I'm made with the library Discord.py Async.\"\n \" I'm developed by Shutdown.py#2406. \"\n \"If you need any help with me, Join my [devs' server](https://discord.gg/X4CJdEM).\"\n \"Send feedback using the feedback command\")\n embed.add_field(name='Total Commands', value=(len(bot.commands)))\n embed.add_field(name = 'Invite Me!', value = '[Invite](https://discordbots.org/bot/399115688792424448)')\n embed.set_footer(text= \"{} | Requested by: {} at\".format(version, ctx.message.author))\n await bot.say(embed = embed)", "def send_game_info( game, client_key, from_name, send_message_func ): # TODO: change game to lobby?\n\n game_info = message.Message( client_key, 'd' )\n new_message = game_info.new_message(from_name, game.game.game_name, game.get_player_names(),\n game.game.min_players, game.game.max_players, game.get_time_till_start())\n game_info.message = new_message\n game_info.to_clients = [ client_key ]\n\n send_message_func( game_info )", "def send(self, data):\n pass", "async def send_shortlived_message(self, message, channel, duration=5):\n pass", "def send(self, event, message):\n pass", "def channels(message):\n load_users(message._client.users)\n for x in message._client.channels:\n chan = message._client.channels[x]\n if 'is_member' in chan:\n if chan['is_member']:\n message.reply(\"{} ({})\".format(chan['name'], chan['id']))\n# message.reply(pretty_json(chan, True))\n elif 'is_im' in chan:\n print(chan)\n friendlyname = chan['user']\n try:\n friendlyname = chan['user'].name\n except KeyError:\n pass\n message.reply(\"User channel: {} ({})\".format(friendlyname,\n chan['id']))", "async def setwelcome(self, ctx, *, message = None):\n\n isAdmin = ctx.message.author.permissions_in(ctx.message.channel).administrator\n if not isAdmin:\n checkAdmin = self.settings.getServerStat(ctx.message.guild, \"AdminArray\")\n for role in ctx.message.author.roles:\n for aRole in checkAdmin:\n # Get the role that corresponds to the id\n if str(aRole['ID']) == str(role.id):\n isAdmin = True\n # Only allow admins to change server stats\n if not isAdmin:\n await ctx.channel.send('You do not have sufficient privileges to access this command.')\n return\n\n if message == None:\n self.settings.setServerStat(ctx.message.guild, \"Welcome\", None)\n await ctx.channel.send('Welcome message removed!')\n return\n\n self.settings.setServerStat(ctx.message.guild, \"Welcome\", message)\n await ctx.channel.send('Welcome message updated!\\n\\nHere\\'s a preview:')\n await self._welcome(ctx.message.author, ctx.message.guild, ctx.message.channel)\n # Print the welcome channel\n welcomeChannel = self.settings.getServerStat(ctx.message.guild, \"WelcomeChannel\")\n if welcomeChannel:\n for channel in ctx.message.guild.channels:\n if str(channel.id) == str(welcomeChannel):\n welcomeChannel = channel\n break\n if welcomeChannel:\n msg = 'The current welcome channel is **{}**.'.format(welcomeChannel.mention)\n else:\n if self._getDefault(ctx.guild):\n msg = 'The current welcome channel is the default channel (**{}**).'.format(self._getDefault(ctx.guild).mention)\n else:\n msg = 'There is *no channel* set for welcome messages.'\n await ctx.channel.send(msg)", "def send_message(self, cmd_id, message_type, status, message=None):\n pass", "def part(self, channel):\n raise 
NotImplementedError", "async def _spec_chat(\n self, ctx: Context, *, channel: discord.TextChannel = None\n ):\n\n guild: discord.Guild = ctx.guild\n\n if not channel:\n channel = await guild.create_text_channel(\"spec-chat\")\n\n overwrites = {\n guild.default_role: discord.PermissionOverwrite(\n read_messages=False\n )\n }\n\n visible_roles = []\n\n host_id = await self.config.guild(guild).host_id()\n if host_id:\n visible_roles.append(\n discord.utils.get(guild.roles, id=host_id)\n )\n\n spec_id = await self.config.guild(guild).spec_id()\n if spec_id:\n visible_roles.append(\n discord.utils.get(guild.roles, id=spec_id)\n )\n\n dead_id = await self.config.guild(guild).dead_id()\n if dead_id:\n visible_roles.append(\n discord.utils.get(guild.roles, id=dead_id)\n )\n\n for role in visible_roles:\n overwrites[role] = discord.PermissionOverwrite(\n read_messages=True,\n send_messages=True\n )\n\n await channel.edit(overwrites=overwrites)", "def sendchat(self, the_id, msg):\r\n the_id = Client.toroomid(the_id)\r\n self.tx_cmd(FCTYPE.CMESG, the_id, 0, 0, msg)\r\n #@TODO - Emote encoding\r", "def send(self, mtype, **kwargs):\n\n self.transport.write(Message(mtype, **kwargs))\n\n if mtype == 'interested':\n self.am_interested = True\n elif mtype == 'not_interested':\n self.am_interested = False\n elif mtype == 'choke':\n self.am_choking = True\n elif mtype == 'unchoke':\n self.am_choking = False", "def channelinfo(self):\n\n return ChannelInfo(\n self._filetextbox.text(),\n self._idtextbox.text(),\n self._datafilebox.text()\n )", "async def part(self, channel : str):\n await self._connection.part(channel)", "def send(self):\r\n if self.connection:\r\n self.connection.send(self.getLine())\r\n else:\r\n print \"(0) message without connection could not be sent\"", "def __send(self) -> None:\n # region Docstring\n # endregion\n\n if len(self.entryline.get_text().strip()) > 0:\n self.udp.transmission(\n \"CHA\", \"01\", self.username, self.entryline.get_text().strip()\n )\n self.__addmsg(f\"<b>(YOU): </b><br>{self.entryline.get_text().strip()}<br>\")\n self.entryline.set_text(\"\")", "def write(self, cmd):\n self._chan.send(\"{}\\n\".format(cmd))\n logger.debug(\"sent '{}'\".format(cmd))", "def whenWriteReady(self, channel, call):", "def handle_datachan(bot, event):\n event.reply(event.chan.data.tojson())", "def handle(self):\n\n self.connection.client_version = self.packet[\"version\"]\n self.connection.client_name = self.packet[\"name\"]\n\n self.conn.send(smpacket.SMPacketServerNSCHello(\n version=128,\n name=self.server.config.server[\"name\"]))", "def _update_channel(self, channel, data):\n logging.info(\"Update channel `%s' information in DB\", data['name'])\n\n channel.update(data)\n channel.user = self.q(o.User).filter(o.User.slackid ==\n data['creator']).one_or_none()\n channel.purpose = self._get_create_obj(data['purpose'], o.Purpose,\n channel)\n channel.topic = self._get_create_obj(data['topic'], o.Topic, channel)\n self.session.flush()", "def sendHandshake(self):\n handshake = {\"channel\": \"/meta/handshake\",\n \"version\": \"1.0\",\n \"supportedConnectionTypes\": [\"websocket\"],\n \"id\": str(self.id)}\n self.sendMessage(json.dumps(handshake).encode('utf-8'))\n # Increment sending id, see GroupMe push api\n self.id += 1", "async def roominfo(self, ctx: Message):\n\t\tawait self.send(\n\t\t f\"Name: {self.room.name} • Description: {self.room.description} • ID: {self.room.id} • Member Count: {self.room.count} • Created at: {self.room.created_at} • Is Private?: 
{self.room.is_private}\"\n\t\t)", "def sendmessage(self):\n \n self.message.parentItem = self.rxtxcontroller.transmittable.rootItem\n self.message.can_id = self.idInput.toPlainText()\n self.message.dlc = self.lengthInput.value()\n self.message.cycle_time = self.cycleInput.toPlainText()\n self.message.time = int(round(time.time() * 1000))\n self.message.rxtx = \"TX\"\n self.message.count = 1\n self.message.data = self.dataInput.toPlainText()\n self.accept()", "async def setjoinlogchannel(self, ctx, channel):\r\n guild = ctx.message.guild\r\n channel = discord.utils.get(guild.channels, name=channel)\r\n functions.updatesql(server=ctx.guild.id, joinchannel=channel.id)\r\n await ctx.send(embed=discord.Embed(title='Sucsessful!'))", "def send_command(self):\n button = self.sender()\n answer: str = self.UsbHost.send_command(self.state.ser, self.command_dict[button], str(self.state.device_id))\n if answer == 'Ok':\n self.statusbar.showMessage(self.result_dict[button])\n else:\n error_message(self.error_dict[button])\n self.statusbar.showMessage(answer_translate[answer])\n self.create_log_message(self.command_dict[button], answer, \"\")", "def transmit(self, message):\n pass", "def send_connection_information(self):\n return self.connection_information", "def start_message(self, update, context):\n\n user = self.User(update)\n output = \"Greetings, we're happy that you decided to join and use the Bus4U service!\\n\" \\\n \"in order to see all the possible commands you can type /help\\n\" \\\n \"Also we want you to know that every command that you type and the server response will\" \\\n \"be logged and you can access your history with /history.\\n\\n\" \\\n \"we hope you'll enjoy the product and wish you the best.\\n Never Miss a Bus.\"\n user.send_message(output)\n self.data_base.log(user, \"*Showed Greeting Message*\")", "def send(self, command):\n if not self.debug:\n self.socket.send(command)\n logging.debug(\"SEND %s\" % command)\n else:\n logging.info(\"SEND %s\" % command)", "def send(self, message, sender):\n chatclient.receive_chat_message(message, sender)\n return {}", "def massage_addinfo(self) -> str:\n self.message_str= \"{}, {}\\n\".format(self.sent_by, self.time)", "def hop_channel(self, channel):\n self.logger.info(\"Hopping to channel %s\", channel)\n os.system(f\"iwconfig {self.interface} channel {channel}\")", "async def send(self, channel=None, **kwargs):\n\n if \"user\" in kwargs:\n api_call = self.client.chat_postEphemeral\n\n else:\n api_call = self.client.chat_postMessage\n\n return await api_call(\n channel=channel or self.channel,\n # contents of messenger[UserDict]\n **self,\n # any other API fields provided by Caller\n **kwargs,\n )", "def _send(self, message):\n logger.info(message)\n self.buffer.put(message)", "async def CoM9000(self, ctx):\n me = CoachService.discord_user_to_coach(ctx.author)\n data = getattr(special_play, inspect.currentframe().f_code.co_name)(ctx.channel.name, me)\n await self.send_embed(data, ctx)", "def on_welcome(self, raw_msg, server, port, nickname, **kwargs):", "async def send_message(self, channel : str, message : str):\n await self._connection.send_message(channel, message)", "def info(self, msg, *args, **kwargs):\n pass", "def send_irc_message(self, event):\n\n self.log('Transmitting IRC message', lvl=debug)\n\n self.fireEvent(PRIVMSG(event.username, \"[%s] %s : %s\" % (event.msg_type, event.subject, event.body)))", "async def info(self, ctx):\n self.logger.info(misolog.format_log(ctx, f\"\"))\n appinfo = await 
self.client.application_info()\n membercount = sum(1 for x in self.client.get_all_members())\n info_embed = discord.Embed(title=f\"Miso Bot | version {main.version}\",\n description=f\"Created by {appinfo.owner.mention}\\n\\n\"\n f\"Use `{self.client.command_prefix}help` to get the list of commands, \"\n f\"or visit the documention website for more help.\"\n f\"\\n\\nCurrently active in **{len(self.client.guilds)}** \"\n f\"servers totaling **{membercount}** unique users\",\n colour=discord.Colour.red())\n\n # info_embed.set_footer(text=f'version 2.0')\n info_embed.set_thumbnail(url=self.client.user.avatar_url)\n info_embed.add_field(name='Github', value='https://github.com/joinemm/miso-bot', inline=False)\n info_embed.add_field(name='Documentation', value=\"http://joinemm.me/misobot\", inline=False)\n info_embed.add_field(name='Patreon', value=\"https://www.patreon.com/joinemm\", inline=False)\n await ctx.send(embed=info_embed)" ]
[ "0.6436497", "0.6348447", "0.63190854", "0.62161165", "0.62013686", "0.6185579", "0.6169274", "0.6159009", "0.61348945", "0.61177874", "0.61107844", "0.61066043", "0.61066043", "0.61066043", "0.6086329", "0.60670966", "0.6052061", "0.60225135", "0.60011584", "0.59927464", "0.5983792", "0.59827304", "0.59801906", "0.59767556", "0.59682685", "0.5965726", "0.595448", "0.59449774", "0.5925171", "0.5919992", "0.59037465", "0.58653975", "0.58522356", "0.58493346", "0.5846947", "0.5845027", "0.58439755", "0.58347815", "0.5821641", "0.58207995", "0.5802796", "0.58026266", "0.5800993", "0.579934", "0.5784211", "0.57782656", "0.5767403", "0.57497984", "0.57427716", "0.57368517", "0.57336307", "0.5731375", "0.5730207", "0.57150275", "0.57142925", "0.5710924", "0.57061106", "0.5705187", "0.56978196", "0.56849277", "0.5683583", "0.5671442", "0.5666505", "0.566624", "0.5666006", "0.56638145", "0.5648956", "0.5646013", "0.56430846", "0.563452", "0.5631383", "0.56204486", "0.5617148", "0.56152904", "0.56072915", "0.56046367", "0.56039643", "0.56021696", "0.5601457", "0.56014305", "0.5598454", "0.5594901", "0.5592579", "0.5589757", "0.55744904", "0.5574312", "0.55738246", "0.55700773", "0.55617684", "0.55606145", "0.5559216", "0.5536974", "0.5536547", "0.5536345", "0.55268574", "0.5515485", "0.55133957", "0.55117387", "0.5511553", "0.55107296" ]
0.59144914
30
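The `channel_info` handler above is written in the style of a Pyrogram bot (`bot.get_chat`, `message.reply`, `message.reply_document`). A minimal sketch of how such a handler might be wired to a command, assuming a Pyrogram `Client` named `app` and that `channel_info` and `CHANNELS` are importable; the command name and session name are illustrative:

from pyrogram import Client, filters

# Assumed session name; api_id/api_hash would come from config at runtime.
app = Client("media_search_bot")

@app.on_message(filters.command("channels"))
async def channels_command(bot, message):
    # Delegates to the channel_info coroutine shown above.
    await channel_info(bot, message)

if __name__ == "__main__":
    app.run()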
Show total files in database
async def total(bot, message):
    msg = await message.reply("Processing...⏳", quote=True)
    try:
        # Count every media document currently saved in the database.
        total = await Media.count_documents()
        await msg.edit(f'📁 Saved files: {total}')
    except Exception as e:
        logger.exception('Failed to check total files')
        await msg.edit(f'Error: {e}')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def total_files(self):\n command = \"SELECT searched FROM options;\"\n return self.c.execute(command)", "def fs_files_total(self):\n return self._fs_files_total", "def getFileCount(self) -> int:\n ...", "def totalfiles(self):\n return len([sz for sz in self.iterate()])", "def fileCount(self):\n pass", "def file_num(self):\n command = \"SELECT COUNT(id) FROM files;\"\n return self.c.execute(command)", "def file_count(self) -> str:\n return pulumi.get(self, \"file_count\")", "def calc_total_rows(self):\n #total_rows = len(self.file_list) - 1 # Minus header\n print('Total number of rows: ' + str(self.tot_rows))\n results.append('Total number of rows: ' + str(self.tot_rows))", "def n_total_files(self):\n return len(self.fileinfo)", "def total_files_to_process(self) -> float:\n return pulumi.get(self, \"total_files_to_process\")", "def getCountFiles():\n result = 0\n session = Queries.createSession()\n try:\n result = session.execute(func.count(FileTable.id)).fetchone()[0]\n except sqlalchemy.exc.ArgumentError:\n print 'SQLAlchemy ERROR: Invalid or conflicting function argument is supplied'\n except sqlalchemy.exc.CompileError:\n print 'SQLAlchemy ERROR: Error occurs during SQL compilation'\n finally:\n session.close()\n return result", "def numberFiles(self):\n return self.n", "def file_size():\n return render_template(\"file_size.html\", file_size=file_size())", "def fileCounter(directory):", "def get_space_used():\n files = jobtracker.query(\"SELECT * FROM files \" \\\n \"WHERE status IN ('added', 'downloaded', 'unverified')\")\n\n total_size = 0\n for file in files:\n total_size += int(file['size'])\n return total_size", "def getFileCount(self):\n\n if self.filecount == -1:\n self.filecount = self.db.filecount()\n\n return self.filecount", "def getFileCount(self, startingWithPath=\"\"):\n return self.__controller._getRecordsCount(startingWithPath)", "def dirsize(self):\n total = 0\n for p in self.select_file(recursive=True):\n try:\n total += p.size\n except: # pragma: no cover\n print(\"Unable to get file size of: %s\" % p)\n return total", "def count_total():\r\n trans = transaction.begin()\r\n StatBookmarkMgr.count_total_bookmarks()\r\n trans.commit()", "def target_totalfiles(self):\n return self._cfg.get('totalfiles', None)", "def get_amount_of_data(directory: str):\n size = sum([os.path.getsize(os.path.join(directory, item)) for item in os.listdir(directory) if os.path.isfile(os.path.join(directory, item))])\n print(size)\n return size", "def total_number():\r\n total_number = 0\r\n file_read = read_file()\r\n for key in file_read:\r\n total_number = total_number + len(file_read[key])\r\n return total_number", "def count_files(self):\n self.file_count = 0\n self.count_files_loop(self.dirpath)\n return", "def __number_of_files(self):\n self.__get_files()\n return len(self.files)", "def count_likes(db, filename):\n cur = db.cursor()\n sql = \"\"\"\n select count(filename) from likes where filename=?;\n \"\"\"\n cur.execute(sql, (filename,))\n like_sum = cur.fetchone()[0]\n return like_sum", "def get_total_line_counts(self):\n return get_total_line_counts(self.files.all())", "def __str__(self):\n\n obj = self()\n return \"Total number of files is {} \" \\\n \"and their size is {} bytes\".format(*obj)", "def n_file(self):\n self.assert_is_dir_and_exists()\n n = 0\n for _ in self.select_file(recursive=True):\n n += 1\n return n", "def Count_Documents(db):\r\n \r\n count = db.Transaction.estimated_document_count()\r\n print(\"Number of documents in the database Transaction: \" + 
str(count) + \".\\n\")\r\n return count", "def totalFiles(pathCopyData, pathNetCDF, dateInit, dateFinal):\n dateInit = datetime.strptime(dateInit, '%Y-%m-%d')\n dateFinal = datetime.strptime(dateFinal, '%Y-%m-%d')\n dirr = pathCopyData\n dirr2 = pathNetCDF\n #name = 'wrfout_c1h_d01_\\d\\d\\d\\d-\\d\\d-\\d\\d_00:00:00.\\d\\d\\d\\d.nc'\n name = 'wrfout_c1h_d01_\\d\\d\\d\\d-\\d\\d-\\d\\d_00:00:00.a\\d\\d\\d\\d'\n date = '\\d\\d\\d\\d-\\d\\d-\\d\\d'\n fil = []\n ba = []\n patron2 = re.compile(date)\n patron = re.compile(name + '.*')\n for base, dirs, files in os.walk(dirr2, topdown=True):\n for value in files:\n if patron.match(value) != None:\n f = patron2.findall(value)\n dateNetCDF = datetime.strptime(f[0], '%Y-%m-%d')\n if (dateNetCDF < dateFinal) & (dateNetCDF > dateInit):\n fil.append(value)\n ba.append(base)\n fdata = df.DataFrame(fil, columns=['nameFile'])\n fbase = df.DataFrame(ba, columns=['nameBase'])\n fdata.to_csv(dirr + 'tfile.txt', encoding='utf-8', index=False)\n fbase.to_csv(dirr + 'tbase.txt', encoding='utf-8', index=False)", "def total_file_length(self):\n if self.is_multi_file():\n return sum([file['length'] for file in self.torrent['info']['files']])\n else:\n # single file\n return self.torrent['info']['length']", "def _total_size_controller_fs(controller_fs_new, controller_fs_list):\n total_size = 0\n\n for fs in controller_fs_list:\n size = fs['size']\n if controller_fs_new and fs['name'] == controller_fs_new['name']:\n size = controller_fs_new['size']\n if fs['name'] == \"database\":\n size = size * 2\n total_size += size\n\n LOG.info(\n \"_total_size_controller_fs total filesysem size %s\" % total_size)\n return total_size", "def _total_size_controller_multi_fs(controller_fs_new_list):\n total_size = 0\n for fs in controller_fs_new_list:\n if fs.name == constants.FILESYSTEM_NAME_DATABASE:\n total_size += (2 * fs.size)\n else:\n total_size += fs.size\n return total_size", "def get_record_count(self):\n return os.path.getsize(self.path) / self._get_record_size()", "def get_size(files):\n somesize = 0\n for f in files:\n somesize += int(f.get('file_size'))\n return somesize", "def total(self):\n if self.dynamic:\n self._update_db_obj()\n return self._db_obj.total", "def n_subfile(self):\n self.assert_is_dir_and_exists()\n n = 0\n for _ in self.select_file(recursive=False):\n n += 1\n return n", "def print_local_output_files_stats():\n print \"\\n\\nFILES CREATED:\"\n for filename in os.listdir('../output'):\n filesize = os.path.getsize('../output/' + filename)\n print str(filesize) + \"\\t\" + filename\n print \"\\n\"", "def count_total_line():\n count = 0\n file_count = 0\n for filename in os.listdir('.'):\n if filename.endswith(\".json\"):\n file_count += 1\n with open(filename, 'r', encoding='utf8') as f:\n for line in f:\n count += 1\n print(\"There are {0} lines in {1} json files\".format(count, file_count))", "def files_processed(self) -> float:\n return pulumi.get(self, \"files_processed\")", "def getNumStatDataFiles(self):\n return self.nStatDataFiles", "def count_files_dir(self,full_path):\n try:\n num_files = len([name for name in os.listdir(full_path) if os.path.isfile(self.FILENAME)])\n print(f\"Number of files in {full_path} is {num_files}\")\n return num_files\n except Exception as e:\n raise SystemExit(f\"Could not complete operation: {e}\")", "def getsize(self):\n size =0\n for file in self.filelist:\n size += file.size\n return size", "def summarize_records(self):\r\n cache = self.retrieve_records_cache()\r\n return \"{} records\".format(\r\n 
len(cache[\"results\"])\r\n )", "def cli(count):\n\n if count:\n files = db.count_files()\n click.echo(\"Number of files on inventory: %s\" % files)\n else:\n archives = db.get_files()\n print \"ID - NAME - SIZE - CREATED\"\n for archive in archives:\n if archive.size:\n size = int(archive.size) / 1024.0 / 1024.0\n if format(size, '.2f') != '0.00':\n size = format(size, '.2f') + \" mb\"\n else:\n # Under 1 kb\n size = format(size * 1024 * 1024, '.0f') + \" bytes\"\n\n\n else:\n size = \"Unknown\"\n print \" %s - %s - %s - %s\" % (archive.id, archive.name, size, archive.created_at)", "def size(self) -> int:\n size = 0\n for file in self.files.values():\n size += file.size\n\n return size", "def dataset_summary(dbs_url, dataset):\n expire = 600 # set some expire since we're not going to use it\n # we call filesummaries?dataset=dataset to get number of files/blks\n dbs_url += '/filesummaries'\n dbs_args = {'dataset': dataset, 'validFileOnly': 1}\n headers = {'Accept': 'application/json;text/json'}\n source, expire = \\\n getdata(dbs_url, dbs_args, headers, expire, ckey=CKEY, cert=CERT,\n system='dbs3')\n for row in json_parser(source, None):\n totfiles = row[0]['num_file']\n totblocks = row[0]['num_block']\n return totblocks, totfiles", "def totals():\n return make_simple_tsv_get_response(TOTALS_FILE, 'totals')", "def get_total_number_of_documents(self):\n return self.total_number_of_documents", "def print_file_stats(self):\n\n # current epoch time, file number, filename, filesize, trans secs, status\n print(f\"TRANS_STATS_FILE: {time.time()} {self.batchvals['numfiles']} {self.filevals['filename']} {self.filevals['numbytes']} {self.filevals['end_time'] - self.filevals['start_time']} {self.filevals['status']}\")", "def getnrfiles(self):\n return len(self.filenames)", "def fs_size_total(self):\n return self._fs_size_total", "def get_folder_total(path):\n files = os.listdir(path)\n pythonfiles = ['%s/%s' % (path, filename) for filename in files if filename[-3:] == '.py']\n total = { 'net': 0, 'total': 0, 'nonblank': 0, 'num_inputs':0 }\n for filename in pythonfiles:\n with open(filename, 'r') as thisfile:\n blob = thisfile.read()\n # print filename\n thisloc = loc(blob)\n for k, v in thisloc.items():\n total[k] += v\n return total", "async def num_fomod_files_to_install(self):\n n = 0\n for f in self.fomod.files_to_install:\n if f.type == \"folder\":\n n += await self.count_folder_contents(f.source)\n else:\n n += 1\n\n return n", "def test_upload_count(self):\n conn = initialize_connection()\n db = conn.picdb\n coll = db.images\n\n num = coll.count_documents({})\n\n self.assertEqual(num, 72389)", "def size(self) -> int:\n return sum(p.size for p in self.iterfiles())", "def count_total(self):\n total = 0\n rpk_total = 0.0\n with open(self.filename, 'rU') as my_htseq:\n for line in my_htseq:\n if '_' not in line:\n line = line.rstrip('\\n').split('\\t')\n ensg_id = line[0]\n gene_len = len(set(self.gtf.gene_coords[ensg_id])) / 1000.0\n count = int(line[1])\n total += count\n rpk_total += float(count/gene_len)\n return total, rpk_total", "def Results(self):\r\n try:\r\n numOfFiles = 0\r\n file = str(filenames).split(',')\r\n for file in filenames:\r\n if os.path.exists(file):\r\n numOfFiles += 1\r\n print('%d' % numOfFiles + ' videos resized!')\r\n info = 'totaltime: ' + str(datetime.timedelta(seconds=totaltime))\r\n print(info)\r\n except NameError:\r\n info = ''\r\n print('no totaltime passed')\r\n return info", "def getNumTimeDataFiles(self):\n return self.nTimeDataFiles", "def 
get_total_view_count(self):\n done = self.cur.execute(\"SELECT CAST(SUM(view_count) AS DECIMAL(10, 0)) FROM videos\")\n count = self.cur.fetchone()[0]\n return count", "def get_number_of_files(directory: str):\n\n number_of_files = len([item for item in os.listdir(directory) if os.path.isfile(os.path.join(directory, item))])\n print(number_of_files)\n return number_of_files", "def get_number_files(dataset):\n HOME = os.environ['HOME']\n # cmds = ['das_client.py', '--query', 'summary dataset=%s' % dataset, '--format=json',\n # '--key=%s/.globus/userkey.pem' % HOME, '--cert=%s/.globus/usercert.pem' % HOME]\n cmds = ['das_client.py', '--query', 'summary dataset=%s' % dataset, '--format=json']\n output = subprocess.check_output(cmds, stderr=subprocess.STDOUT)\n summary_dict = json.loads(output)\n return int(summary_dict['data'][0]['summary'][0]['nfiles'])", "def sum_pandas(self):\n return len(self.panda_files)", "def print_all_files(self):\n\n print(\"db path/name (filesize, md5sum) F disk path/name (filesize, md5sum)\")\n allfiles = set(self.files_from_db).union(set(self.files_from_disk))\n fdisk_str = \"\"\n # loop over all found files\n for fname in allfiles:\n # if the file name is in the DB list\n if fname in self.files_from_db:\n finfo = self.files_from_db[fname]\n fullname = f\"{finfo['path']}/{fname}\"\n filesize = None\n if 'filesize' in finfo:\n filesize = finfo['filesize']\n md5sum = None\n if 'md5sum' in finfo:\n md5sum = finfo['md5sum']\n\n fdb_str = f\"{fullname} ({filesize}, {md5sum})\"\n else:\n fdb_str = \"\"\n # if the file name is in the disk list\n if fname in self.files_from_disk:\n finfo = self.files_from_disk[fname]\n fullname = f\"{finfo['relpath']}/{fname}\"\n filesize = None\n if 'filesize' in finfo:\n filesize = finfo['filesize']\n md5sum = None\n if 'md5sum' in finfo:\n md5sum = finfo['md5sum']\n\n fdisk_str = f\"{fullname} ({filesize}, {md5sum})\"\n else:\n fdisk_str = \"\"\n # not whether they are the same or not\n comp = 'X'\n if fname in self.comparison_info['equal']:\n comp = '='\n\n print(f\"{fdb_str:-140s} {comp} {fdisk_str:-140s}\")", "def dashboard_alarm_get_total_records(self):\n return self.request( \"dashboard-alarm-get-total-records\", {\n }, {\n 'count': [ int, False ],\n } )", "def count_deleted_bytes(self): # DirObj.count_deleted_bytes\n bytes=0\n for name, d in self.subdirs.iteritems():\n bytes = bytes + d.count_deleted_bytes()\n for name, f in self.files.iteritems():\n if f.deleted:\n bytes = bytes + f.count_deleted_bytes()\n return bytes", "def calculate_total_size(apps, schema_editor):\n Data = apps.get_model(\"flow\", \"Data\")\n for data in Data.objects.all():\n hydrate_size(data, force=True)\n data.save()", "def __len__(self):\n return len(self.files)", "def db_print_table_rows_cnt(db_path, table_name):\n path_exist = os.path.exists(db_path)\n if path_exist is False:\n print '!!!Error, database does not exist.'\n return\n\n try:\n with db.connect(db_path) as conn:\n cursor = conn.cursor()\n print(\" Table Name : '%s'\" % table_name)\n # Prepare and execute SQL statement\n sql = ('SELECT COUNT(*) FROM {}').format(table_name)\n cursor.execute(sql)\n count = cursor.fetchall()\n print(\" Total Rows : %s\" % count[0][0])\n except (db.OperationalError) as e:\n print(\"!!!Error, %s\" % repr(e))", "def db_print_table_rows_cnt(db_path, table_name):\n path_exist = os.path.exists(db_path)\n if path_exist is False:\n print '!!!Error, database does not exist.'\n return\n\n try:\n with db.connect(db_path) as conn:\n cursor = conn.cursor()\n print(\" Table 
Name : '%s'\" % table_name)\n # Prepare and execute SQL statement\n sql = ('SELECT COUNT(*) FROM {}').format(table_name)\n cursor.execute(sql)\n count = cursor.fetchall()\n print(\" Total Rows : %s\" % count[0][0])\n except (db.OperationalError) as e:\n print(\"!!!Error, %s\" % repr(e))", "def totalbytes(self):\n with self.session as session:\n result = session.execute(select([func.sum(IndexRecord.size)])).scalar()\n if result is None:\n return 0\n return long(result)", "def Total_Assets(stock):\n return _factor(_open_file(stock, 'balance'), 'Total Assets')", "def read_counter(self, path):\n self.cursor.execute('SELECT * FROM \"counter\" WHERE \"fullpath\"=?', (path,))\n row = self.cursor.fetchone()\n count = 0\n if row != None : count = row[1]\n # print 'read_counter:', path, count\n return count", "def max_files(self):\n\n return 10 ** self.int_len(self.cnt_files())", "def get_num_files(self, file_type):\n return self.file_type_counter.get(file_type, 0)", "def _summary(in_file):\n data = Counter()\n out_file = in_file + \"_size_stats\"\n if file_exists(out_file):\n return out_file\n with open(in_file) as in_handle:\n for line in in_handle:\n counts = int(line.strip().split(\"_x\")[1])\n line = in_handle.next()\n l = len(line.strip())\n in_handle.next()\n in_handle.next()\n data[l] += counts\n with file_transaction(out_file) as tx_out_file:\n with open(tx_out_file, 'w') as out_handle:\n for l, c in data.items():\n out_handle.write(\"%s %s\\n\" % (l, c))\n return out_file", "def download_files(df, workdir):\n size = 0\n \n for index, row in df.iterrows():\n filename = os.path.join(workdir, 'song_' + str(index) + '.mp3')\n\n url = row['Download URLs'] \n\n if index%10==0:\n print(index, \"Current Time =\", datetime.now().strftime(\"%H:%M:%S\"))\n \n now = datetime.now()\n try:\n size += download_file_(url, filename)\n except:\n continue\n \n return(size)", "def _count_data(path):\n matcher = re.compile(r'[0-9]+\\.dec')\n match = lambda name: bool(matcher.match(name))\n names = os.listdir(path)\n n_data = len(list(filter(match, names)))\n return n_data", "def get_download_info(files):\n file_paths = [] # the files we need to check\n file_count = 0 # count of each file in files\n total_size = 0\n\n all_product_types = []\n for ring_obs_id in files:\n for product_type in files[ring_obs_id]:\n for f in files[ring_obs_id][product_type]:\n\n all_product_types.append(product_type)\n\n if product_type != 'preview_image':\n # this is a pds file not a browse product\n # collect the urls.. 
we will process these at the end\n file_paths += [f for f in files[ring_obs_id][product_type]] # list of all urls\n\n elif product_type == 'preview_image':\n # the file size of each preview images on disc is checked here\n # todo: OMG WHY WHAT\n # todo: get the file sizes into database instead = process like pds files and remove this whole section!\n\n from results.views import get_base_path_previews\n try:\n size = getsize(f)\n total_size += size\n file_count = file_count + 1\n except OSError:\n log.error('could not find file: ' + f)\n\n all_product_types = list(set(all_product_types)) # make unique\n # now we have all pds file_names, put all file names in a list and get their count\n if file_paths:\n\n file_names = list(set([ get_file_path(u) for u in file_paths]))\n file_count += len(file_names)\n\n # query database for the sum of all file_names size fields\n file_sizes = FileSizes.objects.filter(name__in=file_names, PRODUCT_TYPE__in=all_product_types).values('name','size','volume_id').distinct()\n total_size += sum([f['size'] for f in file_sizes]) # todo: this is here b/c django was not happy mixing aggregate+distinct\n\n return total_size, file_count # bytes", "def total_progress(self):\n progressbar.reset()\n if self.root.compare_select.get():\n progressbar.add_total(1)\n if self.root.update_select.get():\n progressbar.add_total(1)\n if self.root.docs_select.get():\n progressbar.add_total(2)\n if self.root.ills_select.get():\n progressbar.add_total(1)", "def test_task_count_total(self):\r\n tasks.count_total()\r\n\r\n stat = StatBookmark.query.first()\r\n self.assertEqual(stat.attrib, stats.TOTAL_CT)\r\n self.assertEqual(stat.data, 4)", "def len(self):\n # print(self.processed_file_names)\n return self.len_", "def __len__(self) -> int:\n return len(self.files)", "def count_data_items(fileids, train=True):\n sizes = 28000 if train else 22500\n return len(fileids) * sizes", "def total_rows(cursor, table_name, print_out=False):\n\tc.execute('SELECT COUNT(*) FROM {}'.format(table_name))\n\tcount = c.fetchall()\n\tif print_out:\n\t\tprint('\\nTotal rows: {}'.format(count[0][0]))\n\treturn count[0][0]", "def get_total_file_size(self, file_type):\n return self.file_size_counter.get(file_type, 0)", "def amount_total(path, file_type):\n final_frame = clean_kdr_data(path, file_type)\n amount_work = final_frame.groupby(\"Date\")[\"Place\"].count()\n amount_work = amount_work.to_frame()\n amount_work.columns = [\"Freq\"]\n\n # Dropping outlier data\n amount_work = amount_work.drop([\"2019-01-04\"])\n amount_work = amount_work.drop([\"2019-01-07\"])\n\n return amount_work", "def bpCount(file):\n amount_bp = len(file)\n return amount_bp", "async def people(self, context):\n collection = db['people']\n person_count = []\n count_dict = {}\n for person in collection.find({}, {'_id': 0, 'person': 1}):\n person_count.append(person['person'])\n for person in list(set(person_count)):\n count_dict[person] = person_count.count(person)\n person_print = [f'`{k.capitalize()}: {v}`\\t' for k, v in sorted(count_dict.items())]\n\n await context.send('Current Image Totals:\\n')\n await context.send(''.join(person_print))", "def get_files(self):\n\n cur = self.app.conn.cursor()\n sql = \"select distinct case_text.fid, source.name from case_text join source on case_text.fid=source.id where \"\n sql += \"caseid=? 
order by lower(source.name) asc\"\n cur.execute(sql, [self.case['caseid'], ])\n self.casefiles = cur.fetchall()\n sql = \"select id, name, fulltext, mediapath, memo, owner, date, av_text_id from source order by source.name asc\"\n cur.execute(sql)\n self.allfiles = cur.fetchall()\n msg = _(\"Files linked: \") + str(len(self.casefiles)) + \" / \" + str(len(self.allfiles))\n self.ui.label_files_linked.setText(msg)", "def size(self, *args) -> \"int64\":\n return _ida_fpro.qfile_t_size(self, *args)", "def totaltasks(conn):\n c = conn.cursor()\n r = c.execute(\"SELECT count(id) as total_tasks FROM event WHERE type_id = \" + taskid(\"run_task\")).fetchall()\n return r[0]['total_tasks']", "def total_volume(self):", "def count(train_dir):\r\n path = train_dir\r\n count = 0\r\n for fn in os.listdir(path): #fn 表示的是文件名\r\n count = count + 1\r\n return count", "def total(self):\n return self._results.total", "def count_len(self):\n total = 0\n for filename in self.filenames:\n f = open(os.path.join(self.directory, filename))\n line_count = 0\n for _ in f:\n line_count += 1\n if line_count < self.window_size:\n continue\n else:\n total += line_count - self.window_size + 1\n return total", "def total_volume(self) -> int:\n total = 0\n for i in self.order_items:\n total += i.total_volume\n return total", "def display_sum_sold():\n sold_games = reports.sum_sold(filename)\n print(\n \"Total sold copies from {} file is: {} millions\\n\".format(\n filename,\n sold_games))", "def total(evictiondata):\r\n total = 0\r\n for index, row in evictiondata.iterrows():\r\n total += row['filings_2020']", "def output_queue_size(self):\r\n results_dirname = get_param('results_dir')\r\n filename = os.path.join(results_dirname,\r\n '%s_%s' % (get_param('file_prefix'),\r\n 'queued_tasks'))\r\n queued_tasks_file = open(filename, 'w')\r\n queued_tasks_file.write('time\\ttotal_queued_tasks\\n')\r\n for time, queued_tasks in self.enqueued_tasks:\r\n queued_tasks_file.write('%s\\t%s\\n' % (time, queued_tasks))\r\n queued_tasks_file.close()" ]
[ "0.75531894", "0.70216966", "0.6991882", "0.69044584", "0.69035006", "0.68878055", "0.68755126", "0.6699878", "0.6671501", "0.65564287", "0.6547608", "0.64975446", "0.64903414", "0.64636934", "0.64395994", "0.640908", "0.63254476", "0.6294701", "0.6255073", "0.6235836", "0.6217271", "0.62171465", "0.61457425", "0.6128372", "0.61271936", "0.6125352", "0.6119997", "0.609902", "0.6075574", "0.60493577", "0.6049172", "0.60373276", "0.6015935", "0.60093707", "0.5999704", "0.5961675", "0.5947965", "0.5947958", "0.594246", "0.594202", "0.59340096", "0.59169227", "0.59012157", "0.5858565", "0.58548176", "0.5850587", "0.58338124", "0.5815078", "0.58051276", "0.5778289", "0.57757133", "0.57723534", "0.5760496", "0.57476884", "0.57278436", "0.57247555", "0.5724204", "0.5710787", "0.5703217", "0.5701579", "0.56872195", "0.5683128", "0.5674912", "0.56718504", "0.56672645", "0.56463414", "0.56459713", "0.5645032", "0.56346154", "0.56346154", "0.5627312", "0.5625915", "0.5624069", "0.5621095", "0.5620746", "0.5620533", "0.5618617", "0.56131697", "0.55952823", "0.55743307", "0.55671805", "0.55603796", "0.5555363", "0.5546379", "0.55446684", "0.5543748", "0.5542537", "0.5541807", "0.5530304", "0.55299103", "0.5528179", "0.5524127", "0.55227184", "0.5520808", "0.5509168", "0.5507134", "0.5506041", "0.55025905", "0.55000144", "0.54962707" ]
0.67092466
7
Delete file from database
async def delete(bot, message): reply = message.reply_to_message if reply and reply.media: msg = await message.reply("Processing...⏳", quote=True) else: await message.reply('Reply to file with /delete which you want to delete', quote=True) return for file_type in ("document", "video", "audio"): media = getattr(reply, file_type, None) if media is not None: break else: await msg.edit('This is not supported file format') return result = await Media.collection.delete_one({ 'file_name': media.file_name, 'file_size': media.file_size, 'mime_type': media.mime_type }) if result.deleted_count: await msg.edit('File is successfully deleted from database') else: await msg.edit('File not found in database')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_db(self):\n import os.path\n os.remove(self.filepath)", "def delete(self, filename):\n pass", "def delete_file(file_id):\n file_obj = Data.objects.get(id=file_id)\n print(\"Removing file: \", file_obj.name)\n print(file_obj.file.path)\n file_dir = file_obj.file.path\n os.remove(file_dir)\n print(\"Done.\")", "def delete(self):\n if not pdbox._args.get(\"dryrun\"):\n result = execute(pdbox.dbx.files_delete_v2, self.path)\n pdbox.debug(\"Metadata response: %s\" % result.metadata)\n pdbox.info(\"Deleted %s\" % self.uri)", "def delete(self, filename):\n raise NotImplementedError", "def delete(self, host, file):", "def delete(self, filename, **kw):\n\n file_path = os.path.join(self.storage_path, filename)\n\n try:\n os.remove(file_path)\n except OSError:\n pass", "def deleteFileRecordByID(file_id):\n session = Queries.createSession()\n try:\n file_db = session.query(FileTable).filter_by(id=file_id).first()\n servers = file_db.server_id[:]\n for server in servers:\n file_db.server_id.remove(server)\n session.commit()\n session.delete(file_db)\n session.commit()\n except sqlalchemy.exc.ArgumentError:\n print 'SQLAlchemy ERROR: Invalid or conflicting function argument is supplied'\n except sqlalchemy.exc.CompileError:\n print 'SQLAlchemy ERROR: Error occurs during SQL compilation'\n finally:\n session.close()", "def delete_from_db(image):\n db.session.delete(image)\n db.session.commit()", "def delete_file(sender, instance, **kwargs):\n if bool(instance.exam_file): # check if exam file exists\n try:\n instance.exam_file.delete()\n except OSError:\n pass\n # if exam file has already been deleted, then do nothing and continue\n # with deleting the exam model", "def remove(self, path):\n path = path.decode('utf8')\n cursor = self._dbcon.cursor()\n filename = os.path.basename(path)\n dirname = os.path.dirname(path)\n t = (dirname, filename)\n sql = u\"delete from books where path = ? 
and filename = ?\"\n cursor.execute(sql, t)\n self._dbcon.commit()\n cursor.close()", "def delete(self, *args, **kwargs):\n self.file.storage.delete(self.file.name)\n super().delete(*args, **kwargs)", "def delete_file(filename):\n\tprint client.file_delete(filename)", "def delete(self):\n if os.path.exists(self.file_path):\n os.remove(self.file_path)", "def erase_db(file):\n open(file, 'w').close()", "def delete(self):\n\n try:\n remove(self.file)\n except OSError:\n pass", "def delete_file(self, lfile):\n raise NotImplementedError('delete_file')", "def delete_file(sender, instance, *args, **kwargs):\n if instance.file:\n _delete_file(instance.file.path)", "def delete():", "async def delete_file(location_id: LocationID, file_id: StorageFileID, user_id: UserID):", "def delete_photo(photo):\n\n\tfilename = \"%s/%s\" % (current_app.instance_path, photo.url)\n\ttry:\n\t\tos.remove(filename)\n\texcept:\n\t\t# The file doesn't exist.\n\t\tpass\n\n\tdb = get_database()\n\tdb.session.delete(photo)\n\tdb.session.commit()", "def delete_file(self, hash):\n self.tree.delete(hash)\n query = \"delete from files where hash='%s'\"%hash\n self.connection.execute(query)\n self.connection.commit()", "def delete( self ):\n if os.path.exists(self.filename):\n os.remove(self.filename)", "def delete_db(self, filename, stored_master, entered_master):\n if os.path.isfile(filename):\n if stored_master == entered_master:\n # first clear the data\n spinner = Halo(text=colored(\"Deleting all password data...\", \"red\"), spinner=self.dots_, color=\"red\")\n jfile = {}\n with open(filename, 'w') as jdata:\n json.dump(jfile, jdata)\n # then delete the file\n os.remove(filename)\n spinner.stop()\n else:\n raise MasterPasswordIncorrect\n else:\n raise PasswordFileDoesNotExist", "def delete_file(self, filepath):\n self.ftp.delete(filepath)", "def predio_delete(sender, instance, **kwargs):\n instance.dataFile.delete(False)", "def delete(self, file_id: str):\n file_path = self._path_to_file(file_id)\n os.remove(file_path)\n del self.index[file_id]", "def delete(self):\n\t\t#self.log.info(\"Deleting file {}\".format(self._filepath))\n\t\tos.remove(self._filepath)", "def _delete_datafile(sender, instance, **kwargs):\n instance.delete_datafile(save_instance=False)", "def db_remove():\n\n db.session.close()\n db.drop_all()\n\n path = current_app.config['SNER_VAR']\n for file_object in os.listdir(path):\n file_object_path = os.path.join(path, file_object)\n if os.path.isdir(file_object_path):\n shutil.rmtree(file_object_path)\n else:\n os.unlink(file_object_path)", "def delete_file(request, page_id, file_id):\n record = models.FileStore.get_by_id(int(file_id))\n if record:\n if not record.user_can_write(request.profile):\n return utility.forbidden(request)\n\n record.delete()\n return utility.edit_updated_page(page_id, tab_name='files')\n else:\n return utility.page_not_found(request)", "def delete_path():\n #TODO delete path from database\n pass", "def delete_file(sender, instance, *args, **kwargs):\n if instance.photo:\n _delete_file(instance.photo.path)", "def delete_image(db, filename, usernick):\n cur = db.cursor()\n sql = \"\"\"\n delete from images where filename=? 
and usernick=?;\n \"\"\"\n cur.execute(sql, (filename, usernick))\n db.commit()\n\n sql = \"\"\"\n delete from likes where filename=?;\n \"\"\"\n cur.execute(sql, (filename,))\n db.commit()", "def delete(self, *route, **req_data):\n # Read the file ID from the request, with safety.\n try:\n file_id = UUID(req_data['file_id']).hex\n except ValueError:\n return Response(status='400 Bad Request')\n\n # Retrieve and delete the file.\n stored_files = StoredFile.collection()\n to_delete = stored_files.first(id=file_id)\n\n log_activity('%s deleted file %s'%(\n context.user.link, to_delete.filename\n ))\n\n stored_files.delete(to_delete)\n get_bucket().delete(to_delete.data_id)\n\n return Response(status='200 OK')", "def delete_file(mapper, connection, target):\n if target.filename and app.config['CLEANUP_FILES']:\n try:\n os.remove(join(app.config['FILE_PATH'], str(target.talk.id),\n str(target.version), target.filename))\n except OSError:\n # We don't care if wasn't deleted because it does not exist\n pass", "def safe_delete(self, filename):\n try:\n os.remove(filename)\n except OSError:\n pass", "def delete_file(path):\n return files.delete_file(path)", "def delete(self, remote):\n self.target.ttbd_iface_call(\"store\", \"file\", method = \"DELETE\",\n file_path = remote)", "def delete_file(sender, instance, *args, **kwargs):\n if instance.image:\n _delete_file(instance.image.path)", "def delete_file(self, path):\n return self.client._perform_empty(\n \"DELETE\", \"/projects/%s/managedfolders/%s/contents/%s\" % (self.project_key, self.odb_id, utils.quote(path)))", "def delete(self):\n\n cursor = self._conn.cursor()\n cursor.execute(\"DELETE FROM saves\")\n self._conn.commit()", "def delete(self, *args, **kwargs):\n self.file.delete(save=False)\n self.thumbnail.delete(save=False)\n\n super(File, self).delete(*args, **kwargs)", "def delete_image_from_database(full_image_path):\r\n\r\n logging.debug('delete_image_from_database({})'.format(full_image_path))\r\n\r\n dir_path = os.path.join(os.environ['LOCALAPPDATA'],'WarietyWallpaperImages')\r\n os.makedirs(dir_path, exist_ok=True)\r\n db_file = os.path.join(dir_path,'wariety.db')\r\n conn = sqlite3.connect(db_file)\r\n c = conn.cursor()\r\n\r\n # Select a row\r\n c.execute(\"DELETE FROM wallpapers WHERE ipath = ?\", (full_image_path,))\r\n conn.commit()\r\n conn.close()", "def delete(self):\n\n\n try:\n db = getDatabase()\n connection = db.connect()\n\n connection.delete(self)\n except Exception as e:\n raise e\n finally:\n db.dispose()", "def delete(self):\n os.remove(self.file_path)\n super(VideoFile, self).delete()", "def delete_file(path):\n if os.path.isfile(path):\n os.remove(path)", "def delete(self, resource_id, file_id):\n d = Deposition.get(resource_id, user=current_user)\n\n # Sort files raise ForbiddenAction if not authorized\n df = d.remove_file(file_id)\n if df is None:\n abort(404, message=\"File does not exist\", status=404)\n df.delete()\n d.save()\n return \"\", 204", "def auto_delete_file_on_delete(sender, instance, **kwargs):\n if instance.document:\n if os.path.isfile(instance.document.path):\n os.remove(instance.document.path)", "def delete_file(self, name):\n del self.files[name]", "def delete_users(self, filename):\n f_id = self.face.FACES.files.find_one({ \"filename\" : filename }, { \"_id\" : 1 })\n self.face_fs.delete(f_id['_id'])", "def delete_file(path):\n if os.path.isfile(path):\n os.remove(path)", "def delete(id=None):\n donation = db.session.query(Donation).get(id)\n if donor and donation.filename:\n try:\n 
filename = secure_filename(file.filename)\n file.delete(os.path.join(current_app.config['UPLOAD_FOLDER'], filename))\n except:\n pass\n db.session.delete(donation)\n db.session.commit()\n return redirect(url_for('.index'))", "def storage_delete_report_file(self, report_pk):\n self._get_queryset(pk=report_pk).delete()", "def _delete_file(path):\n if os.path.isfile(path):\n os.remove(path)", "def sorl_delete(**kwargs):\n from sorl.thumbnail import delete\n delete(kwargs['file'])", "def delete_table(table, db_file):\n \n try:\n conn, c = connect_to_db(db_file)\n c.execute(\"DROP TABLE IF EXISTS \" + safe(table) + \";\")\n conn.close()\n except Exception as e:\n print(\"Error when trying to delete table \" + table + \" in database file \" + db_file)\n print(e)\n return False\n else:\n return True", "def delete_file(self, msg_parameters):\n reg = self.get_regex_file_name(msg_parameters[0])\n for file_part in self.files.keys():\n if reg.search(file_part) is not None:\n # remove from computer\n os.remove(self.files[file_part])\n # do not save that the data server has it\n del self.files[file_part]", "def delete():\n\n from slicr.extensions import db\n\n click.echo('deleting database...')\n\n db.drop_all()", "def remove(self, db_name):\n path = self.get_path(db_name)\n os.remove(path)", "def _delete(filename):\n return os.remove(filename)", "def auto_delete_file_on_delete(sender, instance, **kwargs):\n if instance.file:\n if os.path.isfile(instance.file.path):\n os.remove(instance.file.path)", "def _delete_file(path):\n if os.path.isfile(path):\n os.remove(path)", "def _delete_file(path):\n if os.path.isfile(path):\n os.remove(path)", "def deleteDocumentFromPhone(file):\n\tprint \"Removing %s from target device...\" % file\n\tcmd =r\"adb shell rm -r %s\" % file\n\tos.system(cmd)\n\tprint \"Finished removing file from phone.\"", "def delete_file(self, path):\n raise HTTPError(\n 501,\n \"Narrative deletion not implemented here. 
Deletion is handled elsewhere.\",\n )", "def _delete(self, remote_filename):\n\n file_id = self.get_file_id(remote_filename)\n if file_id is None:\n raise BackendException(\n 'File \"%s\" cannot be deleted: it does not exist' % (\n remote_filename))\n response = self.http_client.put(self.metadata_url + 'trash/' + file_id)\n response.raise_for_status()\n del self.names_to_ids[remote_filename]", "def test_d_remove_database(self):\n\n if os.path.isfile(location):\n os.remove(location)\n\n assert(True)", "def deleteDB():\n db = sqlite.connect(db_path)\n db.row_factory = sqlite.Row\n cursor = db.cursor()\n cursor.execute(\"DELETE from rooms\")\n\n cursor.execute(\"DELETE from users\")\n\n cursor.execute(\"DELETE from urls\")\n\n cursor.fetchall()\n db.commit()\n cursor.close()\n db.close()", "def remove_file(self, path):\n pass", "def auto_delete_file_on_delete(sender, instance, **kwargs):\n\n if instance.file:\n if os.path.isfile(instance.file.path):\n os.remove(instance.file.path)", "def delete_file(file: str) -> None:\n\tuux.show_info(\"Deleting \" + file)\n\n\tif not os.path.exists(file):\n\t\t# Files does not exist\n\t\treturn\n\n\tos.remove(file)", "def del_entry(dbfile):\n\n conn = sqlite3.connect(dbfile)\n c = conn.cursor()\n c.execute(\"\"\"\n DELETE FROM bringatrailer WHERE id = (SELECT MAX(id) FROM bringatrailer)\n \"\"\")\n conn.commit()\n conn.close()", "def Delete(self):\n\n self.db.ExecuteSql('delete from tracks where id=%d;'\n % self.persistant['id'])\n self.db.ExecuteSql('commit;')", "def delete_file(self, filename=None):\n return self._service.delete_object(self._datasets_id, filename)", "def deleteReference(self, databaseFile):\n logger.debug(\"Func: deleteReference\")\n\n #ADMIN ACCESS\n jsonInfo = self._loadJson(databaseFile)\n if jsonInfo == -2:\n return -2\n\n if jsonInfo[\"ReferenceFile\"]:\n try:\n referenceFile = jsonInfo[\"ReferenceFile\"]\n os.remove(os.path.join(self.projectDir, jsonInfo[\"ReferenceFile\"]))\n jsonInfo[\"ReferenceFile\"] = None\n jsonInfo[\"ReferencedVersion\"] = None\n self._dumpJson(jsonInfo, databaseFile)\n self.errorLogger(title=\"Deleted Reference File\", errorMessage=\"%s deleted\" %referenceFile)\n except:\n msg = \"Cannot delete reference file %s\" % (jsonInfo[\"ReferenceFile\"])\n logger.warning(msg)\n raise Exception([203, msg])\n pass", "def file_delete(self, path):\n params = {'root': self.session.root, 'path': format_path(path)}\n\n url, params, headers = self.request(\"/fileops/delete\", params)\n\n return self.rest_client.POST(url, params, headers)", "def deleteSingleFile(filename):\n os.popen('rm {}'.format(filename))", "def delete_file(self, path):\n if not path_exists(path, self._store_folder):\n raise NotFoundException(\"\")\n os.remove(path)", "def delete_image(self):\n Image.objects.get(id = self.id).delete()", "def auto_delete_file_on_delete(sender, instance, **kwargs):\r\n if instance.path:\r\n if os.path.isfile(instance.path.path):\r\n os.remove(instance.path.path)", "def delete_file(self, name, container):\r\n try:\r\n cnt = self.get_container(container)\r\n obj = cnt.get_object(name)\r\n obj.delete()\r\n return True\r\n except:\r\n return False", "def delete(self, store, uuid):\n\n session = get_session()\n session.begin()\n\n stored_file = self._retrieve(store.object_type, uuid)\n\n try:\n session.delete(stored_file)\n session.commit()\n finally:\n session.close()", "def removePostFromDb(photo_name):\n connection = sqlite3.connect(homePath + DBname)\n cursor = connection.cursor()\n cursor.execute(\"DELETE FROM photo WHERE 
photo_name == (?);\", (photo_name,))", "def delete(self):\n with sqlite3.connect(self.dbpath) as connection: \n cursor = connection.cursor()\n DELETESQL = \"\"\"DELETE FROM accounts WHERE id=:id \"\"\"\n cursor.execute(DELETESQL, {\"id\": self.id})\n self.id = None", "def delete(self):\n request_data = request.get_json(force=True)\n current_path = self.get_current_path()\n file_name = request_data.get('file_name')\n\n if not file_name:\n abort(400, message=\"File name must not be empty!\")\n\n full_path = os.path.join(current_path, file_name)\n\n if not os.path.exists(full_path):\n abort(400, message=\"File was not found in current path!\")\n\n if not os.path.isfile(full_path):\n abort(400, message=\"File name is not a file!\")\n\n if not self.is_allowed(full_path):\n abort(403, message=\"You are not allowed to this path\")\n\n os.remove(full_path)\n\n return {\"message\": \"OK\"}", "def delete_file(self, key):\n path = os.path.join(self.directory, self.subdirectory, key)\n if os.path.isfile(path):\n os.unlink(path)\n else:\n raise ValueError(f\"No such file: {key}\")", "def Delete_File(self,tx,filename):\n if tx != self.tx:\n raise InvalidTransaction(tx)\n\n fullname = os.path.join(self.home,filename)\n win32_txf.DeleteFileTransacted(fullname,transaction = tx)", "def Delete_File(self,txn,filename):\n opid = self.new_opid()\n xaction = DeleteFile_Operation(os.path.join(self.home,filename),opid)\n self._add_operation(txn,xaction)", "def deleteRow(self, database):\r\n self.conn = connect(\"database.sqlite\")\r\n self.cur = self.conn.cursor()\r\n self.cur.execute(\r\n f\"DELETE FROM {database} WHERE id=(SELECT MAX(id) FROM {database})\")\r\n self.conn.commit()\r\n self.cur.close()", "def delete_migration_data(import_type: migration.Migration, file_name: str):\n\n remove_path = app_dirs.MIGRATION_DIR.joinpath(import_type.value, file_name)\n\n if remove_path.is_file():\n remove_path.unlink()\n elif remove_path.is_dir():\n shutil.rmtree(remove_path)\n else:\n raise HTTPException(status.HTTP_400_BAD_REQUEST)", "def test_delete_with_file(self):\n langpack = LangPack.objects.create(version='0.1')\n file_path = langpack.file_path\n with public_storage.open(file_path, 'w') as f:\n f.write('sample data\\n')\n assert public_storage.exists(file_path)\n try:\n langpack.delete()\n assert not public_storage.exists(file_path)\n finally:\n if public_storage.exists(file_path):\n public_storage.delete(file_path)", "def delete_field(self, field):\n if field.type == 'keyword':\n query = '''select _keyword_id from keywords\n where _keyword=\"%s\"''' %field.name\n keyword_id = self.connection.execute(query).fetchall()[0][0]\n query = 'delete from keyword_x_file where _keyword_id=%d'%keyword_id\n self.connection.execute(query)\n query = 'delete from keywords where _keyword_id=%d'%keyword_id\n self.connection.execute(query)\n self.keywords.remove(field.name)\n else:\n query = 'alter table files drop column \"%s\"' % field.name\n self.connection.execute(query)\n self.connection.commit()\n self.init_fields()", "def delete_file(self, filename):\n if not filename in self.files:\n raise IOError('File %s Not Found' % filename)\n\n for nodename in self.files[filename]:\n node = self.datanodes[nodename]\n node.delete_file(filename)\n del self.files[filename]\n logging.info('file %s deleted' % filename)", "def delete_file(self, filename: str, directory: str = 'gcodes') -> Dict:\n raise NotImplementedError", "def tearDown(self):\n os.remove(self._dbfile)", "def db_delete_device_record(db_path, rec_name):\n path_exist = 
os.path.exists(db_path)\n if path_exist is False:\n print '!!!Error, database does not exist.'\n return\n\n try:\n with db.connect(db_path) as conn:\n cursor = conn.cursor()\n # Prepare and execute SQL statement (make sure the 'record_name'\n # parameter follows with comma to make it a tuple)\n sql = \"DELETE FROM Devices WHERE name=?\"\n cursor.execute(sql, (rec_name,))\n except (db.OperationalError) as e:\n print(\"!!!Error, %s\" % repr(e))", "def delete_file(request):\n if request.method != 'DELETE':\n return {\n 'status': 'error',\n 'message': 'only HTTP DELETE allowed',\n }\n body = json.loads(request.body)\n file_id = body.get('file_id', '')\n import_file = ImportFile.objects.get(pk=file_id)\n d = ImportRecord.objects.filter(\n super_organization_id=body['organization_id'],\n pk=import_file.import_record.pk\n )\n # check if user has access to the dataset\n if not d.exists():\n return {\n 'status': 'error',\n 'message': 'user does not have permission to delete file',\n }\n\n import_file.delete()\n return {\n 'status': 'success',\n }", "def _delete_binary(self, filename):\n\t\ttry:\n\t\t\tvalidation.required(filename, 'filename')\n\t\texcept errors.ValidationError, ex:\n\t\t\treturn utils.return_deferred_error(ex.value)\n\n\t\tself.log.debug(\"_delete_binary(%s)\" % filename)\n\n\t\t@stack\n\t\tdef do_delete(void):\n\t\t\tif os.path.exists(filename):\n\t\t\t\tself.log.debug(\"file [%s] exists...deleting\" % filename)\n\t\t\t\ttry:\n\t\t\t\t\tos.unlink(filename)\n\t\t\t\texcept Exception, ex:\n\t\t\t\t\tself.log.warning(\"Unable to delete [%s] - %s\" % (filename, ex))\n\t\t\t\t\traise errors.APIError(ex)\n\t\t\telse:\n\t\t\t\tself.log.debug(\"file [%s] doesn't exist...\" % filename)\n\t\t\treturn file\n\n\t\td = Deferred()\n\t\td.addCallback(do_delete)\n\t\td.addCallback(lambda _: (0, _))\n\t\td.addErrback(lambda _: (-1, _.getErrorMessage()))\n\t\td.callback(0)\n\t\treturn d", "def delete_data(self):\n conn = self._connect_DB()\n cur = conn.cursor()\n cur.execute(\"DELETE FROM movie_table;\")\n self._close_connection(conn)" ]
[ "0.79601157", "0.7939549", "0.7724207", "0.76846415", "0.749681", "0.72521895", "0.72500366", "0.724998", "0.7248713", "0.72234935", "0.7212773", "0.7202836", "0.718874", "0.7165154", "0.71618164", "0.7144323", "0.7123797", "0.7113529", "0.7098491", "0.7044695", "0.7039676", "0.70378006", "0.7009089", "0.6998756", "0.6992148", "0.698207", "0.6972047", "0.69625115", "0.6959668", "0.69487643", "0.69390845", "0.69284254", "0.6924797", "0.68960416", "0.688419", "0.687114", "0.6845033", "0.6808176", "0.68069637", "0.6803563", "0.6773015", "0.67643076", "0.6760047", "0.6759939", "0.6749306", "0.67467886", "0.6744789", "0.6734944", "0.6720578", "0.6718494", "0.67122066", "0.6710382", "0.6696804", "0.6692718", "0.66846526", "0.6671687", "0.6662471", "0.66503274", "0.6639732", "0.6639597", "0.6636208", "0.66276044", "0.6626157", "0.6626157", "0.6620155", "0.66164356", "0.6602188", "0.66002584", "0.65873843", "0.6586632", "0.65740615", "0.6568536", "0.6563639", "0.65519017", "0.6543244", "0.65395635", "0.6539013", "0.65314144", "0.6525168", "0.65007794", "0.6499316", "0.6497343", "0.6493173", "0.6488941", "0.6487066", "0.64838904", "0.64813495", "0.64805514", "0.6474738", "0.6449565", "0.64329976", "0.6432475", "0.6427641", "0.642003", "0.641841", "0.6416976", "0.6415944", "0.64154845", "0.6408449", "0.64065695" ]
0.6551673
74
Add new contact to the database
def add_contact(self, contact): self.db.insert_contact(contact) return self.update_contacts()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_contact_to_db(self):\n self.init_db(self._testing)\n\n # make sure that the object is not in the db\n assert self.uid == \"\"\n\n self._insert_row_into_db(Contact.table_name, Contact.columns, self.values)\n\n # update this objects uid\n self.uid = self._get_id_of_last_row(Contact.table_name)", "def do_addContact(self, line):\n\t\tif not(self.db is None):\n\t\t\tcont = self.db.contact\n\t\t\tcontact_info = {\n\t\t\t\t'first_name': input(\"First name: \"),\n\t\t\t\t'surname': input(\"Surname: \"),\n\t\t\t\t'company': input(\"Company: \"),\n\t\t\t\t'address': input(\"Address: \"),\n\t\t\t\t'telephone': input(\"Telephone: \"),\n\t\t\t\t'email': input(\"Email: \")\n\t\t\t}\n\t\t\tcont.insert_one(contact_info)\n\t\telse:\n\t\t\tprint(\"You must open the existing database or create new one.\")", "def add_contact(contact):\n db = get_db()\n \n if contact.get_hash_name() not in db:\n db[contact.get_hash_name()] = json.loads(contact.json())\n write_db(db)\n else:\n sys.exit(logger.fail('fatal: contact already exists'))", "def add_contact(self, name, number, email, zipcode):\n \n new_contact = f\"{name}, {number}, {email}, {zipcode}\"\n contact_list = [name,number,email,zipcode]\n self.contacts.append(contact_list)\n self.save()\n print(f\"Thank you {new_contact} has been added to your contact book.\")", "def add_contact(self):\n contact_list = {}\n contact_list[self.my_number] = self.name\n connect_db = Database()\n connect_db.add_contact(self.name, self.my_number)", "async def post(self):\n await self.handle_request(self.contacts_new_api, 1)", "def add_contact(self, contact):\n\t\tclient_log.debug(f'Создание контакта {contact}')\n\t\treq = {\n\t\t\tACTION: ADD_CONTACT,\n\t\t\tTIME: time.time(),\n\t\t\tUSER: self.username,\n\t\t\tACCOUNT_NAME: contact\n\t\t}\n\t\twith socket_lock:\n\t\t\tsend_message(self.transport, req)\n\t\t\tself.process_server_ans(get_message(self.transport))", "def add_contact(database, name: str, email: str, phone: int) -> None:\n # Searches the database for the the current contact (excel row)\n cursor = database.execute(\"SELECT DISTINCT name, email, phone FROM contacts \"\n \"WHERE name = ? AND email =? 
OR phone = ?\", (name, email, phone))\n # Assigns the cursor results to the 'row' variable\n row = cursor.fetchone()\n # print(row) # For debugging\n\n # This checks if the contact already exists in the database or not\n if row:\n print(\"\\n{}, {}, {} is already in the database.\".format(name, email, phone))\n # Add the contact to the 'duplicates' table to retain the info in case of any\n # discrepancies in the final database.\n database.execute(\"INSERT INTO duplicates VALUES (?, ?, ?)\", (name, email, phone))\n else:\n cursor.execute(\"INSERT INTO contacts VALUES (?, ?, ?)\", (name, email, phone)) # Add contact to db\n cursor.connection.commit()\n # print(\"{}, {}, {} added to database.\".format(name, email, phone)) # For debugging", "def add_contact(self):\n contact = Contact.create_contact()\n self.contact_list.append(contact)\n\n df = pd.read_csv('address_book.csv')\n #print(df)\n adf = pd.DataFrame({'FIRST NAME': [contact.first_name],\n 'LAST NAME': [contact.last_name],\n 'ADDRESS': [contact.address],\n 'CITY': [contact.city],\n 'STATE': [contact.state],\n 'ZIP CODE': [contact.zip],\n 'PHONE NUMBER': [contact.phone_number],\n 'EMAIL': [contact.email]})\n adf.to_csv('address_book.csv',mode='a', header=False, index=None)\n #storing all contacts in address_book.csv file\n \"\"\"with open(\"address_book.csv\", \"w\") as f:\n for contact in self.contact_list:\n f.write(f\"FIRST NAME -> {contact.first_name}\\n\"\n f\"LAST NAME -> {contact.last_name}\\n\"\n f\"ADDRESS -> {contact.address}\\n\"\n f\"CITY -> {contact.city}\\n\"\n f\"STATE -> {contact.state}\\n\"\n f\"ZIP CODE -> {contact.zip}\\n\"\n f\"PHONE NUMBER -> {contact.phone_number}\\n\"\n f\"EMAIL -> {contact.email}\\n\\n\")\"\"\"", "async def create_contact(dbcon: DBConnection, name: Optional[str], email: Optional[str],\n phone: Optional[str], active: bool) -> str:\n q = \"\"\"insert into contacts (name, email, phone, active) values (%s, %s, %s, %s)\"\"\"\n q_args = (name, email, phone, active)\n contact_id = await dbcon.operation(q, q_args)\n return contact_id", "def create(self,contact: Contact) -> bool:\n try:\n contact_new=ContactSet(name=contact.name,birthdate=contact.birthdate\n ,contact_type=contact.contact_type, description=contact.description, phone=contact.phone)\n db.session.add(contact_new)\n db.session.commit()\n return True\n except Exception as ex:\n app.logger.error('Error creating a new Contact. 
{}'.format(ex))\n return False", "def post(self):\n return Contacts().create_one(request.get_json())", "def AddContact(self, contact):\n\t\tcontact.group_membership_info = [gdata.contacts.data.GroupMembershipInfo(href=self.GetFirstGroupId())]\n\t\ttry:\n\t\t\tself.client.CreateContact(contact)\n\t\texcept gdata.client.RequestError:\n\t\t\tpass", "def add_contact(self, request, **kwargs):\n if request.data is None:\n return Response({'message': 'Invalid contact details'}, status=status.HTTP_400_BAD_REQUEST)\n if request.data.get('first_name') is None:\n return Response({'message': 'First name not provided'}, status=status.HTTP_400_BAD_REQUEST)\n\n contact_data = request.data.get('contact')\n for data in contact_data:\n print(data.get('phone'))\n try:\n parse_number = phonenumbers.parse(data.get('phone'), None)\n except Exception:\n return Response({'details': 'Invalid Phonenumber'}, status=status.HTTP_405_METHOD_NOT_ALLOWED)\n if not phonenumbers.is_valid_number(parse_number):\n return Response({'details': 'Invalid Phonenumber entered'}, status=status.HTTP_405_METHOD_NOT_ALLOWED)\n\n new_contact_data = ContactCreationAndUpdationMixin().create(request.data)\n group = self.get_object()\n group.contacts.add(new_contact_data)\n serializer_data = ContactSerializer(new_contact_data) \n return Response(serializer_data.data)", "def test_new_contact_is_added(db_session):\n new_contact = AddressBook(\n name=\"test_name\",\n phone=\"test_phone\",\n email=\"test_email\"\n )\n db_session.add(new_contact)\n query = db_session.query(AddressBook).all()\n assert len(query) == 1", "def add_contact_to_db(name, email, module_db_id):\n success = False\n if name is not None:\n try:\n done_email = email.lower().strip()\n validate_email(done_email)\n\n contact, created = Contact.objects.get_or_create(list_owner_id=module_db_id, email=email)\n if created and contact:\n contact.name_and_last_name = name\n contact.email = email\n contact.status = 1\n contact.save()\n success = True\n else:\n success = False\n except Exception as e:\n print(e.args)\n contact, created = Contact.objects.get_or_create(list_owner_id=module_db_id, email=email)\n if created and contact:\n contact.name_and_last_name = name\n contact.email = email\n contact.status = 0\n contact.save()\n success = True\n else:\n success = False\n\n return success, name, email", "def add_contact_to_db_by_one(name, email, module_db_id, contact_id):\n success = False\n if name is not None:\n try:\n done_email = email.lower().strip()\n validate_email(done_email)\n\n if contact_id:\n try:\n contact = Contact.objects.get(id=contact_id, list_owner_id=module_db_id)\n contact.name_and_last_name = name\n contact.email = email\n contact.status = 1\n contact.save()\n success = True\n except Contact.DoesNotExist:\n pass\n else:\n contact, created = Contact.objects.get_or_create(list_owner_id=module_db_id, email=email)\n if created and contact:\n contact.name_and_last_name = name\n contact.status = 1\n contact.save()\n success = True\n except Exception as e:\n print(e.args)\n\n return success, name, email", "def test_add_contact(session): # pylint:disable=unused-argument\n org = factory_org_service()\n org.add_contact(TestContactInfo.contact1)\n dictionary = org.as_dict()\n assert dictionary['contacts']\n assert len(dictionary['contacts']) == 1\n assert dictionary['contacts'][0]['email'] == TestContactInfo.contact1['email']", "def add_contact(self):\n contact_mob_num = self._input_mob_num(\"-=\" * 30 + \"\\n\" + \"Please enter contact's mobile number to be added: \")\n if 
contact_mob_num == self._user.mob_num:\n print(\"You can't add yourself, IDIOT!!\")\n return self.homepage()\n \n found_contact = self.auth.get_users_by_MobNum(contact_mob_num)\n if found_contact != None:\n print('A user with Mobile number: \"{0}\", and User name: \"{1}\" is found'.format(found_contact.mob_num, found_contact.username))\n user_choice = self._int_input_in_range(\" (1) Add the found user. \\n (0) Back to Home page \\n Your choice: \" \n ,range_ = (0, 1))\n if user_choice:\n add_flag = self._user.add_contact(found_contact)\n if not add_flag:\n print('This user is already one of your contacts')\n return self.homepage()\n print(\"Contact added successfully\")\n else:\n self.homepage()\n else:\n print('This user mobile number has no matches')\n return self.homepage()", "def addcontact(name, address=None, phone=None, email=None):\n try:\n newid = str(r.incr(\"global:nextUserId\"))\n _setcontact(newid, name, address, phone, email)\n r.sadd(\"contacts\", newid)\n\n return _getcontact(newid)\n except:\n print \"Unexpected error:\", sys.exc_info()[0]\n raise", "def update_contact_in_db(self):\n self.init_db(self._testing)\n\n # making sure that the object is in the db\n assert not self.uid == \"\"\n\n self._update_row_in_db(Contact.table_name, Contact.columns, self.values_with_uid)", "def db_add_entry(person):\n db = sh.open(the_phone_book_name, flag='c', writeback=True)\n if person.name in db:\n print(\"Updating existing entry ..... {name}\\n\".format(name=person.name))\n else:\n person.new = True\n print(\"Adding new entry ..... {name}\".format(name=person.name))\n db[person.name.capitalize()] = person.phone\n db.sync()\n db.close()\n db_show_all()", "def test_new_contact_data(db_session):\n new_contact = AddressBook(\n name=\"test_name\",\n phone=\"test_phone\",\n email=\"test_email\"\n )\n db_session.add(new_contact)\n contact = db_session.query(AddressBook).all()\n assert contact[0].name == \"test_name\"\n assert contact[0].phone == \"test_phone\"\n assert contact[0].email == \"test_email\"", "def post(self):\n data = json.loads(request.data.decode())\n contact = Contacts(\n Email=data[\"email\"],\n TechRider=data[\"techRider\"],\n InputList=data[\"inputList\"],\n Backline=data[\"backline\"],\n Created=get_datetime(),\n )\n db.session.add(contact)\n db.session.commit()\n\n # The RFC 7231 spec says a 201 Created should return an absolute full path\n server = socket.gethostname()\n contents = \"Location: {}{}{}\".format(\n server,\n url_for(\"ContactsView:index\"),\n contact.ContactsID\n )\n\n return make_response(jsonify(contents), 201)", "def test_add_contacts(self):\n response = self.contacts.add(\"alex\", \"0708913841\")\n self.assertEqual(response, \"Successfully added contacts\" )", "def test_save_contact(self):\n self.new_contact.save_contact() # saving the new contact\n self.assertEqual(len(Contact.contact_list), 1)", "def add_contact():\n return 'add contact'", "def test_save_contact(self):\n # .save_contact() is the save to contact function.\n # Test would check if an addition has been made to our contact list\n self.new_contact.save_contact()\n self.assertEqual(len(Contact.contact_list), 1)", "def addRecord(self):\n\n ## Saving recorded entries to the CRM and Mailings Database\n print(\"Saving entries to the CRM and Mailings database...\")\n db_connection.executeQuery(\"INSERT INTO dbo.CRM (f_name, l_name, company, address, city, county, state, zip, primary_phone, secondary_phone, email_address) VALUES ('\" + self.first_name.replace(\"\\'\", \"\\'\\'\").title() + \"', 
'\" + self.last_name.replace(\"\\'\", \"\\'\\'\").title() + \"', '\" + self.crm_company_name.replace(\"\\'\", \"\\'\\'\").title() + \"', '\" + self.address.title() + \"', '\" + self.city.replace(\"\\'\", \"\\'\\'\").title() + \"', '\" + self.county.replace(\"\\'\", \"\\'\\'\").title() + \"', '\" + self.state_code.upper() + \"', '\" + str(self.zip_code) + \"', '\" + self.phone_number + \"', '\" + self.phone_number_2 + \"' , '\" + self.email_address + \"'); COMMIT\")\n db_connection.executeQuery(\"INSERT INTO dbo.Mailings (name, company, address) VALUES ('\" + self.first_name.replace(\"\\'\", \"\\'\\'\").title() + \" \" + self.last_name.replace(\"\\'\", \"\\'\\'\").title() + \"', '\" + self.company_name.replace(\"\\'\", \"\\'\\'\").title() + \"','\" + self.address + \" \" + self.city.title() + \" \" + self.county.title() + \" \" + self.state_code.upper() + \" \" + str(self.zip_code) + \"'); COMMIT\")", "def edit_contact(contact):\n db = get_db()\n \n if contact.get_hash_name() in db:\n db[contact.get_hash_name()] = json.loads(contact.json())\n write_db(db)\n else:\n sys.exit(logger.fail('fatal: contact does not exist'))", "def test_create_contact(self):\n \n url = reverse('contact-list')\n contact = self.get_dummy_contact()\n\n response = self.client.post(url, contact,\n format='json',\n HTTP_AUTHORIZATION=self.get_auth())\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(Contact.objects.count(), 1)\n self.assertEqual(Contact.objects.get().email_address, contact['email_address'])", "def contact(request):\n ContactMessage.objects.create(\n datetime=saturn.now(),\n name=request.data['name'],\n email=request.data['email'],\n body=request.data['body']\n )\n\n return Response({'success': True})", "def do_adduser(self, line):\n\t\tif isinstance(self.cl, Book):\n\t\t\tself.cl.add_contact()\n\t\telse:\n\t\t\tprint(\"To add contacts you need to open or create a book.\")", "def add_contact_to_google_account(self, i):\n\n self.add_contact_to_phone(i)", "def contact(request):\n name = request.POST.get('name', '')\n email = request.POST.get('email', '')\n msg = request.POST.get('msg', '')\n ContactUs.objects.create(name=name, email=email, msg=msg)\n messages.success(request, 'Submitted, Thank you!')\n return HttpResponseRedirect('/')", "def new_contact(self, context, payload):\n\n data = OntraportContact(\n contact_id= payload[\"data\"].get(\"id\"),\n first_name= payload[\"data\"].get(\"firstname\"),\n last_name= payload[\"data\"].get(\"lastname\"),\n email_address= payload[\"data\"].get(\"email\"),\n date= payload[\"data\"].get(\"date\"),\n office_phone= payload[\"data\"].get(\"office_phone\"),\n company= payload[\"data\"].get(\"company\"),\n title= payload[\"data\"].get(\"title\"),\n country= payload[\"data\"].get(\"country\"),\n zip_code= payload[\"data\"].get(\"zip\"),\n owner= payload[\"data\"].get(\"owner\"),\n unique_id= payload[\"data\"].get(\"unique_id\"),\n profile_image= payload[\"data\"].get(\"profile_image\")\n )\n return data.__dict__", "def add_contact(self, contact):\n assert self.contact_in_range(contact), 'Wrong KBucket.'\n try:\n self._contacts.remove(contact)\n except ValueError:\n pass\n\n if len(self._contacts) < constants.K:\n self._contacts.append(contact)\n else:\n raise FullBucketError('No space in bucket to insert contact')", "def contact():\n if request.method == \"POST\":\n mongo.db.contact.insert_one(request.form.to_dict())\n\n return jsonify(success=True)\n\n return render_template(\"contact.html\", page_title=\"Contact Us\")", "def 
addContact (self, dleseContributor):\n\t\tcontacts_el = self.selectSingleNode(self.dom, 'record:collection:contacts')\n\t\tif not contacts_el:\n\t\t\traise Exception, 'contacts node not found'\n\t\tel = XmlUtils.addElement(self.dom, contacts_el, 'contact')\n\t\t\n\t\tel.setAttribute (\"email\",dleseContributor.getEmail());\n\t\tel.setAttribute (\"name\", dleseContributor.getFullName());\n\t\tel.setAttribute (\"urlReport\", 'false');\n\t\tel.setAttribute (\"active\", 'false');", "def add_contact_data_to_contact_list(request, module_db_id):\n success = False\n errors = []\n data_obj = None\n if request.method == 'POST':\n try:\n json_obj = json.loads(request.body)\n new_name = json_obj.get('name', '')\n new_email = json_obj.get('email', '')\n contact_id = json_obj.get('id', '')\n list_db = ModuleContactListDB.objects.get(id=module_db_id)\n\n success, name, email = add_contact_to_db_by_one(\n new_name, new_email, list_db.id, contact_id)\n if success:\n contact_data = Contact.objects.get(email=email)\n data_obj = contact_data.as_dict\n else:\n errors = ['630400']\n except ModuleContactListDB.DoesNotExist:\n errors = ['620404']\n\n except Exception as e:\n errors = e.args\n\n data = {'data': data_obj,\n 'success': success,\n 'errors': errors}\n return json_response(data)", "def add(self, connection):\n id = len(self.contacts)\n self.contacts[id] = connection\n self.order.append(id)", "def create_contact_on_google(self, info):\n\n\t\twith open('client.pickle') as pickle_file:\n\t\t\tclient = pickle.load(pickle_file)\n\n\t\t#create contact in google\n\t\tnew_contact = gdata.contacts.data.ContactEntry()\n\n\t\t# Set the contact's name.\n\t\tnew_contact.name = gdata.data.Name( given_name=gdata.data.GivenName(text=info['name']), family_name=gdata.data.FamilyName(text=info['name']),\n\t\t\tfull_name=gdata.data.FullName(text=info['name']))\n\n\t\tnew_contact.content = atom.data.Content(text='Notes')\n\n\t\t# Set the contact's email addresses.\n\t\tnew_contact.email.append(gdata.data.Email(address=info['email'], primary='true', rel=gdata.data.WORK_REL, display_name=info['name']))\n\n\t\t# Set the contact's phone numbers.\n\t\tnew_contact.phone_number.append(gdata.data.PhoneNumber(text=info['phone'], rel=gdata.data.WORK_REL, primay='true'))\n\n\t\tcontact_entry = client.CreateContact(new_contact)\n\t\twebnotes.errprint(\"Contact's ID: %s\" % contact_entry.id.text)\n\n\t\twebnotes.conn.set_value(\"Contact\",self.name,\"contct_id\", contact_entry.id.text)", "def test_contact_exists(self):\n self.new_contact.save_contact()\n test_contact = Contact(\"Test\", \"User\", 254711223344, \"[email protected]\")\n test_contact.save_contact()\n contact_exists = Contact.contact_exist(254711223344)\n self.assertTrue(contact_exists)", "def create_person(conn, person, first_name, last_name):\n sql = ''' INSERT INTO person(firstname,lastname)\n VALUES(?,?) 
'''\n cur = conn.cursor() # cursor object\n cur.execute(sql, person)\n # print(str(cur.lastrowid))\n # return cur.lastrowid # returns the row id of the cursor object, the person id\n first_name.set('')\n last_name.set('')\n messagebox.showinfo('Success', 'Person Successfully Added to Database!')", "def create_or_update_contact(customer, entity):\n\tname = frappe.db.get_value('Contact', { 'entity_id': entity.get('entity_id') })\n\tif not name:\n\t\tcontact = frappe.new_doc('Contact')\n\telse:\n\t\tcontact = frappe.get_doc(\"Contact\", name)\n\n\tif not entity.get('firstname'):\n\t\treturn\n\t\n\tcontact.first_name = entity.get('firstname')\n\tcontact.last_name = entity.get('lastname')\n\tcontact.customer = customer.name\n\tcontact.customer_name = customer.customer_name\n\tcontact.entity_id = entity.get('entity_id')\n\tcontact.email_id = entity.get('email')\n\tcontact.save(ignore_permissions=True)", "def create_contact(contact, party_type, party):\n\tcontact = contact\t.split(\" \")\n\n\tcontact = frappe.get_doc({\n\t\t\"doctype\":\"Contact\",\n\t\t\"first_name\":contact[0],\n\t\t\"last_name\": len(contact) > 1 and contact[1] or \"\"\n\t})\n\tcontact.append('links', dict(link_doctype=party_type, link_name=party))\n\tcontact.insert()", "def add_person_to_db(self):\n fullname = self.AddPerson.add_person_to_db(self.sql)\n if fullname:\n self.fullname.setText(fullname)\n # likely same name as before so no triggered search\n self.search_people_by_name()", "async def add_contact_to_contact_group(dbcon: DBConnection, contact_group_id: int, contact_id: int) -> None:\n if not await contact_group_exists(dbcon, contact_group_id):\n raise errors.InvalidArguments('contact group does not exist')\n if not await contact_exists(dbcon, contact_id):\n raise errors.InvalidArguments('contact does not exist')\n q = \"\"\"replace into contact_group_contacts (contact_group_id, contact_id) values (%s, %s)\"\"\"\n q_args = (contact_group_id, contact_id)\n await dbcon.operation(q, q_args)", "def delete_contact_in_db(self):\n self.init_db(self._testing)\n\n # making sure that the object is in the db\n assert not self.uid == \"\"\n\n self._delete_row_in_db(Contact.table_name, (self.uid,))", "def create_contact(contact, party_type, party, email):\n\tcontact = contact.split(' ')\n\n\tcontact = frappe.get_doc({\n\t\t'doctype': 'Contact',\n\t\t'first_name': contact[0],\n\t\t'last_name': len(contact) > 1 and contact[1] or \"\"\n\t})\n\tcontact.append('email_ids', dict(email_id=email, is_primary=1))\n\tcontact.append('links', dict(link_doctype=party_type, link_name=party))\n\tcontact.insert()", "def add_contact(self, forename, surname, email, salutation=None, telephone=None, gsm=None,\n website=None, country=None, zipcode=None, city=None, street=None, number=None,\n language=None, gender=None, date_of_birth=None, description=None, newsletter=None, tags=None,\n automerge_by_name=False, automerge_by_email=False, custom_fields=None, tracking=None,\n tracking_long=None):\n\n # get all arguments\n data = self._clean_input_to_dict(locals())\n\n # argument validation\n if gender is not None and gender not in ['M', 'F', 'U']:\n raise InvalidInputError(\"Invalid contents of argument gender.\")\n\n tags = self._validate_type(tags, list)\n custom_fields = self._validate_type(custom_fields, dict)\n\n if country is not None:\n try:\n pycountry.countries.get(alpha2=country.upper())\n except:\n raise InvalidInputError(\"Invalid contents of argument country.\")\n\n if language is not None:\n try:\n 
pycountry.languages.get(iso639_1_code=language.lower())\n except:\n raise InvalidInputError(\"Invalid contents of argument language.\")\n\n if date_of_birth is not None and type(date_of_birth) != datetime.date:\n raise InvalidInputError(\"Invalid contents of argument date_of_birth.\")\n\n # convert data elements that need conversion\n data['add_tag_by_string'] = ','.join(data.pop('tags'))\n self._convert_custom_fields(data)\n\n if date_of_birth is not None:\n data['dob'] = time.mktime(data.pop('date_of_birth').timetuple())\n\n if newsletter is not None:\n data['newsletter'] = int(newsletter)\n\n data['automerge_by_name'] = int(automerge_by_name)\n data['automerge_by_email'] = int(automerge_by_email)\n\n return self._request('addContact', data)", "def add_rec(self):\n print(\"Write phone number:\")\n add_phone_number_input = input()\n print(\"Write name of the record:\")\n add_name_input = input()\n print(\"Write address:\")\n add_address_input = input()\n return self.storage.add(\n add_phone_number_input, add_name_input, add_address_input\n )", "def register(self, name, contact):\n return Registration(self.request).add(name, contact)", "def addUsertoDatabase(self):\r\n self.c.execute(\"\"\"INSERT INTO student_information VALUES (?,?,?)\"\"\",(self.name,self.password,self.budget,))\r\n self.con.commit()\r\n print(\"Added to Database Student..\")", "def add_customer(customer_id, name, lastname, home_address, phone_number, email_address, status,\n credit_limit):\n init_database()\n try:\n with database.transaction():\n new_customer = Customer.create(\n customer_id=customer_id,\n name=name,\n lastname=lastname,\n home_address=home_address,\n phone_number=phone_number,\n email_address=email_address,\n active_status=status,\n credit_limit=credit_limit\n )\n new_customer.save()\n logging.info('New customer, ID %s, added successfully.', customer_id)\n return True\n except peewee.IntegrityError as exc:\n logging.error('Error creating new customer with ID %s: %s.', customer_id, exc)\n return False\n finally:\n database.close()", "def create_contact(self, context, payload):\n\n if context.get('headers').get('api_key') is None or context.get('headers').get('app_id') is None:\n raise Exception(\"Please provide Api-Key and Api-Appid\")\n \n # Set headers\n headers = {\n \"Api-Key\": context.get('headers').get('api_key'),\n \"Api-Appid\": context.get('headers').get('app_id'),\n \"Content-Type\": \"application/json\"\n }\n\n response = requests.request(\"POST\", f'{self.url}Contacts', headers=headers, data=payload).text\n response = json.loads(response)\n response = response[\"data\"]\n return response", "def add_customer(customer_id, first_name, last_name, home_address, phone_number,\n email_address, is_active, credit_limit):\n try:\n LOGGER.info('Successfully connected to the database')\n\n with DATABASE.transaction():\n new_customer = Customer.create(customer_id=customer_id,\n first_name=first_name,\n last_name=last_name,\n home_address=home_address,\n phone_number=phone_number,\n email_address=email_address,\n is_active=is_active,\n credit_limit=credit_limit)\n new_customer.save()\n LOGGER.info(\"Customer added successfully\")\n\n except IntegrityError as error:\n LOGGER.info(error)\n LOGGER.info('Error occurred')", "def save_contract(bid):\n title = 'Create contract -- ' + bid\n doc = data_active.find_one({'bid': bid})\n if doc is None:\n # error exit from function\n web_logging.error('bid= {} not found in \"data_active\"'.format(bid))\n flash('bid= {} not found in \"data_active\"'.format(bid))\n 
return redirect(url_for('lists'))\n\n # take only 10 chars as number\n doc['phone'] = doc['phone'].replace('-', '')\n doc['phone'] = doc['phone'][len(doc['phone'])-10:]\n\n search_number = numbers.find_one({'numbers': {'$eq': doc['phone']}})\n contract = Contract()\n contract.from_lists(doc)\n\n if search_number is None:\n # no contacts with such number, call create new contact form\n form_doc = {'city': doc['location'], 'numbers': doc['phone'], 'comment': doc['comment'],\n 'loc_comments': doc['comment']}\n info = '--------- New Contact ------------'\n else:\n form_doc = Contact().to_form(search_number)\n info = '========= Contact already known, please check ======'\n\n form_doc.update(contract.as_dict())\n web_logging.debug('data for form= {}'.format(form_doc))\n form = Transaction(**form_doc)\n\n if form.validate_on_submit():\n contact_info = Contact()\n contact_info.from_form(form.data)\n\n contract_info = Contract()\n contract_info.from_form(form.data)\n\n if contact_info.contact_id is None:\n # contact is new\n contact_info.create_time = datetime.utcnow()\n contact_info.update_time = contact_info.create_time\n web_logging.debug('inserting contact_info= {}'.format(contact_info.as_dict()))\n web_logging.debug('inserting contract_info= {}'.format(contract_info))\n flash('inserting contact_info= {}, contract_info= {}'.format(contact_info.as_dict(), contract_info))\n result_contract = contract_info.mongo_insert()\n result_contact = contact_info.mongo_insert()\n # add contact id into document\n result_contract_upd = contracts.update_one({'_id': ObjectId(result_contract)},\n {'$set': {'contact': ObjectId(result_contact)}})\n result_contact_upd = numbers.update_one({'_id': ObjectId(result_contact)},\n {'$addToSet': {'contracts': ObjectId(result_contract)}})\n else:\n # contact already exists\n contact_info.update_time = datetime.utcnow()\n web_logging.debug('inserting contact_info= {}'.format(contact_info.as_dict()))\n web_logging.debug('inserting contract_info= {}'.format(contract_info))\n flash('updating contact_info= {}, creating contract_info= {}'.format(contact_info.as_dict(), contract_info))\n result_contract = contract_info.mongo_insert()\n result_contact = numbers.update_one({'_id': ObjectId(contact_info.contact_id)},\n {'$addToSet': {'contracts': ObjectId(result_contract)}})\n\n return redirect('/contracts')\n\n return render_template('contract.html', title=title, form=form, info=info)", "def test_save_multiple_contact(self):\n self.new_contact.save_contact()\n # new contact\n test_contact = Contact(\"Test\", \"user\", \"0798765432\", \"[email protected]\")\n test_contact.save_contact()\n self.assertEqual(len(Contact.contact_list), 2)", "def test_save_multiple_contacts(self):\n self.new_contact.save_contact() # saving the new contact\n test_contact = Contact(\"Test\", \"User\", 254712345678, \"[email protected]\") # new user\n test_contact.save_contact() # saving the new contact\n self.assertEqual(len(Contact.contact_list), 2)", "def add_customer(customer_id, first_name, last_name, home_address,\n phone_number, email_address, status, credit_limit):\n # database.transaction; all work given to database gets done or none of it\n with cm.DATABASE.transaction():\n try:\n # .create inserts the data into the database\n new_customer = cm.Customer.create(customer_id=customer_id,\n first_name=first_name,\n last_name=last_name,\n home_address=home_address,\n phone_number=phone_number,\n email_address=email_address,\n status=status,\n credit_limit=credit_limit)\n # .save() will write the data to 
the database\n new_customer.save()\n LOGGER.info(\"Added customer [%s]\", customer_id)\n except pw.IntegrityError:\n LOGGER.error(\"Customer [%s] not added to database!\", customer_id)\n raise pw.IntegrityError", "def post(self, data: Dict):\n data = {\n 'create': {\n 'CONTACT': data\n }\n }\n return self.format_and_send_request(data)", "def PostData(name: str, email: str, message: str) -> dict:\n con = Contact(name=name, email=email, message=message)\n db.session.add(con)\n db.session.commit()\n return {\"status\": 200, \"message\": \"Message sended successfully\"}", "def add_contacts(self, contacts, group=None, group_uuid=None):\n payload = self._build_params(contacts=contacts, action='add', group=group, group_uuid=group_uuid)\n self._post('contact_actions', None, payload)", "def contact_create(request):\n\n if request.method == \"POST\":\n form = ContactForm(request.POST)\n profile_form = ContactProfileForm(request.POST)\n\n if form.is_valid() and profile_form.is_valid():\n contact = form.save()\n\n # Populate the required 'contact' field before saving\n profile = profile_form.save(commit=False)\n profile.contact = contact\n profile.save()\n\n messages.success(request, _(\"The contact %(name)s was successfully created\") % \n {'name': unicode(contact)})\n return HttpResponseRedirect(reverse(\"moderation.views.contact\", args=(contact.pk,)))\n else:\n form_initial = {'phone_number': request.GET.get('phone_number', '')}\n form = ContactForm(initial=form_initial)\n\n profile_initial = {}\n if \"facility\" in request.GET:\n facility = get_object_or_404(Facility, pk=request.GET['facility'])\n profile_initial['facility'] = facility.pk\n profile_form = ContactProfileForm(initial=profile_initial)\n\n return render_to_response(\"contact_create.html\", \n { 'form': form, \n 'profile_form': profile_form,\n },\n context_instance=RequestContext(request))", "def add():\n name = request.form['name']\n message = request.form['message']\n\n try:\n newcurs = g.conn.execute(\"\"\"INSERT INTO record\n VALUES (%s, %s );\"\"\", name, message)\n newcurs.close()\n except Exception:\n print \"can not write record to database\"\n return redirect('/error')\n\n return render_template(\"index.html\", **locals())", "def insert_person():\r\n body = request.get_json()\r\n\r\n try:\r\n INSERT_PERSON_SCHEMA.validate(body)\r\n except SchemaError as err:\r\n raise ServiceBodyError(str(err))\r\n\r\n with sqlite_client:\r\n person = (body.get('name'), body.get('cpf'))\r\n message = add_person(sqlite_client, person)\r\n\r\n return jsonify({'id': message})", "def add_customer(\n customer_id,\n name,\n last_name,\n home_address,\n phone_number,\n email_address,\n status,\n credit_limit,\n):\n LOGGER.info(\"Adding new customer, %s %s to database\", name, last_name)\n try:\n Customers.create(\n customer_id=customer_id,\n name=name,\n last_name=last_name,\n home_address=home_address,\n phone_number=phone_number,\n email_address=email_address,\n status=status,\n credit_limit=credit_limit,\n )\n LOGGER.info(\"Added new customer %s %s to database\", name, last_name)\n except IntegrityError as e_val:\n LOGGER.warning(\"Customer %s already exists\", customer_id)\n LOGGER.warning(e_val)", "def add_customer(customer_id, name, lastname, home_address,\n phone_number, email_address, status, credit_limit):\n try:\n with database.transaction():\n customer = Customer.create(\n customer_id=customer_id,\n name=name,\n lastname=lastname,\n home_address=home_address,\n phone_number=phone_number,\n email_address=email_address,\n status=status,\n 
credit_limit=credit_limit,\n )\n customer.save()\n except Exception as unknown_error:\n print(unknown_error)", "async def add_contact_to_active_monitor(dbcon: DBConnection, contact_id: int, monitor_id: int) -> None:\n if not await active_monitor_exists(dbcon, monitor_id):\n raise errors.InvalidArguments('monitor does not exist')\n if not await contact_exists(dbcon, contact_id):\n raise errors.InvalidArguments('contact does not exist')\n q = \"\"\"replace into active_monitor_contacts (active_monitor_id, contact_id) values (%s, %s)\"\"\"\n q_args = (monitor_id, contact_id)\n await dbcon.operation(q, q_args)", "def setUp(self):\n # Below creating the new contact object to test.\n self.new_contact = Contact(\n \"James\", \"Muriuki\", \"0712345678\", \"[email protected]\")", "def add_new(cls, name, surname):\n\t\tauthor = Author.query.filter_by(name=name, surname=surname).first()\n\t\tif author:\n\t\t\t# Restore old.\n\t\t\tauthor.saved = True\n\t\telse:\n\t\t\t# Create new record.\n\t\t\tauthor = Author(name, surname)\n\t\t\tdb.session.add(author)\n\t\tdb.session.commit()\n\t\treturn author.id", "def add_person():\n # get values from user\n responses = accept_inputs([\"Name\"])\n # insert into db\n query_no_results(\"insert into person (name) values(?)\", [responses[\"Name\"]])\n print(\"New person created\")", "def save_object(self, data):\n return Contact(**data)", "def _setcontact(id, name=None, address=None, phone=None, email=None):\n try:\n if name is not None:\n r.set(\"uid:\" + id + \":name\", name)\n if address is not None: \n r.set(\"uid:\" + id + \":address\", address)\n if phone is not None: \n r.set(\"uid:\" + id + \":phone\", phone)\n if email is not None: \n r.set(\"uid:\" + id + \":email\", email)\n\n return True\n except:\n print \"Unexpected error:\", sys.exc_info()[0]\n raise", "def add_contact_to_personal_addressbook(self, name, nickname, first_name, last_name, home_phone, mobile_number,\n business_number, fax, email, image_id, give_json=False):\n\n url = Constants.BASE_URL + 'users/addressbooks/personal'\n requestbody = JSON.dumps({\n \"contact\": {\n \"name\": name,\n \"nickname\": nickname,\n \"first_name\": first_name,\n \"last_name\": last_name,\n \"home_phone\": home_phone,\n \"mobile_number\": mobile_number,\n \"business_number\": business_number,\n \"fax\": fax,\n \"email\": email,\n \"image_id\": image_id\n }\n })\n response = requests.post(url=url, params={'key': self.user_access_token}, json=requestbody)\n\n if give_json:\n return response.json()\n else:\n return response.text", "def create_contact_info(query, user, number):\n\n data = {\n 'startDate': str_date(get_date()),\n 'number': number,\n 'user': user\n }\n\n fb.patch(query, data)", "def contact(self, contact):\n\n self._contact = contact", "def contact(self, contact):\n\n self._contact = contact", "def add_customer(login, password, name, phone, email):\n with MY_CONNECTION as connection:\n connection.execute(\n \"\"\"\n INSERT INTO Customers\n (login,password,customer_name,phone,email)\n VALUES(?,?,?,?,?)\n \"\"\",\n (login, password, name, phone, email))", "def setUp(self):\n self.new_contact = Contact(\"zoo\", \"vier\", 254719702373, \"[email protected]\")", "def __ui_add_new_person(self):\n person_id = int(input(\"ID: \"))\n person_name = input(\"Name: \").strip()\n person_phone_number = input(\"Phone number: \").strip()\n self.__person_service.service_add_person(person_id, person_name, person_phone_number)\n print(\"Person successfully added to your agenda!\\n\")", "async def get(self):\n await 
self.handle_request(self.contacts_new_api, 1)", "def add(name, phone, db):\n database = load(db)\n if name in database:\n print(\"%r already in %r\" % (name, db))\n sys.exit(-1)\n else:\n database[name] = phone\n database = OrderedDict(sorted(database.items()))\n pickle.dump(database, open(db, 'wb'))\n print(\"added '%s (%s)' to %r\" % (name, phone, db))", "def add_person():\n # Find the last used PK\n with sqlite3.connect('skeevy.db') as connection:\n cursor = connection.cursor()\n cursor.execute(\"SELECT id FROM person ORDER BY id DESC;\")\n for row in cursor.fetchone():\n last_pk = row\n\n # Auto-increment the primary key for the person table.\n last_pk = last_pk + 1\n\n # Prompt the user for the rest of their information.\n first_name = input(\"Enter your first name: \")\n middle_name = input(\"Enter your middle name: \")\n last_name = input(\"Enter your last name: \")\n suffix_name = input(\"Enter your suffix: \")\n e_mail = input(\"Enter your email: \")\n # Default status of the person is active (1).\n status = 1\n\n # Store the input in a variable.\n person_data = (last_pk, first_name, middle_name, last_name, suffix_name,\n e_mail, status)\n\n # Connect and insert the data into the person table.\n with sqlite3.connect('skeevy.db') as connection:\n cursor = connection.cursor()\n cursor.execute(\"INSERT INTO person VALUES(?, ?, ?, ?, ?, ?, ?);\",\n person_data)\n connection.commit()", "def test_projects_id_contacts_put(self):\n project = Contact()\n response = self.client.open('/project-tracker/projects/{id}/contacts'.format(id=56),\n method='PUT',\n data=json.dumps(project),\n content_type='application/json')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def add_contact(cmd, *args):\n cfg = get_config()\n nick = None\n if len(args) == 0:\n print(add_contact.__doc__)\n if len(args) >= 1:\n nick = args[0]\n fulname = nick # fullname fallback\n if len(args) >= 2:\n fullname = args[1]\n #print('fullname %s' %fullname)\n else:\n print(\"cant handle those params \" + str(args))\n\n vcard_fn = nick + '.vcf'\n vcard_fn = os.path.join(cfg['vcard_dir'], vcard_fn)\n #print('expecting file at %s' %vcard_fn)\n\n info = {}\n info['nick'] = nick\n info['fullname'] = fullname\n if len(fullname.split(' ')) > 1:\n subname = fullname.split()\n info['name'] = {'family': subname[0], 'given': subname[1]}\n if os.path.isfile(vcard_fn):\n print('file exists for %s, at %s please move or rename it'\n % (nick, vcard_fn))\n return False\n vcard = vobject.vCard()\n if os.path.isfile(vcard_fn):\n vcard = loadcraphere\n else:\n vcard_merge_in_dict(info, vcard)\n rawdata = vcard.serialize()\n with open(vcard_fn, 'w+') as fh:\n fh.write(rawdata)\n #print('written, sucker!')\n #annoyingly verbose vcard here'\n #Full Name = fn. 
Single string, entire name, required\n #x = vobject.vCard()\n # x.name = 'Foo'", "def appendedEntries(self):\n self.contact_list.append({\"name\": self.first_name.title() + \" \" + self.last_name.title(), \"phone number\": self.phone_number, \"phone number type\": self.phone_number_type})", "def insert(self, name, email, phone, address, state, zip, country, amount, message):\n params = {'name':name, 'email':email, 'phone':phone,'address':address,'state':state,\\\n 'zip':zip,'country':country,'amount':amount,'message':message}\n connection = sqlite3.connect(DB_FILE)\n cursor = connection.cursor()\n cursor.execute(\"insert into foodbank (name, email, phone, address, state, zip, country, amount, message)\\\n VALUES (:name, :email, :phone, :address, :state, :zip, :country, :amount, :message)\", params)\n\n connection.commit()\n cursor.close()\n return True", "def addNewAuthor(name: str, birth: str):\n if not name or not checkDate(birth):\n abort(400)\n author = Author(name=name, birth=birth)\n db.session.add(author)\n db.session.commit()\n app.logger.info(f\"New author with id: {author.id} added\")", "def list_contact(name):\n db = get_db()\n name = hashlib.sha256(name).hexdigest()\n \n if name in db:\n info = db[name]\n print logger.ok(\"\"\"\n Contact Information:\n Name: %s\n Phone Number: %s\n Email Address: %s\n \"\"\" % (info['name'], info['phone'], info['email']))\n else:\n sys.exit(logger.fail('fatal: contact does not exist'))", "def test_new_contact_association(self):\n node = self.create_xml_patient({'Mobile_Number': '12223334444',\n 'Pin_Code': '4444'})\n payload = self.create_payload([node])\n parse_patient(node, payload)\n patient = payload.patients.all()[0]\n self.assertTrue(patient.contact is not None)\n self.assertEqual(patient.contact.phone, '+12223334444')\n self.assertEqual(patient.contact.pin, '4444')", "def contact_form_submit(request):\n if request.method == \"POST\":\n full_name = request.POST['full_name']\n email_id = request.POST['email_id']\n contact_number = request.POST['contact_number']\n message = request.POST['message']\n ContactForm.objects.create(full_name=full_name,\n email_id=email_id,\n contact_number=contact_number,\n message=message\n )\n return HttpResponseRedirect(reverse('my_contact_data:contactdata'))", "def test_find_contact(self):\n self.new_contact.save_contact()\n test_contact = Contact(\"Test\", \"User\", 254711223344, \"[email protected]\")\n test_contact.save_contact()\n found_contact = Contact.find_by_phone(254711223344)\n\n self.assertEqual(found_contact.email, test_contact.email)", "def test_projects_id_contacts_post(self):\n project = Contact()\n response = self.client.open('/project-tracker/projects/{id}/contacts'.format(id=56),\n method='POST',\n data=json.dumps(project),\n content_type='application/json')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def addAccountContact(self,contact, accountId, responseFields = None):\r\n\r\n\t\turl = MozuUrl(\"/api/commerce/customer/accounts/{accountId}/contacts?responseFields={responseFields}\", \"POST\", UrlLocation.TenantPod, False);\r\n\t\turl.formatUrl(\"accountId\", accountId);\r\n\t\turl.formatUrl(\"responseFields\", responseFields);\r\n\t\tself.client.withResourceUrl(url).withBody(contact).execute();\r\n\t\treturn self.client.result();", "def post_save_add_contact(sender, **kwargs):\n obj = kwargs['instance']\n active_campaign_list = Campaign.objects.filter(phonebook__contact__id=obj.id,\n status=CAMPAIGN_STATUS.START)\n # created instance = True + 
active contact + active_campaign\n if kwargs['created'] and obj.status == CONTACT_STATUS.ACTIVE \\\n and active_campaign_list.count() >= 1:\n for elem_campaign in active_campaign_list:\n try:\n Subscriber.objects.create(\n contact=obj,\n duplicate_contact=obj.contact,\n status=SUBSCRIBER_STATUS.PENDING,\n campaign=elem_campaign)\n except:\n pass", "def _findAndAddContactByPhone(self, phone):\n try:\n contact = self._findAndAddContactsByPhone(phone)\n except TalkException as e:\n self.raise_error(e.reason)\n\n contact = contact.values()[0]\n\n for c in self.contacts:\n if c.id == contact.mid:\n self.raise_error(\"%s already exists\" % contact.displayName)\n return\n\n c = LineContact(self, contact)\n self.contacts.append(c)\n\n self.contacts.sort()\n return c", "def add_to_db(name, email_id):\n conn = None\n try:\n conn = connect_to_db()\n cur = conn.cursor()\n # This is the best way that I found to do an 'upsert' in a database agnostic way.\n # Try to update the data first, and if no records get updated, insert them.\n cur.execute(UPDATE_STMT.format(nm=name, em=email_id))\n if cur.rowcount == 0:\n cur.execute(INSERT_STMT.format(nm=name, em=email_id))\n conn.commit()\n print('Successfully added/updated record!')\n except Exception as e:\n print(str(e))\n disconnect_from_db(conn)\n raise e\n finally:\n disconnect_from_db(conn)", "def onAddRecord(self, event):\r\n dbItem = getattr(db, self.modelName) ()\r\n dlg = dPerson.Person(self.view, dbItem).view\r\n dlg.ShowModal()\r\n dlg.Destroy()" ]
[ "0.84468395", "0.8155297", "0.7787158", "0.7690058", "0.7496616", "0.73814785", "0.73638916", "0.73298204", "0.7302359", "0.7158444", "0.7154227", "0.71142626", "0.7102266", "0.7028337", "0.69548136", "0.6899097", "0.68859565", "0.6878193", "0.6827072", "0.6824314", "0.68185693", "0.6795603", "0.6789357", "0.6714719", "0.6703757", "0.6665329", "0.6643569", "0.6609067", "0.65624744", "0.6561256", "0.6493914", "0.6363666", "0.63342994", "0.6328498", "0.63205516", "0.6310245", "0.62744504", "0.6267023", "0.6260922", "0.62499195", "0.61221784", "0.61044055", "0.6099576", "0.60775685", "0.60665315", "0.6061081", "0.60536754", "0.60459626", "0.603857", "0.60380155", "0.60195976", "0.60067785", "0.5997444", "0.5983674", "0.5981563", "0.5976661", "0.59588546", "0.59486526", "0.59477144", "0.5936019", "0.5933769", "0.59283733", "0.59167516", "0.5900527", "0.5895679", "0.58930314", "0.5884775", "0.5856838", "0.5846897", "0.5846286", "0.584565", "0.58404166", "0.583372", "0.5833217", "0.582167", "0.581272", "0.58098674", "0.5784607", "0.5784607", "0.57837033", "0.5778675", "0.5771712", "0.57682294", "0.5764061", "0.5759084", "0.5753956", "0.5745349", "0.5745261", "0.57396394", "0.5729869", "0.5729776", "0.57271945", "0.572677", "0.57253057", "0.57134223", "0.5709467", "0.5706437", "0.57003766", "0.56937075", "0.5691682" ]
0.8165148
1
Print contacts without contacting the database
def list_contacts(self):
    return self.contacts
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def display_contact(self):\n contacts = \"\".join(str(contact) for contact in self.contact_list)\n print(contacts)", "def do_show(self, line):\n\t\tif not(self.db is None):\n\t\t\tfor contact in self.db.contact.find():\n\t\t\t\tpprint.pprint(contact)\n\t\telse:\n\t\t\tprint(\"You must open the existing database or create new one.\")", "def view_contacts(self):\n with open(self.filename, \"r\") as contactsFile:\n contacts = self.display_contact(contactsFile.readlines())\n\n if not contacts:\n return self.msgbox(\"No contacts found.\")\n\n self.msgbox(msg=\"\\n\".join(contacts), title=\"Showing All Contacts\")", "def show_contacts():\n data_list = queries2.contacts()[0]\n table_titles = queries2.contacts()[1]\n title = \"Contacts\"\n return render_template('pages.html', data_list=data_list, title=title, table_titles=table_titles)", "def search_contact_list(self):\n\n search_db = Database()\n result = search_db.contact_search(self.name)\n if not result:\n print Fore.YELLOW + ' No such contact'\n return None\n if result > 1:\n print ' Which contact ??'\n for items in result:\n if items[2] > 1:\n print Fore.BLUE + ' %s %s %s' % ([items[0]], items[1], items[2])\n else:\n print str(items[1]), items[2]\n\n return result", "def print_all(self):\n with open(self.file, 'r', encoding='utf-8') as self.contacts_file:\n for i in self.contacts_file.readlines():\n print(i)", "def do_show(self, line):\n\t\tif isinstance(self.cl, Book):\n\t\t\tprint(\"Contacts in the current book\\n\")\n\t\t\tself.cl.list_contacts()\n\t\telse:\n\t\t\tprint(\"To see contacts you need to open or create book\")", "def list_contact(name):\n db = get_db()\n name = hashlib.sha256(name).hexdigest()\n \n if name in db:\n info = db[name]\n print logger.ok(\"\"\"\n Contact Information:\n Name: %s\n Phone Number: %s\n Email Address: %s\n \"\"\" % (info['name'], info['phone'], info['email']))\n else:\n sys.exit(logger.fail('fatal: contact does not exist'))", "def present_data(self, data=None):\n print('--------------------------------------------------------------------------')\n print('{:<10}{:<10}{:<15}{:<17}{:<17}'.\n format(\n 'index',\n 'name',\n 'surname',\n 'email',\n 'phone'\n )\n )\n print('--------------------------------------------------------------------------')\n\n data = data if data else self.contacts\n for contact in data:\n print('{:<10}{:<10}{:<15}{:<17}{:<17}'.\n format(\n contact[0],\n contact[1],\n contact[2],\n contact[3],\n contact[4]\n )\n )", "def contacts():\n return render_template(\n \"contacts.html\",\n title = \"Contacts\")", "def view_all_persons():\n message = ''\n global conn\n with conn:\n rows = select_all_persons(conn)\n for row in rows:\n message += str(row) + \"\\n\"\n messagebox.showinfo('Person Table', message)", "def db_show_all():\n the_list = []\n db = sh.open(the_phone_book_name, flag='c', writeback=True)\n for key in db:\n person = Person()\n person.name = key\n person.phone = db[key]\n the_list.append(person)\n display_list(the_list)\n db.close()", "def contact(request):\n\n contacts = ContactDetails.objects\n return render(request, 'contact_app/contact.html', {\"contacts\":contacts})", "def get_contacts(userid):\n return 'get contacts - ' + userid", "def uglyprint(self):\n\n ctmp = self.conn.cursor()\n ctmp.execute(\"SELECT * FROM ATOM\")\n print(ctmp.fetchall())", "def Run(self):\n return self.ListAllContacts()", "def test_display_all_contact(self):\n self.assertEqual(Contact.display_contacts(), Contact.contact_list)", "def print_event(self):\n\n list_of_names = [str(c) for c in 
self.__list_of_contacts]\n joined_names = ', '.join(list_of_names)\n table = [[str(self._title)],[\"Date: \"+str(self._date)],[\"Time: \"+str(self._start)+\" - \"+str(self._end)],[\"Participants: \"+str(joined_names)]]\n print(tabulate(table, tablefmt='grid'))", "def writecontactstocsv(self , contact_entries):\n rx = re.compile('\\W+')\n allcontacts = []\n for entry in contact_entries:\n if entry.name is not None and len(entry.phone_number) > 0 and len(entry.group_membership_info) > 0:\n\n # Clean up characters in contact name; replace all non-alphanumerics with spaces\n fullname = entry.name.full_name.text\n fullname = rx.sub(' ', fullname).strip()\n for rawPhoneNumber in entry.phone_number:\n # Remove non-numeric characters from the phone number\n phone_number = re.sub(\"[^0-9]\", \"\", rawPhoneNumber.text)\n # Save contact for later insert\n allcontacts.append((fullname, phone_number))\n\n allcontacts = tuple(set(allcontacts))\n\n csvfilename = \"Downloads/ContactExport\"+time.strftime(\"%Y%m%d-%H%M%S\")+\".csv\"\n csvfile = open(csvfilename, \"w\")\n for csvFullName, csvPhoneNumber in allcontacts:\n line = \"\\\"%s\\\",%s\\n\" % (csvFullName, csvPhoneNumber)\n csvfile.write(line)\n\n csvfile.close()", "def prnt(self):\n print \"%s %s %s %s\" % (time.ctime(), self.time, self.who, self.region)\n print \"%s %s %s\" % (time.ctime(), ' ' * len(self.time), self.text)\n for r in self.recipients:\n print \"%s %s %s\" % (time.ctime(), ' ' * len(self.time), r)", "def display(phonebook):\n\n phonebook_data = read_phonebook(phonebook)\n\n for name in sorted(phonebook_data.keys(), key=str.lower):\n print name, phonebook_data[name]", "def phone_dir_nav():\n\n emps = Employee.query.all()\n\n for emp in emps: # [<Emp>, <Emp>]\n if emp.dept is not None:\n print(emp.name, emp.dept.dept_code, emp.dept.phone)\n else:\n print(emp.name, \"-\", \"-\")", "def test_display_all_contacts(self):\n self.assertEqual(Contact.display_all_contacts(), Contact.contact_list)", "def printResults(contact_map):\n print(\"----\")\n for participant in contact_map.values():\n print participant.getName()\n print \"Messages: \", participant.getMessageCount()\n print \"Words: \", participant.getWordCount()\n print \"Avg Words: \", participant.avgWords()\n print \"Messages initiaited: \", participant.getFirstMessageCount()\n print \"Hourly count: \", participant.getHourlyMessageCount()\n print \"Daily count: \", participant.getDailyMessageCount()\n print \"Monthly count: \", participant.getMonthlyMessageCount()\n print \"Most common word: \", participant.getMostCommonWord()\n print \"----\"", "def get_all(self):\n total_contacts = []\n get_count = {\n 'query': {\n 'object': 'CONTACT',\n 'select': {\n 'field': 'RECORDNO'\n },\n 'pagesize': '1'\n }\n }\n\n response = self.format_and_send_request(get_count)\n count = int(response['data']['@totalcount'])\n pagesize = 2000\n offset = 0\n for i in range(0, count, pagesize):\n data = {\n 'query': {\n 'object': 'CONTACT',\n 'select': {\n 'field': [\n 'RECORDNO',\n 'CONTACTNAME',\n 'COMPANYNAME',\n 'FIRSTNAME',\n 'LASTNAME',\n 'INITIAL',\n 'PRINTAS',\n 'TAXABLE',\n 'MAILADDRESS.ADDRESS1'\n ]\n },\n 'pagesize': pagesize,\n 'offset': offset\n }\n }\n contacts = self.format_and_send_request(data)['data']['CONTACT']\n total_contacts = total_contacts + contacts\n offset = offset + pagesize\n return total_contacts", "def main2():\n\t\n\tcu_locations = cu_locations_data();\n\t\n\tfor row in cu_locations:\n\t\tprint \"INSERT INTO contact ('ref_id') VALUES (%s);\" % ( row['location_id'] );", "def 
display_list(the_list):\n print(\"\\n===================================\")\n for person in the_list:\n print(\"{name:12s}\\t\\t{phone}\".format(name=person.name, phone=person.phone))\n if the_list == []:\n print(\"\\nNo entries found!\\n\")\n print(\"===================================\\n\")", "def showEditContact(self):", "def simple_contacts(filename):\n\n try:\n file_path = open(filename, 'r', encoding='utf-8')\n\n except FileNotFoundError:\n pretty_print(\"Cannot open contacts.txt\", \":\")\n sleep(3)\n\n else:\n with file_path:\n print_list = []\n email_dict = {}\n for line in file_path:\n split_line = line.strip().split('|')\n\n if split_line[0].isnumeric():\n\n command = int(split_line[0])\n email = split_line[-1]\n print_list.append(split_line)\n email_dict[command] = email\n\n return print_list, email_dict", "def ListAllContacts(self):\n feed = self.gd_client.GetContacts()\n self.contacts = self.CleanPhoneNumbers(self.GetContactsInfo(feed))\n return self.contacts", "def contacts(request):\n User = get_user_model()\n ids = set(request.user.chatmessage_set.all().values_list(\"recipients\", flat=True))\n context = {\n 'contacts': User.objects.filter(pk__in=ids)\n }\n return render(request, \"chat/contacts.html\", context)", "def append_contacts(self, lines, lang):\n if lang==\"en\":\n lines.append(\"section Contacts\")\n elif lang==\"it\":\n lines.append(\"section Contatti\")\n lines.append(\"mailto://%s e-mail\" % flags['MAIL'])\n lines.append(\"verbatim %s\" % SKYPE)\n lines.append(\"verbatim &nbsp;\")\n return lines", "def list_contact(self, key, value):\n self.db.list_contact(\n key,\n value,\n )", "def print_customers(self):\n output = ''\n for i in range(len(self.customers)):\n output += f'Customer no. {self.customers[i].id} is in {self.customers[i].state[0]} section\\n'\n #print(output)\n with open('oneday.csv','a') as outfile:\n for i in range(len(self.customers)):\n outfile.write(f'{self.get_time()};{self.customers[i].id};{self.customers[i].state[0]}\\n')", "def get_queryset(self):\n return self.request.user.contacts.all()", "def delete_contact(self):\n delete_first_name = input(\"Enter first name that you want to delete\\n\")\n for contact in self.contact_list:\n if contact.first_name == delete_first_name:\n #print(str(contact))\n self.contact_list.remove(contact)\n else:\n print(f\"No contact is present with first name {delete_first_name} \")", "def display(self):\n print(f'{self.first_name} {self.last_name}, Customer#: '\n f'{self.customer_id}\\n{self.address}\\n{self.phone_number}\\n'\n f'{self.create_invoice()}')", "def display_accounts_details():\n return Records.display_records()", "def show():\n logger.info('List donors')\n try:\n logger.info('Connecting to database...')\n database.connect()\n database.execute_sql('PRAGMA foreign_keys = ON;')\n for i in Donor.select().order_by(Donor.donor_name):\n print(i)\n except Exception as e:\n logger.info(e)\n finally:\n database.close()", "def print_loc_acrnym():\n\n #Method2\n val = College.objects.values('acronym','contact')\n for i in val:\n print(i['acronym'],i['contact'])", "def _write_contact(self, size, card_writer):\n msg = []\n if (self.bcrparas or self.bctadds or self.bctparas or self.bctsets\n or self.bsurf or self.bsurfs):\n msg.append('$CONTACT\\n')\n for (unused_id, bcrpara) in sorted(self.bcrparas.iteritems()):\n msg.append(bcrpara.write_bdf(size, card_writer))\n for (unused_id, bctadds) in sorted(self.bctadds.iteritems()):\n msg.append(bctadds.write_bdf(size, card_writer))\n for (unused_id, bctpara) in 
sorted(self.bctparas.iteritems()):\n msg.append(bctpara.write_bdf(size, card_writer))\n\n for (unused_id, bctset) in sorted(self.bctsets.iteritems()):\n msg.append(bctset.write_bdf(size, card_writer))\n for (unused_id, bsurfi) in sorted(self.bsurf.iteritems()):\n msg.append(bsurfi.write_bdf(size, card_writer))\n for (unused_id, bsurfsi) in sorted(self.bsurfs.iteritems()):\n msg.append(bsurfsi.write_bdf(size, card_writer))\n return ''.join(msg)", "def printdonorlist():\n for name in donor_db:\n print(name)", "def __ui_search_persons_by_phone_number(self):\n searched_phone_number = input(\"Introduce the phone number: \").strip().lower()\n if searched_phone_number == \"\":\n print(\"You cannot search persons by an empty phone number!\\n\")\n return\n\n searched_persons = self.__person_service.find_persons_by_phone_number(searched_phone_number)\n\n if len(searched_persons) == 0:\n print('There is no person whose phone number matches with \"{}\"!\\n'.format(searched_phone_number))\n else:\n print(\"\")\n for person in searched_persons:\n print(person)\n print(\"\")", "def test_get_contacts(self):\n pass", "def contact_info(self, sensitive=True):\n account_id = self.account_id()\n retry_count = 5\n\n req_url = self.get(\"/accounts/{}/contacts\".format(account_id))['ResultUrl']\n resp = self.get(req_url)\n tries = 0\n while 'Contacts' not in resp and tries < retry_count:\n resp = self.get(req_url)\n tries += 1\n time.sleep(1)\n contacts = resp['Contacts']\n\n contact_data = list()\n for contact in contacts:\n row_data = {\n 'ContactId': contact['Id'],\n 'Email': \"*****@****.***\" if sensitive else contact['Email'],\n 'FirstName': \"*****\" if sensitive else contact['FirstName'],\n 'LastName': \"*****\" if sensitive else contact['LastName'],\n 'Status': contact.get('Status'),\n 'MembeshipEnabled': contact.get('MembershipEnabled'),\n 'TermsOfUseAccepted': contact['TermsOfUseAccepted'],\n }\n\n if 'MembershipLevel' in contact:\n row_data['MembershipLevel'] = contact['MembershipLevel']['Name']\n\n # Map all field values into a dict for convenience\n field_values = {val['FieldName']: val['Value']\n for val in contact['FieldValues']}\n\n # Get list of authorizations\n if 'Managed Authorizations' in field_values:\n authorizations = [i['Label']\n for i in field_values['Managed Authorizations']]\n row_data['Authorizations'] = authorizations\n\n contact_data.append(row_data)\n self.__contact_df = pd.DataFrame(contact_data).set_index('ContactId')\n return self.__contact_df", "def contact():\n return render_template(\n 'contact.jade',\n title='Contact',\n year=datetime.now().year,\n )", "def add_contact(self):\n contact = Contact.create_contact()\n self.contact_list.append(contact)\n\n df = pd.read_csv('address_book.csv')\n #print(df)\n adf = pd.DataFrame({'FIRST NAME': [contact.first_name],\n 'LAST NAME': [contact.last_name],\n 'ADDRESS': [contact.address],\n 'CITY': [contact.city],\n 'STATE': [contact.state],\n 'ZIP CODE': [contact.zip],\n 'PHONE NUMBER': [contact.phone_number],\n 'EMAIL': [contact.email]})\n adf.to_csv('address_book.csv',mode='a', header=False, index=None)\n #storing all contacts in address_book.csv file\n \"\"\"with open(\"address_book.csv\", \"w\") as f:\n for contact in self.contact_list:\n f.write(f\"FIRST NAME -> {contact.first_name}\\n\"\n f\"LAST NAME -> {contact.last_name}\\n\"\n f\"ADDRESS -> {contact.address}\\n\"\n f\"CITY -> {contact.city}\\n\"\n f\"STATE -> {contact.state}\\n\"\n f\"ZIP CODE -> {contact.zip}\\n\"\n f\"PHONE NUMBER -> {contact.phone_number}\\n\"\n f\"EMAIL -> 
{contact.email}\\n\\n\")\"\"\"", "def save(self):\n with open(self.file, 'w', encoding='utf-8') as self.contacts_file:\n self.contacts_file.seek(0)\n for line in self.contacts:\n self.contacts_file.write(\",\".join(line))\n self.contacts_file.write(\"\\n\")\n self.contacts_file.truncate()\n self.contacts_file.close()", "def exportPeople ( c ) :\n assert str(type(c)) == \"<type '_mysql.connection'>\"\n xml = \"\"\n p = sqlQuery ( c, \"select * from People;\" )\n for i in p :\n pL = sqlQuery ( c, \"select * from PeopleLocations where personID = '\" + i [ 0 ] + \"';\" )\n pER = sqlQuery ( c, \"select * from PersonExternalResources where personID = '\" + i [ 0 ] + \"';\" )\n pTO = sqlQuery ( c, \"select * from PeopleToOrganizations where personID = '\" + i [ 0 ] + \"';\" )\n cTP = sqlQuery ( c, \"select * from CrisesToPeople where personID = '\" + i [ 0 ] + \"';\" )\n xml += openTagAtt ( \"Person\", \"personIdent\", i [ 0 ] )\n xml += openTag ( \"Name\" )\n xml += openCloseTag ( \"FirstName\", i [ 1 ] )\n xml += openCloseTag ( \"MiddleName\", i [ 2 ] )\n xml += openCloseTag ( \"LastName\", i [ 3 ] )\n xml += openCloseTag ( \"Suffix\", i [ 4 ] )\n xml += closeTag ( \"Name\" )\n xml += closeTagAtt ( \"Kind\", \"personKindIdent\", i [ 5 ] )\n for j in pL :\n xml += openTag ( \"Location\" )\n xml += openCloseTag ( \"Locality\", j [ 1 ] )\n xml += openCloseTag ( \"Region\", j [ 2 ] )\n xml += openCloseTag ( \"Country\", j [ 3 ] )\n xml += closeTag ( \"Location\" )\n xml += openTag ( \"ExternalResources\" )\n for j in pER :\n xml += openCloseTag ( j [ 1 ] , j [ 2 ] )\n xml += closeTag ( \"ExternalResources\" )\n xml += openTag ( \"RelatedCrises\" )\n for j in cTP :\n xml += closeTagAtt ( \"RelatedCrisis\" , \"crisisIdent\", j [ 0 ] )\n xml += closeTag ( \"RelatedCrises\" )\n xml += openTag ( \"RelatedOrganizations\" )\n for j in pTO :\n xml += closeTagAtt ( \"RelatedOrganization\" , \"organizationIdent\", j [ 1 ] )\n xml += closeTag ( \"RelatedOrganizations\" )\n xml += closeTag ( \"Person\" )\n assert str ( type ( xml ) ) == \"<type 'str'>\"\n return xml", "def get_personnel():\r\n if len(ceo) == 0:\r\n print(\"There are no CEO\\'s\")\r\n else:\r\n for i in ceo:\r\n print(str(i))", "def test_get_contact(self):\n pass", "def search_contact():\n if request.method == 'GET':\n tel = request.args.get('tel')\n contact = io_client.get_contacts(urn=['tel:+52' + tel]).all()\n if contact:\n return jsonify({\"existe\": \"Si\"}), 201\n return jsonify({\"existe\": \"No\"}), 404", "def get_contacts():\n return jsonify(g.driver.get_contacts())", "def print_the_contents_of_all_entries(self):\n\n if len(self.student_list):\n self.print_dataframe(self.student_list)\n else:\n print('There is no contents to show')", "def dont_print_empty(self):\n self.printEmpty = False", "def _print_basic_email_information(emails, conn):\n for mid in emails:\n (res, data) = conn.fetch(mid, '(ENVELOPE)')\n headers = pattern.match(data[0])\n print 'Date: %s' % headers.group(1)\n print 'Subject: %s' % headers.group(2)\n print 'From: %s <%s@%s>' % (headers.group(3), headers.group(4), headers.group(5))\n print", "def display_people_in_space() -> bool:\n\n res = requests.get(\"http://api.open-notify.org/astros.json\").json()\n number: int = res.get(\"number\")\n craft: str = res[\"people\"][0][\"craft\"]\n names: list = [\n person.get(\"name\") for person in res.get(\"people\")\n if person.get(\"craft\") == craft\n ]\n names_str: str = \", \".join(names)\n print(f\"There are {number} people aboard the {craft}. 
They are {names_str}\")\n\n return True", "def contact():\n return render_template('contact.html')", "def list(request, page=1, template='contacts/person/list.html'):\n\n if not request.user.is_authenticated():\n return HttpResponseRedirect('/login/?next=%s' % request.path)\n\n person_list = Person.objects.all()\n\n if request.method == 'GET':\n form = PersonFilterForm(request.GET)\n if form.is_valid():\n if form.cleaned_data['last_name']:\n person_list = person_list.filter(last_name__istartswith=form.cleaned_data['last_name'])\n\n if form.cleaned_data['id_card']:\n person_list = person_list.filter(id_card__istartswith=form.cleaned_data['id_card'])\n\n if form.cleaned_data['contact_type']:\n person_list = person_list.filter(contact_type=form.cleaned_data['contact_type'])\n\n if form.cleaned_data['status']:\n person_list = person_list.filter(status=form.cleaned_data['status'])\n\n if form.cleaned_data['mailnotpaid_unsent']:\n person_list = person_list.filter(date_mailnotpaid__isnull = True).exclude(status='ok_all')\n\n if form.cleaned_data['mailregister_unsent']:\n person_list = person_list.filter(date_mailregister__isnull = True, status='ok_all')\n\n if form.cleaned_data['course']:\n person_list = person_list.filter(courses__in = form.cleaned_data['course'])\n\n\n else:\n form = PersonFilterForm()\n\n table = PersonTable(person_list, order_by = request.GET.get(\"sort\",'-date_registration') )\n table.paginate(page=request.GET.get(\"page\", 1))\n\n # Comprovam si hi ha nous registres per sincronitzar. Ho feim una vegada per sessio.\n if not request.session:\n request.session={}\n\n regs_not_sync = request.session.get('regs_not_sync',-1)\n if regs_not_sync == -1:\n regs_not_sync = check_pending_sync()\n request.session['regs_not_sync'] = regs_not_sync\n\n kwvars = {\n 'table' : table,\n 'form': form,\n 'regs_not_sync': regs_not_sync,\n }\n\n return render_to_response(template, kwvars, RequestContext(request))", "def homepage(self):\n print('-=' * 12 + \" Home Page \" + '-=' * 12)\n self._user.list_contacts()\n options = {1: self.add_contact, 2:self.remove_contact ,3: self.view_contact_chat, 4: self.sign_out, 5: self.exit}\n print_out = \"(1) Add new contact \\n (2) Remove Contact \\n (3) View my chats \\n (4) Sign out \\n (5) Exit\"\n return self._take_option(options, print_out)", "def show_all_customers():\n return cr.show_all_customers()", "def archive_contact_messages(self, org, contact):\n pass", "def contact(self, request, **kwargs):\n group_obj = self.get_object()\n contact_data = group_obj.contacts.all()\n if contact_data is not None:\n serializer_data = ContactSerializer(contact_data, many=True)\n return Response(serializer_data.data)\n else:\n return Response({'message': 'No details found for contact of this group'}, status=status.HTTP_404_NOT_FOUND)", "def __ui_list_all_persons(self):\n persons_list = self.__person_service.service_get_persons_list()\n\n if len(persons_list) == 0:\n print(\"The list of persons is empty!\")\n else:\n print(\"The list of persons in your agenda:\")\n for person in persons_list:\n print(\" \" + str(person))\n print(\"\")", "def getcontacts():\n contacts = {}\n\n try:\n #get list of contact ids\n contactids = r.smembers(\"contacts\")\n\n #for each contact id get data\n for contactid in contactids:\n contacts.update(_getcontact(str(contactid)))\n return contacts\n except:\n print \"Unexpected error:\", sys.exc_info()[0]\n raise", "def displayFolowers(database):\n firstname=str(input(\"who do you want to display followers :\"))\n 
usr,find=getByName(database,firstname)\n if find:\n print(f\"{usr.firstname} {usr.lastname} is folowed by:\")\n for folower in usr.folowed:\n print(folower)", "def get_contacts(self):\n contacts = Membership.objects.filter(entity = self, key_contact = True).order_by('importance_to_entity')\n return contacts", "def print_all_clashes(clashes):\n if len(clashes) > 0:\n for tup_stud_info in sorted(clashes):\n stud_num, first_name, last_name = process_set_clashes(tup_stud_info)\n print(\" {}: {} {}\".format(stud_num, first_name, last_name))\n print()\n \n \n else:\n print(\" Nobody.\")\n print()", "def test_get_filter_no_effective_contacts(self):\n data = {\"type_contact\": 2}\n response = self.client.get(reverse('contacts-filter'), data)\n # import pdb; pdb.set_trace()\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data[\"count\"], 2)", "def list_accounts():\n\n try:\n accounts = Account.query.all()\n except NoResultFound:\n print(f\"No account configured yet.\")\n return\n n_len = max([len(a.nickname) for a in accounts if a.nickname != 'no.name'])\n fmt = \"{nickname:\" + str(n_len) + \"s}: {email:s}\"\n #import pdb; pdb.set_trace()\n for acct in [acct for acct in accounts if acct.nickname != 'no.name']:\n print(fmt.format(nickname=acct.nickname, email=acct.email))\n return", "def appendedEntries(self):\n self.contact_list.append({\"name\": self.first_name.title() + \" \" + self.last_name.title(), \"phone number\": self.phone_number, \"phone number type\": self.phone_number_type})", "def contacts(self, contacts):\n\n self._contacts = contacts", "def contacts(self, contacts):\n\n self._contacts = contacts", "def print_canonical_collection(cc):\n res = \"----------- Canonical Collection -----------\\n\"\n for elem in cc:\n res += str(elem) + \"\\n\"\n print(res)", "def print_db():\r\n try:\r\n conn = sqlite3.connect('account.db')\r\n c = conn.cursor()\r\n for row in c.execute(\"SELECT * FROM accounts\"):\r\n print(row)\r\n except sqlite3.DatabaseError:\r\n print(\"Error. 
Could not retrieve data.\")\r\n finally:\r\n if c is not None:\r\n c.close()\r\n if conn is not None:\r\n conn.close()", "def print_results(results):\n print(\"\\033[4m\\033[1m%-75s%s\\033[0m\" % (\"NAME\", \"ADDRESS\"))\n\n for selections in data:\n print(\"%-75s%s\" % (selections['applicant'], selections['location']))\n \n print(\"\\n\\033[1m--- PAGE \", page_num, \"---\\033[0m\\n\")", "def read_contacts(filename: str, database) -> None:\n wb = xlrd.open_workbook(filename) # opens the excel workbook\n sheet = wb.sheet_by_index(0) # gets the sheet\n\n number_of_rows = sheet.nrows # total number of rows in the sheet\n\n blank_counter = 0 # Will count the number of blank phone numbers\n\n # Loops through each row in the excel sheet\n for row in range(0, number_of_rows):\n name, email, phone = sheet.row_values(row) # assigns variables for the row contents\n # print(name, email, phone) # for debugging\n\n # If no email and phone, skip the contact\n if email == '' and phone == '':\n continue\n elif phone == '':\n # Here we pass the counter number so that there is a unique\n # number added to the blank phone number column\n add_contact(database, name, email, blank_counter)\n blank_counter += 1 # Increment the counter\n else:\n add_contact(database, name, email, phone)\n\n # print(sheet.row_values(row)) # for debugging", "def visualizar_completo(self):\r\n print(\r\n 'ID:\\t\\t\\t', self.ID,\r\n '\\nNome:\\t\\t\\t', self.nome,\r\n '\\nSobrenome:\\t\\t', self.sobrenome\r\n )\r\n for indice, telefone in enumerate(self.tel_list):\r\n print(f'Telefone {indice+1}:\\t\\t', telefone)\r\n for indice, email in enumerate(self.email_list):\r\n print(f'E-mail {indice+1}:\\t\\t', email)\r\n print('Empresa:\\t\\t', self.empresa)", "def contacts(self):\r\n return contacts.Contacts(self)", "def view_all_students():\n message = ''\n global conn\n with conn:\n rows = select_all_students(conn)\n for row in rows:\n message += str(row) + \"\\n\"\n messagebox.showinfo('Student Table', message)", "def print_entries(self):\n self.print_selected_entries(self.entries)", "def fetch_contacts(owner_account_id):\n resp = oauth.tapkey.get(f\"Owners/{owner_account_id}/Contacts?$select=id,identifier\")\n contacts = resp.json()\n return contacts", "def convert2csv(contacts, output_path):\n\n print(\"[!] 
not implemented yet\")", "def print(self, index):\n count=0\n start = self.head\n while start:\n if count==index:\n print(count, ' : ', start.getMember())\n break\n start=start.getLink()\n count+=1", "def print_sansanito():\n\tssn = \"SELECT * FROM sansanito\"\n\tcur.execute(ssn)\n\tprint_table(hdrs_sansanito)", "def del_contact_all(self):\n\n send_key(KEY_MENU)\n delstr = contact.get_value('contact_delete')\n if search_text(delstr):\n click_textview_by_text(delstr)\n click_checkbox_by_id('select_all_check')\n click_button_by_id('btn_ok')\n click_button_by_index(1)\n else:\n goback()\n\n sleep(2) #take a rest to wait view ...", "def contact():\n\n\treturn render_template('contact.html', title='Contact',\n\t\t\t\t\t\t year=datetime.now().year,\n\t\t\t\t\t\t message='Your contact page.')", "def show_user_contacts(user_id):\n\n user_contacts = Contact.query.filter_by(user_id=user_id).all()\n\n contacts = []\n for contact in user_contacts:\n contacts.append( { 'contact_id': contact.contact_id,\n 'first_name': contact.first_name,\n 'last_name': contact.last_name,\n 'email': contact.email } )\n\n return jsonify(contacts)", "def print_message(contact, message):\n print(f\"{contact}: {message}\")", "def test_get_contact_objects(self):\n\n contacts = MessageController.get_contact_objects(['2'])\n self.assertEqual(contacts[0].contact_first_name, 'Contact2')\n self.assertEqual(contacts[0].contact_phone, '4153417706')\n self.assertEqual(contacts[0].user_id, 1)\n self.assertEqual(contacts[0].lang_id, 1)", "def print_cust(self, msg):\n print(msg, end='')", "def create_contacts_list(self, contactsfile, templatefile='templates/contacts_list_template.tex', encoding='utf-8'):\n\n self.tex = \"\"\n\n with open(templatefile, 'r', encoding=encoding) as f:\n template = f.read().split(\"<+CONTACTS+>\")\n\n\n first_table = True\n\n with open(contactsfile, 'r', encoding=encoding) as c:\n lines = c.readlines()\n\n for line in lines:\n line = line.strip()\n\n if len(line) > 0:\n if line[0] == '#' and line[1] != '#':\n # Line is a heading.\n\n # We should end the previous table, if any:\n if not first_table:\n self.tex += \"\\n\\\\end{longtable}\\\\vspace*{1em}\\n\"\n\n self.tex += \"{{\\Large\\\\bfseries {heading}}}\\n\".format(heading=line.strip(\"# \"))\n\n elif line[0] == '#' and line[1] == '#':\n # Line specifies column headers.\n first_table = False\n\n split_line = line.strip('# ').split(';')\n n_cols = len(split_line)\n\n self.tex += \"\\\\begin{{longtable}}{{*{{{n}}}{{l}}}}\\n\".format(n=n_cols)\n\n headers = \"\"\n for i,word in enumerate(split_line):\n if i == 0:\n headers += \"\\\\textbf{{{word}}}\".format(word=word.strip())\n else:\n headers += \" & \\\\textbf{{{word}}}\".format(word=word.strip())\n headers += \"\\\\\\\\\\n\"\n self.tex += r\"\"\"\n\\toprule\n{headers}\n\\midrule\n\\endfirsthead\n\n\\toprule\n{headers}\n\\midrule\n\\endhead\n\n\\bottomrule\n\\endfoot\n\"\"\".format(headers=headers)\n\n else:\n # Line contains contact information:\n split_line = line.strip().split(';')\n if len(split_line) != n_cols:\n print(\"Warning! Line does not have the right number of columns! 
Line: {}\".format(line))\n\n for i,word in enumerate(split_line):\n if i == 0:\n self.tex += \"{word}\".format(word=word.strip())\n else:\n self.tex += \" & {word}\".format(word=word.strip())\n\n self.tex += \"\\\\\\\\\\n\"\n\n self.tex += \"\\\\end{longtable}\"\n template.insert(1,self.tex)\n self.tex = \"\\n\".join(template)", "def contact(request):\n assert isinstance(request, HttpRequest)\n contact = models.ContactUs.objects.all()\n return render(\n request,\n 'app/contact.html',\n {\n 'title':'Contact Us',\n 'message':'Our contact information:',\n 'year':datetime.now().year,\n 'contact': contact\n }\n )", "def get_queryset(self):\n user = self.request.user\n return Contact.objects.filter(owner=user)", "def main(from_file=None):\n if from_file:\n # TODO: simple txt, csv or json loading\n pass\n book = ContactBook()\n headers = [\"Ime\", \"Priimek\", \"Telefonska\", \"Leto rojstva\", \"E-pošta\"]\n # Start the programm\n print \"Wellcome to the Contact Book.\"\n print \"Let's get busy.\"\n while True:\n os.popen(CLEAR_SCREEN)\n print \"SmartNinja Contact book\"\n print \"\"\n print draw_table(book.get_data(), headers)", "def contact_list(request):\n if request.method == 'GET':\n contact = Contact.objects.all()\n serializer = ContactSerializer(contact, many=True)\n return Response(serializer.data)\n elif request.method == 'POST':\n serializer = ContactSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data,\n status=status.HTTP_201_CREATED)\n else:\n return Response(serializer.errors,\n status=status.HTTP_400_BAD_REQUEST)", "def table_info(self):\n for customer in self.customers:\n print(customer.get_name())", "def add_contact(self):\n contact_list = {}\n contact_list[self.my_number] = self.name\n connect_db = Database()\n connect_db.add_contact(self.name, self.my_number)", "def Vcontacts(\n # Selectors\n leftSelector='', rightSelector='',\n # Left side positive filters\n chainLeftIn='',resiNumLeftIn='',resiNameLeftIn='',atomSerialLeftIn='',\n atomNameLeftIn='',\n # Left side negative filters\n chainLeftOut='',resiNumLeftOut='',resiNameLeftOut='', atomSerialLeftOut='',\n atomNameLeftOut='',\n # Right side positive filters\n chainRightIn='',resiNumRightIn='',resiNameRightIn='',atomSerialRightIn='',\n atomNameRightIn='',\n # Right side negative filters\n chainRightOut='',resiNumRightOut='',resiNameRightOut='',atomSerialRightOut='',\n atomNameRightOut='',\n # Contact Area\n contactAreaMin='',contactAreaMax='',\n # Minimal distance\n minimalDistanceMin='',minimalDistanceMax='',\n # Sequence separation\n seqSeparationMin='',seqSeparationMax='',\n # Misc.\n model='', solvent='False', color='white', invert='False', opacity='1',\n # Server connection\n host='127.0.0.1', port='8888',\n # Debug mode\n debug='False'\n ):\n\n # Logger level\n logging_level = logging.INFO if not Bool(debug) else logging.DEBUG\n\n # Init logger\n logging.basicConfig(format='%(levelname)s:%(message)s', level=logging_level)\n\n # Loggin error wrapper\n logging.parser_error = CallCounter(logging.error)\n\n # Get model from selectors\n sele_model = get_selectors_model(leftSelector, rightSelector)\n\n if sele_model:\n model = sele_model\n else:\n model = get_model(model)\n\n params = params_parser(solvent, color, invert, opacity)\n\n if logging.parser_error.counter != 0:\n return\n\n # Append atom serials\n atomSerialLeftIn = atomSerialLeftIn + get_serials(leftSelector)\n atomSerialRightIn = atomSerialRightIn + get_serials(rightSelector)\n\n # Compose query commands\n 
Vfilter = compose(\n # Left side positive filters\n chainLeftIn, resiNumLeftIn, resiNameLeftIn, atomSerialLeftIn,\n atomNameLeftIn,\n # Left side negative filters\n chainLeftOut, resiNumLeftOut, resiNameLeftOut, atomSerialLeftOut,\n atomNameLeftOut,\n # Right side positive filters\n chainRightIn, resiNumRightIn, resiNameRightIn, atomSerialRightIn,\n atomNameRightIn,\n # Right side negative filters\n chainRightOut, resiNumRightOut, resiNameRightOut, atomSerialRightOut,\n atomNameRightOut,\n # Contact Area\n contactAreaMin, contactAreaMax,\n # Minimal distance\n minimalDistanceMin, minimalDistanceMax,\n # Sequence separation\n seqSeparationMin, seqSeparationMax\n )\n\n\n query = json.dumps({\n 'filter': Vfilter,\n 'params': params\n })\n\n try:\n # Create TCP client obj\n client = TCPClient(host, port)\n # Start TCP client\n client.start()\n except Exception as e:\n logging.critical(e)\n logging.info('Server might not be running')\n return\n\n try:\n # Check if server has PDB file\n if not client.check_file(model):\n client.send_file(model)\n\n cgo_path = client.get_cgo(model, query)\n\n except socket.timeout as e:\n logging.error(\"Connection time out.\")\n return\n except Exception as e:\n logging.error(\"Server side error\")\n return\n\n del client\n\n # draw CGOs\n draw_CGO(cgo_path)\n\n return", "def feed_contact_from_db1(output_data, email):\n contactfl = ContactFromdb1.objects.using('db1').filter(\n contact_email__iexact=email,\n active=1\n )[0]\n\n if contactfl.firstname:\n output_data['first_name'] = contactfl.firstname\n\n if contactfl.lastname:\n output_data['last_name'] = contactfl.lastname\n\n # if contactfl.contact_phone:\n # output_data['phone'] = contactfl.contact_phone\n\n if contactfl.contact_social:\n output_data['linkedin_url'] = contactfl.contact_social\n\n if contactfl.position:\n output_data['title'] = contactfl.position\n\n if contactfl.company.company_name:\n output_data['company'] = contactfl.company.company_name\n\n return output_data" ]
[ "0.7405073", "0.7108748", "0.66840386", "0.6666923", "0.6527708", "0.6482683", "0.63072914", "0.63001305", "0.61677194", "0.61671007", "0.6138823", "0.60151744", "0.58849186", "0.58472013", "0.5808614", "0.57923657", "0.57631457", "0.5700023", "0.5692983", "0.56776345", "0.5623097", "0.56087774", "0.5597384", "0.5590279", "0.5553985", "0.5542999", "0.5536236", "0.55331486", "0.54789394", "0.5441368", "0.54217374", "0.5354656", "0.53540325", "0.53536433", "0.5345708", "0.5323097", "0.5319423", "0.5317147", "0.53124917", "0.5300447", "0.52871895", "0.5284866", "0.5278665", "0.5269461", "0.52617157", "0.52566546", "0.52453786", "0.52427685", "0.52413315", "0.5240663", "0.5236288", "0.5235686", "0.52191997", "0.51992875", "0.5197314", "0.5195164", "0.5183989", "0.51718104", "0.5167972", "0.515881", "0.5142105", "0.51419884", "0.5129883", "0.51254374", "0.5122517", "0.51223606", "0.51185894", "0.5105445", "0.5105365", "0.5104963", "0.5087039", "0.50821275", "0.50821275", "0.5080193", "0.50731194", "0.50703645", "0.5066753", "0.5063386", "0.5063098", "0.50586003", "0.5047798", "0.50449616", "0.5039883", "0.5036639", "0.5036001", "0.5030489", "0.50301534", "0.50272995", "0.50086975", "0.50048274", "0.5001929", "0.5000072", "0.49916777", "0.49897087", "0.49846873", "0.49840933", "0.49786302", "0.49785864", "0.49761707", "0.4972645" ]
0.55335945
27
Update contacts after a change
def update_contacts(self):
    self.contacts = self.db.list_contacts()
    return self.list_contacts()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def contacts_list_update(self):\n\t\tself.database.contacts_clear()\n\t\tclient_log.debug(f'Запрос контакт листа для пользователся {self.name}')\n\t\treq = {\n\t\t\tACTION: GET_CONTACTS,\n\t\t\tTIME: time.time(),\n\t\t\tUSER: self.username\n\t\t}\n\t\tclient_log.debug(f'Сформирован запрос {req}')\n\t\twith socket_lock:\n\t\t\tsend_message(self.transport, req)\n\t\t\tans = get_message(self.transport)\n\t\tclient_log.debug(f'Получен ответ {ans}')\n\t\tif RESPONSE in ans and ans[RESPONSE] == 202:\n\t\t\tfor contact in ans[LIST_INFO]:\n\t\t\t\tself.database.add_contact(contact)\n\t\telse:\n\t\t\tclient_log.error('Не удалось обновить список контактов.')", "def contacts(self, contacts):\n\n self._contacts = contacts", "def contacts(self, contacts):\n\n self._contacts = contacts", "def update_contact(self,name):\n update_choice = input(\"What part of the contact would you like to\"+\n \" modify? Enter name, number, email, or zipcode. \")\n find_contact = self.pull_one_contact(name)[1]\n \n if update_choice == \"name\":\n new_name = input(\"Please enter the updated name as\"+ \n \" firstname lastname: \")\n self.contacts[find_contact][0] = new_name\n print(f\"Your contact has been updated successfully with the\"+ \n f\" following information: \\n Name: {new_name}\")\n \n elif update_choice == \"number\":\n new_number = input(\"Please enter the updated number: \")\n self.contacts[find_contact][1] = new_number\n print(f\"Your contact has been updated successfully with the\"+ \n f\" following information: \\n Number: {new_number}\")\n \n elif update_choice == \"email\":\n new_email = input(\"Please enter the updated email: \") \n self.contacts[find_contact][2] = new_email\n print(f\"Your contact has been updated successfully with the\"+ \n f\" following information: \\n Email: {new_email}\")\n \n elif update_choice == \"zipcode\":\n new_zipcode = input(\"Please enter the updated zipcode: \")\n self.contacts[find_contact][3] = new_zipcode\n print(f\"Your contact has been updated successfully with the\"+ \n f\" following information: \\n Zipcode: {new_zipcode}\")\n \n else:\n sys.exit() \n self.save()", "def update_contact_in_db(self):\n self.init_db(self._testing)\n\n # making sure that the object is in the db\n assert not self.uid == \"\"\n\n self._update_row_in_db(Contact.table_name, Contact.columns, self.values_with_uid)", "def update_contacts(self, contacts):\n\n if contacts.time.size != 1:\n raise IndexError(\"Contacts should be from one frame only\")\n if contacts.channel.size != self.contacts.channel.size:\n self.new_contact_set(contacts)\n return # Prevent calling update_contacts recursively\n self.contacts = contacts\n contacts = np.array(contacts)\n\n for i, actor in enumerate(self.contacts_actors):\n # mapper = actors.GetNextActor().GetMapper()\n mapper = actor.GetMapper()\n self.contacts_actors[i].GetProperty().SetColor(self.contacts_color)\n self.contacts_actors[i].GetProperty().SetOpacity(self.contacts_opacity)\n source = vtkSphereSource()\n source.SetCenter(contacts[0:3, i])\n source.SetRadius(self.contacts_size)\n mapper.SetInputConnection(source.GetOutputPort())", "def update_contacts(self, contact_list):\n updated_contacts = 0\n request_list = list()\n\n # stale_contacts contains all old contacts at first, all current\n # contacts get then removed so that the remaining can get deleted\n stale_contacts = set(self.contacts)\n\n for contact in contact_list:\n c = Persona.query.get(contact[\"id\"])\n\n if c is None:\n c = Persona(id=contact[\"id\"], _stub=True)\n\n if c._stub is 
True:\n request_list.append(contact[\"id\"])\n\n try:\n # Old and new contact; remove from stale list\n stale_contacts.remove(c)\n except KeyError:\n # New contact\n self.contacts.append(c)\n updated_contacts += 1\n\n # Remove old contacts that are not new contacts\n for contact in stale_contacts:\n self.contacts.remove(contact)\n\n app.logger.info(\"Updated {}'s contacts: {} added, {} removed, {} requested\".format(\n self.username, updated_contacts, len(stale_contacts), len(request_list)))\n\n return request_list", "def do_update(self, line):\n\t\tif isinstance(self.cl, Book):\n\t\t\ttry:\n\t\t\t\tself.cl.update_contact(*line.split())\n\t\t\t\tprint(\"Updated Contact with id: {}. {}={}\".format(*line.split()))\n\t\t\texcept TypeError:\n\t\t\t\tprint(\"Wrong syntax! Type 'help update'\")\n\t\telse:\n\t\t\tprint(\"To update contacts you need to open or create book\")", "def test_update_contact(session): # pylint:disable=unused-argument\n org = factory_org_service()\n org.add_contact(TestContactInfo.contact1)\n\n dictionary = org.as_dict()\n assert len(dictionary['contacts']) == 1\n assert dictionary['contacts'][0]['email'] == TestContactInfo.contact1['email']\n\n org.update_contact(TestContactInfo.contact2)\n\n dictionary = org.as_dict()\n assert len(dictionary['contacts']) == 1\n assert dictionary['contacts'][0]['email'] == TestContactInfo.contact2['email']", "def edit_contact(self):\n edit_data = input(\"Enter the first name of user you want to edit\\n\")\n\n for contact in self.contact_list:\n if contact.first_name == edit_data:\n user_input = int(input(\n \"Enter the number that you want to edit field in details\"\n \" \\n 1. First Name 2. Last name 3. Address 4. city 5. state 6.zip 7. Phone number 8.Email \\n\"))\n if user_input == 1:\n first_name = input(\"Enter new first name\\n\")\n contact.first_name = first_name\n elif user_input == 2:\n last_name = input(\"Enter new last name\\n\")\n contact.last_name = last_name\n elif user_input == 3:\n address = input(\"Enter new address\\n\")\n contact.address = address\n elif user_input == 4:\n city = input(\"Enter new city\\n\")\n contact.city = city\n elif user_input == 5:\n state = input(\"Enter new state\\n\")\n contact.state = state\n elif user_input == 6:\n zip = input(\"Enter new zip\\n\")\n contact.zip = zip\n elif user_input == 7:\n phone_number = input(\"Enter new phone number\\n\")\n contact.phone_number = phone_number\n elif user_input == 8:\n email = input(\"Enter new email\\n\")\n contact.email = email\n else:\n print(\"Please enter a valid input\")\n else:\n print(\"Please enter a valid name\")", "def showEditContact(self):", "def edit_contact(contact):\n db = get_db()\n \n if contact.get_hash_name() in db:\n db[contact.get_hash_name()] = json.loads(contact.json())\n write_db(db)\n else:\n sys.exit(logger.fail('fatal: contact does not exist'))", "def refreshContacts(self):\n contact_ids = self._getAllContactIds()\n contacts = self._getContacts(contact_ids)\n\n self.contacts = [LineContact(self, contact) for contact in contacts]\n\n self.contacts.sort()", "def test_updateContact(self):\n qs = Contact.objects.all()\n contact = qs[0]\n contact2 = Contact.objects.get(id=contact.id)\n to_update_value = 'address 2'\n contact2.address = to_update_value\n contact2.save()\n # refresh from db\n contact3 = Contact.objects.get(id=contact.id)\n self.assertEqual(contact3.address, to_update_value)", "def set_contacts(self, contacts):\n\n\t\tif contacts is not None and not isinstance(contacts, list):\n\t\t\traise 
SDKException(Constants.DATA_TYPE_ERROR, 'KEY: contacts EXPECTED TYPE: list', None, None)\n\t\t\n\t\tself.__contacts = contacts\n\t\tself.__key_modified['Contacts'] = 1", "def test_updateContact(self):\n response = self.client.get(self.url)\n qs = response.json()\n contact = qs[0]\n to_update_value = 'address 2'\n contact['address'] = to_update_value\n response = self.client.put(self.url + str(contact['id']) + '/', contact, content_type=\"application/json\")\n self.assertEqual(response.status_code, 200)\n contact2 = response.json()\n self.assertEqual(contact2['address'], to_update_value)", "def contact_list(self, contact_list):\n \n self._contact_list = contact_list", "def new_contact_set(self, contacts):\n if contacts.time.size != 1:\n raise IndexError(\"Contacts should be from one frame only\")\n self.contacts = contacts\n\n # Remove previous actors from the scene\n for actor in self.contacts_actors:\n self.parent_window.ren.RemoveActor(actor)\n self.contacts_actors = list()\n\n # Create the geometry of a point (the coordinate) points = vtk.vtkPoints()\n for i in range(contacts.channel.size):\n # Create a mapper\n mapper = vtkPolyDataMapper()\n\n # Create an actor\n self.contacts_actors.append(vtkActor())\n self.contacts_actors[i].SetMapper(mapper)\n\n self.parent_window.ren.AddActor(self.contacts_actors[i])\n\n # Update marker position\n self.update_contacts(self.contacts)", "def pull_contacts(self, org, modified_after, modified_before, progress_callback=None):\n pass", "def update_contact(self, context, payload):\n\n if context.get('headers').get('api_key') is None or context.get('headers').get('app_id') is None:\n raise Exception(\"Please provide Api-Key and Api-Appid\")\n \n # Set headers\n headers = {\n \"Api-Key\": context.get('headers').get('api_key'),\n \"Api-Appid\": context.get('headers').get('app_id'),\n \"Content-Type\": \"application/json\"\n }\n payload[\"id\"] = payload.get(\"contact_id\")\n response = requests.request(\"PUT\", f'{self.url}Contacts', headers=headers, data=payload).text\n response = json.loads(response)\n response = response[\"data\"][\"attrs\"]\n return response", "def test_update_contact(mock_app, gpx4_patients):\n\n runner = mock_app.test_cli_runner()\n patients_collection = mock_app.db.patients\n\n # GIVEN a database with some patients\n patients_collection.insert_many(gpx4_patients)\n test_patients = patients_collection.find()\n # Sharing a contact information\n contacts = test_patients.distinct(CONTACT_HREF)\n assert len(contacts) == 1\n\n # WHEN their contact info is updated using the cli\n new_href = \"[email protected]\"\n result = runner.invoke(\n cli,\n [\n \"update\",\n \"contact\",\n \"--old-href\",\n contacts[0],\n \"--href\",\n new_href,\n \"--name\",\n NEW_NAME,\n \"--institution\",\n TEST_INST,\n ],\n input=\"y\",\n )\n assert result.exit_code == 0\n\n # THEN the config info should be updated\n updated_patient = patients_collection.find({CONTACT_HREF: \":\".join([\"mailto\", new_href])})\n assert len(list(updated_patient)) > 0", "def update_contact(self, uuid, name, urns, fields, groups):\n payload = self._build_params(uuid=uuid, name=name, urns=urns, fields=fields, group_uuids=groups)\n return Contact.deserialize(self._post('contacts', None, payload))", "async def update_contact(dbcon: DBConnection, contact_id: int, data: Dict[str, str]) -> None:\n\n async def _run(cur: Cursor) -> None:\n for key, value in data.items():\n if key not in ['name', 'email', 'phone', 'active']:\n raise errors.IrisettError('invalid contact key %s' % key)\n q = 
\"\"\"update contacts set %s=%%s where id=%%s\"\"\" % key\n q_args = (value, contact_id)\n await cur.execute(q, q_args)\n\n if not await contact_exists(dbcon, contact_id):\n raise errors.InvalidArguments('contact does not exist')\n await dbcon.transact(_run)", "def contact(self, contact):\n\n self._contact = contact", "def contact(self, contact):\n\n self._contact = contact", "def edit_contact(_id: str):\n # ------- GET ------------\n title = 'Edit contact -- ' + _id\n doc = numbers.find_one(ObjectId(_id), projection={'_id': False, 'create_time': False, 'update_time': False})\n # convert array in to string separated by coma\n doc = {key: (', '.join(val) if type(val) == list else val) for key, val in doc.items()}\n form = SaveNumber(**doc)\n # ========================\n\n if form.validate_on_submit():\n web_logging.debug('request to edit _id= {}'.format(_id))\n # prepare for mongo\n form_recieved = Contact()\n form_recieved.from_form(form.data)\n form_recieved.update_time = datetime.utcnow()\n\n result = form_recieved.mongo_update(_id)\n match_count, modified_count = result\n flash(\"match_count= {}, modified_count= {}\".format(match_count, modified_count))\n return redirect(url_for('add_edit_number'))\n\n return render_template('save_contact.html', title=title, form=form)", "def set_contacts_color(self, contacts_color):\n self.contacts_color = contacts_color\n self.update_contacts(self.contacts)", "def notify(self):\n for customer in self.customers:\n customer.update()", "def update(max_iterations):\n persons = get_persons()\n count = 0\n for person in persons:\n if count > max_iterations:\n return\n count += 1\n if choice([0, 1]):\n new_person = make_random('en')\n new_person['id'] = person['id']\n params = {\"event\": \"contact.update\",\n \"data\": new_person}\n request(params)", "def put(self, id):\n return Contacts().update_one(id, request.json)", "def changecontact(id, name=None, address=None, phone=None, email=None):\n try:\n currentid = str(id)\n _setcontact(currentid, name, address, phone, email)\n\n return _getcontact(currentid)\n except:\n print \"Unexpected error:\", sys.exc_info()[0]\n raise", "def add_contact(self, name, number, email, zipcode):\n \n new_contact = f\"{name}, {number}, {email}, {zipcode}\"\n contact_list = [name,number,email,zipcode]\n self.contacts.append(contact_list)\n self.save()\n print(f\"Thank you {new_contact} has been added to your contact book.\")", "def test_projects_id_contacts_put(self):\n project = Contact()\n response = self.client.open('/project-tracker/projects/{id}/contacts'.format(id=56),\n method='PUT',\n data=json.dumps(project),\n content_type='application/json')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def test_projects_id_contacts_patch(self):\n project = Contact()\n response = self.client.open('/project-tracker/projects/{id}/contacts'.format(id=56),\n method='PATCH',\n data=json.dumps(project),\n content_type='application/json')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def change_person_to_db(self):\n self.EditPeople.update_sql(self.sql)\n info = self.EditPeople.updated_info()\n self.fullname.setText(info[\"fullname\"])\n # self.search_people_by_name(info['fullname'])", "def test_modify_phonebook(self):\n bt_contacts_utils.generate_contact_list(self.contacts_destination_path,\n PSE_CONTACTS_FILE, 100)\n phone_numbers_added = bt_contacts_utils.import_device_contacts_from_vcf(\n self.pse, self.contacts_destination_path, PSE_CONTACTS_FILE)\n if not 
self.connect_and_verify(phone_numbers_added):\n return False\n\n bt_contacts_utils.erase_contacts(self.pse)\n bt_contacts_utils.generate_contact_list(self.contacts_destination_path,\n PSE_CONTACTS_FILE, 110, 2)\n phone_numbers_added = bt_contacts_utils.import_device_contacts_from_vcf(\n self.pse, self.contacts_destination_path, PSE_CONTACTS_FILE)\n return self.connect_and_verify(phone_numbers_added)", "def update_soft_contacts(self, soft_contacts):\n\n if soft_contacts.time.size != 1:\n raise IndexError(\"soft_contacts should be from one frame only\")\n if soft_contacts.channel.size != self.soft_contacts.channel.size:\n self.new_soft_contacts_set(soft_contacts)\n return # Prevent calling update_soft_contacts recursively\n self.soft_contacts = soft_contacts\n soft_contacts = np.array(soft_contacts)\n\n for i, actor in enumerate(self.soft_contacts_actors):\n # mapper = actors.GetNextActor().GetMapper()\n mapper = actor.GetMapper()\n self.soft_contacts_actors[i].GetProperty().SetColor(self.soft_contacts_color)\n self.soft_contacts_actors[i].GetProperty().SetOpacity(self.soft_contacts_opacity)\n source = vtkSphereSource()\n source.SetCenter(soft_contacts[0:3, i])\n source.SetRadius(self.soft_contacts_size[i])\n mapper.SetInputConnection(source.GetOutputPort())", "async def set_contacts(self, contacts: List[CertificateContact], **kwargs) -> List[CertificateContact]:\n new_contacts = await self._client.set_certificate_contacts(\n vault_base_url=self.vault_url,\n contacts=self._models.Contacts(contact_list=[c._to_certificate_contacts_item() for c in contacts]),\n **kwargs\n )\n return [\n CertificateContact._from_certificate_contacts_item(contact_item=item) for item in new_contacts.contact_list\n ]", "def on_contact(self, update, context):\n user = update.effective_user\n chat_id = update.effective_chat.id\n phone = update.message.contact.phone_number\n log.info(\n \"TEL from %s, %s, @%s, %s\", user.username, user.full_name, chat_id, phone,\n )\n\n # Here's an example of what else you can find in update['message'].contact.to_dict()\n # {'phone_number': '+4500072470000', 'first_name': 'Alex', 'user_id': 253150000}\n # And some user-related details in update.effective_user.to_dict()\n # {'first_name': 'Alex', 'id': 253150000, 'is_bot': False, 'language_code': 'en', 'username': 'ralienpp'}\n\n # Tell the backend about it, such that from now on it knows which chat_id corresponds to this user\n known_user = self.backend.link_chatid_to_volunteer(\n user.username, update.effective_chat.id, phone\n )\n\n if known_user:\n # Mark the user as available once onboarding is complete\n context.user_data[\"state\"] = c.State.AVAILABLE\n # Acknowledge receipt and tell the user that we'll contact them when new requests arrive\n update.message.reply_text(c.MSG_STANDBY)\n return\n\n # If we got this far, this is a completely new person who initiated the registration process via the bot, it is\n # time to ask them a few things and build a profile\n self.build_profile(update, context, phone=phone)", "def contact_reactivated(self, node, contact):\n for subscriber in self.subscribers:\n subscriber.contact_reactivated(node, contact)", "def delete_contacts(self):\n self.db.delete_all_contacts()\n return self.update_contacts()", "def test_save_contact(self):\n # .save_contact() is the save to contact function.\n # Test would check if an addition has been made to our contact list\n self.new_contact.save_contact()\n self.assertEqual(len(Contact.contact_list), 1)", "def test_edit_contact_list(self):\n c1 = 
ContactFactory(company_id=self.company.id)\n contact_list = ContactList.objects.first()\n data = ContactListSerializer(contact_list).data\n\n data['title'] = 'Nestle'\n data['contact_ids'] = [c1.id]\n\n url, parsed = self.prepare_urls('v1:contact_list-detail', subdomain=self.company.subdomain, kwargs={'pk':contact_list.id})\n \n response = self.client.put(url, data, HTTP_HOST=parsed.netloc, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n self.authenticate_user()\n response = self.client.put(url, data, HTTP_HOST=parsed.netloc, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n url, parsed = self.prepare_urls('v1:contact_list-detail', subdomain=self.company.subdomain, kwargs={'pk':contact_list.id})\n response = self.client.get(url, HTTP_HOST=parsed.netloc)\n content = json.loads(response.content)\n self.assertEqual(content['title'], 'Nestle')\n self.assertEqual(content['contacts'], [c1.id])", "def updateAuthors(self,event=None):\r\n self.popAuthors()\r\n self.primeAuthor.updateVals(self.authorList)\r\n self.coAuthor.updateVals(self.authorList)\r\n self.correspond.updateVals(self.authorList)", "def contact(self, contact):\n\n self.logger.debug(\"In 'contact' setter.\")\n\n self._contact = contact", "def test_sync_from_sugar_contact(self):\n LOG.debug('test_sync_from_sugar_contact')\n business = Business.objects.get(id=114)\n advertiser = Advertiser.objects.get(id=114)\n email = advertiser.email\n module = \"Contacts\"\n query = build_recent_entry_query(module=module, test_mode=True, \n get_modified=False, start=None)\n sugar_list = self.sugar.get_entry_list(module, query)\n sugar_dict = sugar_list[0]\n sugar_dict['advertiser_id_c'] = ''\n self.sugar.set_entry(module, dict_to_name_value(sugar_dict))\n billing_record = BillingRecord.objects.get(id=114)\n order = billing_record.orders.all()[0]\n order.delete()\n billing_record.delete()\n business.delete()\n consumer = Consumer.objects.get(email=email)\n consumer.delete()\n advertiser.delete()\n sync_business_from_sugar(test_mode=True, sugar=self.sugar)\n # business is not created since Sugar record modified by 10Coupons user\n try:\n business = Business.objects.get(advertiser=advertiser)\n self.assertTrue(False)\n except business.DoesNotExist:\n self.assertTrue(True)", "def receiveContactList(self, contactList):", "def test_save_multiple_contacts(self):\n self.new_contact.save_contact() # saving the new contact\n test_contact = Contact(\"Test\", \"User\", 254712345678, \"[email protected]\") # new user\n test_contact.save_contact() # saving the new contact\n self.assertEqual(len(Contact.contact_list), 2)", "def contact_info(self, contact_info):\n\n self._contact_info = contact_info", "def update_rec(self):\n print(\"Write phone number:\")\n update_phone_number_input = input()\n print(\"Write new name of the record:\")\n update_name_input = input()\n print(\"Write new address:\")\n update_address_input = input()\n return self.storage.update(\n update_phone_number_input, update_name_input, update_address_input\n )", "def editar_contacto(agenda_path):\n\tcontacto_mod = input('Contacto a modificar: ')\n\t#que contacto quiere modificar el usuario\n\tagenda = descargar_agenda(agenda_path)\n\tif contacto_mod not in agenda:\n\t\tprint ('El contacto no existe, agreguelo desde el menu')\n\t\tcontinue\n\t\t#por si el contacto no esta agendado\n\tfor i in range(len(agenda)):\n\t if agenda[i][0] == nombre:\n\t \tindex = i\n\t \tbreak\n\t if index != None:\n\t \tprint('Omite aquellos 
campos que no quieras editar para conservar los datos')\n\t \tnombre = input('Nombre:')\n\t \ttelefono = input('Telefono: ')\n\t \tdireccion = input('Dirección') \n\t \tcontacto[index] = [\n nombre if len(nombre) > 0 else agenda[index][0],\n telefono if len(telefono) > 0 else agenda[index][1],\n direccion if len(direccion) > 0 else agenda[index][2] ]\n\t \tcargar_agenda(agenda_path, agenda)\n\t \tprint('Editado con exito!')\n\t\t\t #modificar los datos del contacto que desea el usuario", "def test_save_contact(self):\n self.new_contact.save_contact() # saving the new contact\n self.assertEqual(len(Contact.contact_list), 1)", "def test_save_multiple_contact(self):\n self.new_contact.save_contact()\n # new contact\n test_contact = Contact(\"Test\", \"user\", \"0798765432\", \"[email protected]\")\n test_contact.save_contact()\n self.assertEqual(len(Contact.contact_list), 2)", "def add_contact(self, contact):\n self.db.insert_contact(contact)\n return self.update_contacts()", "def set_contacts_size(self, contacts_size):\n self.contacts_size = contacts_size\n self.update_contacts(self.contacts)", "def patch(self):\n contact = Contacts.query.order_by(desc(Contacts.Created)).first_or_404()\n result = []\n status_code = 204\n try:\n result = patch_item(contact, request.get_json())\n db.session.commit()\n except Exception:\n # If any other exceptions happened during the patching, we'll return 422\n result = {\"success\": False, \"error\": \"Could not apply patch\"}\n status_code = 422\n\n return make_response(jsonify(result), status_code)", "def updateAccountContact(self,contact, accountId, contactId, responseFields = None):\r\n\r\n\t\turl = MozuUrl(\"/api/commerce/customer/accounts/{accountId}/contacts/{contactId}?responseFields={responseFields}\", \"PUT\", UrlLocation.TenantPod, False);\r\n\t\turl.formatUrl(\"accountId\", accountId);\r\n\t\turl.formatUrl(\"contactId\", contactId);\r\n\t\turl.formatUrl(\"responseFields\", responseFields);\r\n\t\tself.client.withResourceUrl(url).withBody(contact).execute();\r\n\t\treturn self.client.result();", "def support_contacts(self, support_contacts):\n self._support_contacts = support_contacts", "def list_contact(self, key, value):\n self.db.list_contact(\n key,\n value,\n )", "async def post(self):\n await self.handle_request(self.contacts_new_api, 1)", "def add_contact_to_db(self):\n self.init_db(self._testing)\n\n # make sure that the object is not in the db\n assert self.uid == \"\"\n\n self._insert_row_into_db(Contact.table_name, Contact.columns, self.values)\n\n # update this objects uid\n self.uid = self._get_id_of_last_row(Contact.table_name)", "def update_data(self, data):\r\n rebuild = False\r\n\r\n # This method needs to substitute some defaultdicts for the normal\r\n # dictionaries that come back from the server.\r\n\r\n # Metacontact information\r\n\r\n #if data['metacontacts']\r\n mc_dict = data.get('metacontacts', {})\r\n if not isinstance(mc_dict, dict):\r\n log.critical('invalid metacontacts dictionary')\r\n mc_dict = {}\r\n\r\n # Contact information like SMS numbers and email addresses.\r\n self.info = defaultdict(dict)\r\n\r\n si = self.info\r\n if 'info' in data:\r\n for (k, v) in data['info'].iteritems():\r\n if isinstance(k, str):\r\n cmpk = k.decode('utf8')\r\n else:\r\n cmpk = k\r\n\r\n if not isinstance(cmpk, unicode):\r\n continue\r\n\r\n if cmpk.startswith('Meta') or any((cmpk.endswith('_' + prot)\r\n for prot in protocols.iterkeys())):\r\n if any(v.values()):\r\n si[k] = v\r\n\r\n for c, v in si.iteritems():\r\n for attr in 
('email', 'sms'):\r\n if attr in v:\r\n self.contact_info_changed(c, attr, v[attr])\r\n\r\n self.metacontacts = MetaContactManager(self, mc_dict)\r\n if hasattr(self, 'new_sorter'):\r\n on_thread('sorter').call(self.new_sorter.removeAllContacts)\r\n rebuild = True\r\n\r\n # Manual ordering of groups\r\n try:\r\n self.order = deepcopy(data['order'])\r\n self.order['groups'] = list(oset(self.order['groups']))\r\n contacts = self._filtered_contacts()\r\n self.order['contacts'] = defaultdict(list)\r\n self.order['contacts'].update(contacts)\r\n except Exception:\r\n log.critical('error receiving order')\r\n self._init_order()\r\n\r\n # note: loading tofrom data from the network is deprecated. this data\r\n # now goes out to disk. see save/load_local_data\r\n if 'tofrom' in data and isinstance(data['tofrom'], dict) and \\\r\n 'im' in data['tofrom'] and 'email' in data['tofrom']:\r\n self.dispatch.set_tofrom(deepcopy(data['tofrom']))\r\n\r\n if rebuild:\r\n self.rebuild()\r\n\r\n self.update_order()", "def save(self):\n with open(self.file, 'w', encoding='utf-8') as self.contacts_file:\n self.contacts_file.seek(0)\n for line in self.contacts:\n self.contacts_file.write(\",\".join(line))\n self.contacts_file.write(\"\\n\")\n self.contacts_file.truncate()\n self.contacts_file.close()", "def __ui_update_person(self):\n to_update_person_id = int(input(\"Introduce the ID of the person you want to update: \"))\n updated_person_name = input(\"Updated name: \").strip()\n updated_phone_number = input(\"Updated phone number: \").strip()\n self.__person_service.service_update_person(to_update_person_id, updated_person_name, updated_phone_number)\n print(\"Person successfully updated!\\n\")", "def MultiWaySync(self, accounts):\n\t\tcleaned_contacts = []\n\t\tcontacts = []\n\t\t\n\t\tfor account in accounts:\n\t\t\tself.SelectAccount(account)\n\t\t\tcontacts.extend(self.GetContactList())\n\t\t\n\t\tduplicates, originals = ceFindDuplicates(contacts)\n\t\tmerged, todelete = ceMergeDuplicates(duplicates)\n\t\t\n\t\tcleaned_contacts.extend(originals)\n\t\tcleaned_contacts.extend(merged)\n\t\t\n\t\tfor account in accounts:\n\t\t\tself.SelectAccount(account)\n\t\t\tself.RemoveAll()\n\t\t\n\t\tfor account in accounts:\n\t\t\tself.SelectAccount(account)\n\t\t\tfor contact in cleaned_contacts:\n\t\t\t\tself.BatchEnqueue('create', contact)\n\t\t\tself.ExecuteBatchQueue()", "def db_update_entry():\n db = sh.open(the_phone_book_name, flag='c', writeback=True)\n name = get_name()\n if name in db:\n phone_number = get_phone_number(db[name.capitalize()])\n print(\"Updating existing entry ..... 
{name}\\n\".format(name=name))\n db[name.capitalize()] = phone_number\n db.sync()\n else:\n print_error()\n db.close()\n db_show_all()", "def update_ldap_contact(ldap_con, change, unique_id, cache):\r\n\r\n # attributes to change\r\n mod_attrs = []\r\n for key in change.keys():\r\n if key == unique_id:\r\n continue\r\n mod_attrs.append((ldap.MOD_REPLACE, key, change[key]))\r\n\r\n # get dn from cache\r\n dn = cache[change[unique_id]]\r\n logging.info('UPDATE: {0} {1}'.format(dn, mod_attrs))\r\n\r\n # update the ldap\r\n ldap_con.modify_s(dn, mod_attrs)", "def update_customer_phone(self, customer_to_change, new_value):\n customer_list = self._customer_repo.get_customer_list()\n for customer in customer_list:\n if customer.get_customer_id() == customer_to_change.get_customer_id():\n customer.set_phone(new_value)\n self._customer_repo.overwrite_customer_list(customer_list)", "def new_soft_contacts_set(self, soft_contacts):\n if soft_contacts.time.size != 1:\n raise IndexError(\"soft_contacts should be from one frame only\")\n self.soft_contacts = soft_contacts\n\n # Remove previous actors from the scene\n for actor in self.soft_contacts_actors:\n self.parent_window.ren.RemoveActor(actor)\n self.soft_contacts_actors = list()\n\n # Create the geometry of a point (the coordinate) points = vtk.vtkPoints()\n for i in range(soft_contacts.channel.size):\n # Create a mapper\n mapper = vtkPolyDataMapper()\n\n # Create an actor\n self.soft_contacts_actors.append(vtkActor())\n self.soft_contacts_actors[i].SetMapper(mapper)\n\n self.parent_window.ren.AddActor(self.soft_contacts_actors[i])\n # Update marker position\n self.update_soft_contacts(self.soft_contacts)", "def update_servicech(self, conf, phone_num, body):\n\t\tpass", "def resulting_contact(self, resulting_contact):\n \n self._resulting_contact = resulting_contact", "def list_contacts(self):\n return self.contacts", "def update_data():\n pass", "def contact_edit(request, contact_id):\n contact = get_object_or_404(Contact, pk=contact_id)\n if request.method == \"POST\":\n form = ContactForm(request.POST, instance=contact)\n profile_form = ContactProfileForm(request.POST, instance=contact.contactprofile)\n\n if form.is_valid() and profile_form.is_valid():\n contact = form.save()\n profile_form.save()\n\n messages.success(request, _(\"The contact %(name)s was successfully updated\") % \n {'name': unicode(contact)})\n return HttpResponseRedirect(reverse(\"moderation.views.contact\", args=(contact.pk,)))\n else:\n form = ContactForm(instance=contact)\n profile_form = ContactProfileForm(instance=contact.contactprofile)\n\n return render_to_response(\"contact_edit.html\", \n { 'contact': contact, \n 'form': form, \n 'profile_form': profile_form,\n },\n context_instance=RequestContext(request))", "def update():", "def update():", "def on_update(self):\n\t\tfor email_account in frappe.get_all(\"Email Account\", filters={\"domain\": self.name}):\n\t\t\ttry:\n\t\t\t\temail_account = frappe.get_doc(\"Email Account\", email_account.name)\n\t\t\t\tfor attr in [\"email_server\", \"use_imap\", \"use_ssl\", \"use_tls\", \"attachment_limit\", \"smtp_server\", \"smtp_port\", \"use_ssl_for_outgoing\", \"append_emails_to_sent_folder\", \"incoming_port\"]:\n\t\t\t\t\temail_account.set(attr, self.get(attr, default=0))\n\t\t\t\temail_account.save()\n\n\t\t\texcept Exception as e:\n\t\t\t\tfrappe.msgprint(_(\"Error has occurred in {0}\").format(email_account.name), raise_exception=e.__class__)", "def contactListClicked(self):\n \n contacts = 
self.userList.getSelectedItems()\n self.mergeButton.setEnabled(contacts != None and len(contacts) > 1)\n \n if contacts != None and len(contacts) == 1:\n self.messageList.filterByContact(contacts[0])\n else:\n self.messageList.removeFilter()", "def test_get_contacts(self):\n pass", "def updateStatus(request,template='contacts/person/update_status.html'):\n\n if not request.user.is_authenticated():\n return HttpResponseRedirect('/login/?next=%s' % request.path)\n\n person_list = Person.objects.filter(status__in = ['pendent','ok_notpaid'])\n registres = 0\n\n for person in person_list:\n status = calculaStatus(person)\n if status != person.status:\n person.status = status\n person.save()\n registres = registres + 1\n\n return render_to_response(template, {'registres': registres}, RequestContext(request))", "def migrate_fb_contact():\n if request.method == 'GET':\n tel = request.args.get('tel')\n uuid = request.args.get('uuid')\n try:\n phone_contact = mx_client.get_contacts(urn=['tel:+52' + tel]).all()\n if phone_contact:\n create_thread_fb(phone_contact, uuid)\n return jsonify({\"Migrado\": \"Si\"}), 201\n except:\n pass\n return jsonify({\"Migrado\": \"No\"}), 404", "def clearContactsFromPhone():\n\tprint \"Deleting any contacts from phone...\"\n\tcmd =r\"adb shell pm clear com.android.providers.contacts\"\n\tos.system(cmd)\n\tprint \"Finished deleting contacts from phone.\"", "def test_update_contact_association(self):\n patient1 = self.create_patient({'mobile_number': '12223334444'})\n patient2 = self.create_patient()\n subject_number = patient1.subject_number\n node = self.create_xml_patient({'Subject_Number': subject_number,\n 'Mobile_Number': '43332221111'})\n payload = self.create_payload([node])\n parse_patient(node, payload)\n patient = payload.patients.all()[0]\n self.assertNotEqual(patient.pk, patient2.pk)\n self.assertEqual(patient.pk, patient1.pk)\n self.assertNotEqual(patient.contact.pk, patient2.contact.pk)\n self.assertEqual(patient.contact.pk, patient1.contact.pk)\n self.assertEqual(patient.mobile_number, '+43332221111')\n self.assertEqual(patient.contact.phone, '+43332221111')", "def _update_from_rest_data(self) -> None:", "def get_or_update_contact(request, **kwargs):\n contact_id = kwargs['id']\n contact = private.Contact()\n if request.method.lower() == 'get':\n data = contact.get_contact(contact_id)\n return JsonResponse(data)\n elif request.method.lower() == 'post':\n data = json.loads(request.body)\n try:\n contact.check_and_update(pk=contact_id, name=data.get('name'))\n except exception.ContactException as ex:\n return JsonResponse({'error_message': ex.message})\n elif request.method.lower() == 'delete':\n contact.delete(pk=contact_id)\n return JsonResponse({'success_message': 'Contact updated successfully.'})", "def appendedEntries(self):\n self.contact_list.append({\"name\": self.first_name.title() + \" \" + self.last_name.title(), \"phone number\": self.phone_number, \"phone number type\": self.phone_number_type})", "def Run(self):\n return self.ListAllContacts()", "def display_contact(self):\n contacts = \"\".join(str(contact) for contact in self.contact_list)\n print(contacts)", "def update(self):\n\n pass", "def contact_points(self, contact_points: object):\n\n self._contact_points = contact_points", "def update( ):\r\n pass", "def _get_contacts(self, tgt):\n with open(tgt, mode='r', encoding='utf-8') as f:\n str_contents = f.read()\n self.contacts = json.loads(str_contents)\n return", "def set_contacts_opacity(self, contacts_opacity):\n self.contacts_opacity 
= contacts_opacity\n self.update_contacts(self.contacts)", "def tearDown(self):\n Contact.contact_list = []", "def delete_contact(self):\n delete_first_name = input(\"Enter first name that you want to delete\\n\")\n for contact in self.contact_list:\n if contact.first_name == delete_first_name:\n #print(str(contact))\n self.contact_list.remove(contact)\n else:\n print(f\"No contact is present with first name {delete_first_name} \")", "def set_raw_contact(self, value: Atoms):\n self._raw_contact = value", "def add_contact_to_google_account(self, i):\n\n self.add_contact_to_phone(i)", "def update(self):\n # TO DO for updating urls if changed\n pass", "def add_contact(self):\n contact_list = {}\n contact_list[self.my_number] = self.name\n connect_db = Database()\n connect_db.add_contact(self.name, self.my_number)", "def mergeContacts(self):\n self.mergeDialog = MergeDialog(self.db, self.userList.getSelectedItems())\n self.mergeDialog.accepted.connect(self.refreshLists)\n self.mergeDialog.show()" ]
[ "0.75325215", "0.71713334", "0.71713334", "0.70067936", "0.6914438", "0.687053", "0.68023425", "0.67809993", "0.67450345", "0.6729109", "0.67007273", "0.66972715", "0.6619613", "0.65905577", "0.64673847", "0.639455", "0.63806546", "0.62849957", "0.6272024", "0.62464106", "0.61067134", "0.60251516", "0.598505", "0.5975021", "0.5975021", "0.5943581", "0.5870916", "0.5870609", "0.5868617", "0.5861066", "0.5859448", "0.5841894", "0.5825421", "0.58039004", "0.5785754", "0.57635736", "0.5733628", "0.5732261", "0.57227933", "0.57179725", "0.5713616", "0.5712923", "0.5711663", "0.5708961", "0.5708424", "0.5705682", "0.5689533", "0.5676564", "0.56509054", "0.5638196", "0.56203663", "0.56023186", "0.5595118", "0.5577964", "0.5558505", "0.5528108", "0.55250764", "0.5518349", "0.5501765", "0.5479232", "0.54533136", "0.545188", "0.5444332", "0.54286397", "0.54281867", "0.54179084", "0.5417688", "0.54091537", "0.54040647", "0.5399597", "0.53820485", "0.5377396", "0.5375846", "0.53733337", "0.53697973", "0.53697973", "0.53604305", "0.5358829", "0.5341169", "0.5337609", "0.53322786", "0.53219795", "0.5301071", "0.5299936", "0.52995676", "0.52955186", "0.52818173", "0.52695584", "0.52684957", "0.52649426", "0.526124", "0.5256539", "0.5256254", "0.52552813", "0.5253672", "0.5252535", "0.5240767", "0.52339065", "0.5218627", "0.52186114" ]
0.7861427
0
Delete all contacts from the database
def delete_contacts(self): self.db.delete_all_contacts() return self.update_contacts()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def RemoveAll(self):\n\t\tcontacts = self.GetContactList()\n\t\t\n\t\tfor contact in contacts:\n\t\t\tself.BatchEnqueue('delete', contact)\n\t\tself.ExecuteBatchQueue()", "def del_contact_all(self):\n\n send_key(KEY_MENU)\n delstr = contact.get_value('contact_delete')\n if search_text(delstr):\n click_textview_by_text(delstr)\n click_checkbox_by_id('select_all_check')\n click_button_by_id('btn_ok')\n click_button_by_index(1)\n else:\n goback()\n\n sleep(2) #take a rest to wait view ...", "def delete_all(cls):\n with sqlite3.connect(cls.dbpath) as connection:\n connection.row_factory = sqlite3.Row\n cursor = connection.cursor()\n SQL = \"DELETE FROM accounts;\"\n cursor.execute(SQL)", "def del_all_records():\n delete_alles = Customer.delete().where(Customer.name >= '')\n delete_alles.execute()", "def deleteAll(self):\n self.db.execute(\"DELETE FROM MATCH;\", ())", "def delete_contact_in_db(self):\n self.init_db(self._testing)\n\n # making sure that the object is in the db\n assert not self.uid == \"\"\n\n self._delete_row_in_db(Contact.table_name, (self.uid,))", "def deleteAllRecord(collection):\n collection_name = collection\n collection = db[collection_name]\n collection.delete_many({})\n\n print(\"Deleting all records from \" + collection_name)\n print(\"Finished operation. Collection cleared.\")\n print(\"--------- \\n\")", "def clearContactsFromPhone():\n\tprint \"Deleting any contacts from phone...\"\n\tcmd =r\"adb shell pm clear com.android.providers.contacts\"\n\tos.system(cmd)\n\tprint \"Finished deleting contacts from phone.\"", "def delete_contacts(self, contacts):\n self._post('contact_actions', None, self._build_params(contacts=contacts, action='delete'))", "def delete_all(self):\n query = \"\"\"MATCH(n) DETACH DELETE n\"\"\"\n return self.create_tx(query)", "def deleteAll(tx):\n query = (\n\n \"MATCH(p1:Person)-[a:APP_CONTACT]->(p2:Person)\"\n \"WHERE a.date < date() - duration({Days: 10}) OR (a.date = date() - duration({Days: 10}) AND a.hour < time())\"\n \"DELETE a\"\n\n )\n\n tx.run(query)", "def delete_all(sid):\n Game.objects.all().delete()", "def delete_all_domain_pages():\r\n db = connect()\r\n cursor = db.cursor()\r\n try:\r\n cursor.execute(\"DELETE FROM domain_pages\")\r\n db.commit()\r\n except:\r\n cursor.close()\r\n db.close()\r\n raise RuntimeError(\"An Exception happened with the Database, make sure you are connected\")\r\n cursor.close()\r\n db.close()", "def delete_all_users(self):\n\n User.query.delete()", "def delete_all(self):\n raise NotImplementedError()", "def deleteAll():\n _table.deleteAll()\n _initialiseGlobals()\n\n return", "def delete_all(cls):\n cls.dbm().modelclass_deleteall(cls)", "def delete_all(self):\n self.session.query(TodoItem).delete()\n self.session.query(TodoList).delete()", "def delete(self):\n self.skype.conn(\"DELETE\", \"{0}/users/{1}/contacts/8:{2}\"\n .format(SkypeConnection.API_CONTACTS, self.skype.userId, self.id),\n auth=SkypeConnection.Auth.SkypeToken)\n self.skype.conn(\"DELETE\", \"{0}/users/ME/contacts/8:{1}\".format(self.skype.conn.msgsHost, self.id),\n auth=SkypeConnection.Auth.RegToken)", "def delete_all_books(self):\n try:\n with self._db as db:\n cur = db.cursor()\n cur.execute('DELETE FROM books')\n except sqlite3.Error as e:\n raise BookError('Error deleting all books') from e", "def delete_all_messages(conn):\n sql = 'DELETE FROM LED_MESSAGE'\n cur = conn.cursor()\n cur.execute(sql)\n conn.commit()", "def empty_db(self):\n try:\n self.cur.execute(\"DELETE FROM Crashes;\")\n self.con.commit()\n print 'Deleted all 
records'\n\n except sqlite.Error, e:\n print 'Unable to delete all records.'\n print 'Exception follows:'\n print e", "def delete_all(self):\n models.CourseLearningOutcome.objects.all().delete()\n #models.CoreLearningOutcome.objects.all().delete()\n #models.CreditType.objects.all().delete()\n models.Course.objects.all().delete()\n models.DegreeProgram.objects.all().delete()\n models.DPCourseSpecific.objects.all().delete()\n models.DPCourseGeneric.objects.all().delete()\n models.DPCourseSubstituteSpecific.objects.all().delete()\n models.DPCourseSubstituteGeneric.objects.all().delete()", "def deleteAll(self):\n self.deleteAttributeRange() #Default args = everything", "def delete_all_users():\n\tUser.drop_collection()", "def delete_all_from(self, tablename):\n query = 'delete from ' + tablename\n try:\n self.__cur.execute(query)\n self.__conn.commit()\n except Exception as e:\n self.__conn.rollback()\n raise e", "def delete():", "async def delete_contact(dbcon: DBConnection, contact_id: int) -> None:\n if not await contact_exists(dbcon, contact_id):\n raise errors.InvalidArguments('contact does not exist')\n q = \"\"\"delete from contacts where id=%s\"\"\"\n await dbcon.operation(q, (contact_id,))", "def delete_all():\n answer = ['YES', 'NO']\n str = rs.GetString(\"Delete all objects?\", 'YES', answer)\n\n if str == 'YES':\n obs = rs.ObjectsByType(0)\n rs.DeleteObjects(obs)\n elif str == 'NO':\n pass\n else:\n sys.exit()", "def test_delete_contact(self):\n self.new_contact.save_contact()\n # new contact\n test_contact = Contact(\"Test\", \"user\", \"0745639300\", \"[email protected]\")\n # new contact saved\n test_contact.save_contact()\n # For deleting the new contact\n self.new_contact.delete_contact()\n self.assertEqual(len(Contact.contact_list), 1)", "def del_contact(contact):\n db = get_db()\n \n if contact.get_hash_name() in db:\n db.pop(contact.get_hash_name())\n write_db(db)\n sys.exit(logger.ok('success: contact ' + '\"%s\"' % contact.get_name() + ' deleted'))\n else:\n sys.exit(logger.fail('fatal: contact does not exist'))", "def clean_all_db():\n for model in [\n Component, Arch, AutoCase, AutoCaseFailure, Bug, Linkage, WorkItem,\n Document, Project, Framework]:\n model.objects.all().delete()", "def deleteMatches():\n c.execute(\"DELETE FROM matchup\");\n print \"All matches have been successfully deleted\"\n return", "def delete():\n\n from slicr.extensions import db\n\n click.echo('deleting database...')\n\n db.drop_all()", "def delete_all_students(connection):\r\n with connection:\r\n return connection.execute(DELETE_ALL_STUDENTS)", "def delete_all(self, Model):\n ndb.delete_multi(\n Model.query().fetch(keys_only=True)\n )", "def deleteMatches():\n #deletes the contents of table matches\n DB().execute(\"DELETE FROM matches\", True)", "def test_projects_id_contacts_delete(self):\n project = Contact()\n response = self.client.open('/project-tracker/projects/{id}/contacts'.format(id=56),\n method='DELETE',\n data=json.dumps(project),\n content_type='application/json')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def clear_db(self):\n self.cursor.execute(\"DELETE FROM TrackPoint\")\n self.cursor.execute(\"DELETE FROM Activity\")\n self.cursor.execute(\"DELETE FROM User\")\n self.db_connection.commit()", "def do_delContact(self, line):\n\t\tif not(self.db is None):\n\t\t\ttry:\n\t\t\t\tself.db.contact.delete_one({'_id': ObjectId(line)})\n\t\t\texcept Exception:\n\t\t\t\tprint(\"This id doesn't exist!\")\n\t\telse:\n\t\t\tprint(\"You must open 
the existing database or create new one.\")", "def delete(self):\n\n\n try:\n db = getDatabase()\n connection = db.connect()\n\n connection.delete(self)\n except Exception as e:\n raise e\n finally:\n db.dispose()", "def delete_contacts_module_db(request, module_db_id):\n errors = None\n success = False\n if request.method == 'POST':\n try:\n json_obj = json.loads(request.body)\n list_ids = json_obj.get('list', '')\n try:\n for item_id in list_ids:\n contact = Contact.objects.get(list_owner__id=module_db_id, id=item_id)\n contact.delete()\n success = True\n except Contact.DoesNotExist as e:\n errors = e.args\n except ModuleContactListDB.DoesNotExist as e:\n errors = e.args\n\n data = {'success': success, 'errors': errors}\n return json_response(data)", "def delete_scheduled_events():\n\n connections = Connection.objects.all()\n\n for conn in connections:\n if conn.contact is None:\n conn.delete()", "def delete_all_chores():\n try:\n num_rows_deleted = session.query(Chores).delete()\n session.commit()\n return \"{} records deleted\\n\".format(num_rows_deleted)\n except:\n session.rollback()", "def delete_contact(self):\n delete_first_name = input(\"Enter first name that you want to delete\\n\")\n for contact in self.contact_list:\n if contact.first_name == delete_first_name:\n #print(str(contact))\n self.contact_list.remove(contact)\n else:\n print(f\"No contact is present with first name {delete_first_name} \")", "def remove_all():\n \"\"\" Removes all from the database \"\"\"\n redis_store.flushall()", "def delete(max_iterations):\n persons = get_persons()\n count = 0\n for person in persons:\n if count > max_iterations:\n return\n count += 1\n if choice([0, 1]):\n params = {\"event\": \"contact.delete\",\n \"data\": {\"id\": person['id']}}\n request(params)", "def delete_all_onprogress_domains():\r\n db = connect()\r\n cursor = db.cursor()\r\n try:\r\n cursor.execute(\"DELETE FROM on_progress_domains\")\r\n db.commit()\r\n except:\r\n cursor.close()\r\n db.close()\r\n raise RuntimeError(\"An Exception happened with the Database, make sure you are connected\")\r\n cursor.close()\r\n db.close()", "def test_delete_contact(self):\n self.new_contact.save_contact()\n test_contact = Contact(\"Test\", \"User\", 254712345678, \"[email protected]\") # new contact\n test_contact.save_contact()\n self.new_contact.delete_contact() # delete a contact object\n self.assertEqual(len(Contact.contact_list), 1)", "def deleteMatches():\n conn, cur = connect()\n query = \"TRUNCATE matches CASCADE;\"\n try:\n cur.execute(query)\n except:\n print(\"Error encountered deleting all matches.\")\n conn.commit()\n conn.close()", "def removeall(table):\n doall(\"DELETE FROM {table}\".format(table=table))", "def delete_all_book(request):\n all_books = Book.objects.all()\n for book in all_books:\n book.pdf.delete()\n book.cover.delete()\n book.delete()\n return redirect('book_list')", "def clean_database(self):\n for name in list(self.database):\n self._remove_database_entry(name)", "def deleteMatches():\n db, cursor = connect()\n cursor.execute(\"DELETE FROM matches\")\n db.commit()\n db.close()", "def deleteDB():\n db = sqlite.connect(db_path)\n db.row_factory = sqlite.Row\n cursor = db.cursor()\n cursor.execute(\"DELETE from rooms\")\n\n cursor.execute(\"DELETE from users\")\n\n cursor.execute(\"DELETE from urls\")\n\n cursor.fetchall()\n db.commit()\n cursor.close()\n db.close()", "def rm_contact_from_addressbook(database, name, surname, database_counter,\n database_ids):\n\n from addressbook.verify_contact import 
check_if_contact_exists\n\n if check_if_contact_exists(database, name, surname, database_counter,\n database_ids)[0] == 'Yes':\n print('The following contact will be removed:')\n id = check_if_contact_exists(database, name, surname, database_counter,\n database_ids)[1]\n print(str(id), '|', database[f'{id}']['first name'], '|',\n database[f'{id}']['last name'],\n '|', database[f'{id}']['address'], '|',\n database[f'{id}']['mobile phone'])\n del database[f'{id}']\n print('\\n')\n return id\n else:\n print('There is no such contact for deletion!')\n print('\\n')\n return 0", "def delete(self):\n with sqlite3.connect(self.dbpath) as connection: \n cursor = connection.cursor()\n DELETESQL = \"\"\"DELETE FROM accounts WHERE id=:id \"\"\"\n cursor.execute(DELETESQL, {\"id\": self.id})\n self.id = None", "def deleteMatches():\n db = connect()\n db_cursor = db.cursor()\n query = \"DELETE FROM matches\"\n db_cursor.execute(query)\n db.commit()\n db.close()", "def delcontact(id):\n delid = str(id)\n\n try:\n r.srem(\"contacts\", delid, 1)\n\n r.delete(\"uid:\" + delid + \":name\")\n r.delete(\"uid:\" + delid + \":address\")\n r.delete(\"uid:\" + delid + \":phone\")\n r.delete(\"uid:\" + delid + \":email\")\n\n return {}\n except:\n print \"Unexpected error:\", sys.exc_info()[0]\n raise", "def clear_data():\n conn = get_connect()\n #conn.execute(\"DELETE from match\")\n #conn.execute(\"DELETE from account\")\n #conn.execute(\"DELETE from championMatchData\")\n conn.execute(\"DELETE from championData\")\n conn.commit()\n conn.close()\n print(\"all data in info.db has been cleared\")\n return", "def delete_all_teachers(connection):\r\n with connection:\r\n return connection.execute(DELETE_ALL_TEACHERS)", "async def _wipe_casino(self, ctx):\n await self.db.clear_all()\n msg = \"{0.name} ({0.id}) wiped all casino data.\".format(ctx.author)\n await ctx.send(msg)", "async def delete_all_games(self):\n all_games = await ex.conn.fetch(\"SELECT gameid FROM blackjack.games\")\n for games in all_games:\n game_id = games[0]\n await self.delete_game(game_id)", "def deleteMatches():\n dbconnection = connect()\n dbcursor = dbconnection.cursor()\n dbcursor.execute(\"DELETE FROM matches\")\n dbconnection.commit()\n dbconnection.close()", "def delete_all_rows(self, table):\n sql = f\"DELETE FROM {table}\"\n connection = self.__create_connection()\n cur = connection.cursor()\n cur.execute(sql)\n cur.close()\n connection.commit()", "def delete_all(collection: Collection):\n return collection.delete_many({}).deleted_count", "def delete_all_records(db):\n with tables(db.engine) as (connection,):\n metadata = sqlalchemy.MetaData(bind=connection)\n metadata.reflect()\n # We delete the tables in order of dependency, so that foreign-key\n # relationships don't prevent a table from being deleted.\n for tbl in reversed(metadata.sorted_tables):\n tbl.delete().execute()", "def delete_all(table_name):\n with get_connection() as conn:\n return rethink.table(table_name).delete().run(conn)", "def deleteMatches():\n db_conn = connect()\n db_cursor = db_conn.cursor()\n db_cursor.execute(\"delete from matches;\")\n db_conn.commit()\n db_conn.close()", "def deleteall(cls, transaction):\n return Delete(\n From=cls.table,\n Where=None,\n ).on(transaction)", "def deleteMatches():\n DB = dbc()\n DB.cursor().execute('DELETE FROM matches')\n DB.commit()\n DB.close()", "def _purge(self):\n for _ in self.all():\n self.delete(_)", "def clear_all(delete_id):\n Tasks.query.filter(Tasks.project_id == delete_id).delete()\n 
Projects.query.filter(Projects.project_id == delete_id).delete()\n db.session.commit()\n\n return redirect('/')", "def clean_exam():\n data = Exam.objects.all()\n data.delete()", "def delete_db():\n db.drop_all()", "def run(self):\n self.db.table('purchases').delete()\n self.db.table('payments').delete()", "def _delete_all(self):\n logging.info(\"Remove all nodes and relations from database.\")\n self.graph.delete_all()\n return", "def remove_all(self):\n # Post a delete all notice to the manager\n self._remove_all()", "def remove_all(self):\n # Post a delete all notice to the manager\n self._remove_all()", "def clear_db(db : Session = Depends(get_db)):\n db.query(Acti).delete()\n db.query(Wrist).delete()\n db.commit()", "def deleteMatches():\n DB = connect()\n c = DB.cursor()\n c.execute(\"DELETE FROM matches\")\n DB.commit()\n DB.close()", "def _delete_all(self, criteria: Q = None):\n conn = self._get_session()\n\n del_count = 0\n if criteria:\n qs = conn.query(self.model_cls).filter(self._build_filters(criteria))\n else:\n qs = conn.query(self.model_cls)\n\n try:\n del_count = qs.delete()\n except DatabaseError as exc:\n logger.error(f\"Error while deleting all: {exc}\")\n raise\n finally:\n if not current_uow:\n conn.commit()\n conn.close()\n\n return del_count", "def deleteMatches():\n dbConn = connect()\n c = dbConn.cursor()\n c.execute(\"DELETE FROM match\")\n dbConn.commit()\n dbConn.close()", "def removeall(subdomain):\n\tTarget.query.filter(Target.subdomain.like(f\"%{subdomain}%\")).delete(synchronize_session='fetch')\n\tdb.session.commit()\n\tprint(\"deleted\",sub)", "def delete(self):\n pass", "def delete(self):\n pass", "def delete(self):\n pass", "def delete(self):\n pass", "def full_reset(self):\n for docid in self.iter_docids():\n self.delete(docid)\n self.client.delete(self.dbprefix + 'schema')\n self.client.delete(self.dbprefix + 'docs')\n self.client.delete(self.dbprefix + 'nextid')", "def delete(self):\n ...", "def deletePlayers():\n conn, cur = connect()\n query = \"TRUNCATE players CASCADE;\"\n try:\n cur.execute(query)\n except:\n print(\"Error encountered deleting all players\")\n conn.commit()\n conn.close()", "async def delete_contact_from_contact_group(dbcon: DBConnection, contact_group_id: int, contact_id: int) -> None:\n q = \"\"\"delete from contact_group_contacts where contact_group_id=%s and contact_id=%s\"\"\"\n q_args = (contact_group_id, contact_id)\n await dbcon.operation(q, q_args)", "def delete(self):\n\n def _delete(result):\n oldid = self.id\n self.id = None\n self._deleted = True\n if self._transaction:\n return self.__class__.deleteAll(where=[\"id = ?\", oldid], transaction=self._transaction)\n else:\n return self.__class__.deleteAll(where=[\"id = ?\", oldid])\n\n def _deleteOnSuccess(result):\n if result == False:\n return defer.succeed(self)\n else:\n ds = []\n for relation in self.HABTM:\n name = relation['name'] if isinstance(relation, dict) else relation\n ds.append(getattr(self, name).clear(transaction=self._transaction))\n return defer.DeferredList(ds).addCallback(_delete)\n\n return defer.maybeDeferred(self.beforeDelete).addCallback(_deleteOnSuccess)", "def _delete (self):\n self._exec ('delete from table_name where id=%(id)s')", "def deletePlayers():\n #deletes the contents of table players\n DB().execute(\"DELETE FROM players\", True)", "def clear_db():\n humans = Human4j.nodes.all()\n for h in humans:\n h.delete()\n binomes = Binome4j.nodes.all()\n for b in binomes:\n b.delete()\n projects = Project4j.nodes.all()\n for p in projects:\n 
p.delete()\n sherpas = Sherpa4j.nodes.all()\n for sh in sherpas:\n sh.delete()\n students = Pioupiou4j.nodes.all()\n for piou in students:\n piou.delete()\n partenaires = Partenaire4j.nodes.all()\n for part in partenaires:\n part.delete()\n ps = Planete_Solidaire.nodes.all()\n for misc in ps:\n misc.delete()", "def delete(self, id):\n return Contacts().delete_one(id)", "def deleteMatches():\n conn, c = connect()\n c.execute(\"DELETE FROM matches;\")\n conn.commit()\n conn.close()", "def clear_all():\n bpy.ops.object.select_all(action='SELECT')\n bpy.ops.object.delete()", "def delete_all(self):\n with self.__lock:\n self.__data = dict()\n self.flush()" ]
[ "0.75905544", "0.7518111", "0.7328898", "0.72868043", "0.7197841", "0.71404386", "0.6918222", "0.6866233", "0.68557423", "0.675412", "0.6746233", "0.6720648", "0.6713548", "0.67038", "0.66817254", "0.6613652", "0.66043967", "0.6591996", "0.65602887", "0.65030277", "0.6497577", "0.6491358", "0.64789325", "0.64758885", "0.6444053", "0.64417845", "0.6436533", "0.64049757", "0.6400515", "0.63911605", "0.63910437", "0.638919", "0.6386331", "0.63820064", "0.63752514", "0.6347126", "0.63435537", "0.6339924", "0.63377607", "0.6331757", "0.6331228", "0.6323167", "0.63153553", "0.63151634", "0.62999976", "0.6291397", "0.6279865", "0.6276521", "0.6272047", "0.62662715", "0.6251863", "0.62410134", "0.62401605", "0.62198097", "0.62086695", "0.6205562", "0.61959535", "0.6184844", "0.617197", "0.6158423", "0.61526185", "0.6150249", "0.6150232", "0.6148772", "0.6147519", "0.61466014", "0.6141619", "0.6138763", "0.6135508", "0.6134318", "0.61333364", "0.6128489", "0.6125986", "0.61213493", "0.61198884", "0.61167467", "0.6110328", "0.61102486", "0.61102486", "0.61080265", "0.6105941", "0.60979146", "0.6090709", "0.60814476", "0.6070025", "0.6070025", "0.6070025", "0.6070025", "0.60618263", "0.606132", "0.6060375", "0.6046846", "0.6041087", "0.60392773", "0.6035893", "0.6033018", "0.60257167", "0.6017233", "0.6011279", "0.6009693" ]
0.8326911
0
List contacts by searching for a given value
def list_contact(self, key, value): self.db.list_contact( key, value, )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def search_contact_list(self):\n\n search_db = Database()\n result = search_db.contact_search(self.name)\n if not result:\n print Fore.YELLOW + ' No such contact'\n return None\n if result > 1:\n print ' Which contact ??'\n for items in result:\n if items[2] > 1:\n print Fore.BLUE + ' %s %s %s' % ([items[0]], items[1], items[2])\n else:\n print str(items[1]), items[2]\n\n return result", "def pull_one_contact(self, name):\n contact = []\n for x in self.contacts:\n if x[0] == name:\n contact_name = x[0]\n number = x[1]\n email = x[2]\n zipcode = x[3]\n contact = [contact_name, number, email, zipcode]\n print(contact)\n return contact, self.contacts.index(x)", "def search(self, name):\n\t\tmatching_contacts = []\n\t\tfor contact in self:\n\t\t\tif name in contact.name:\n\t\t\t\tmatching_contacts.append(contact)\t\n\t\treturn matching_contacts", "def search_contact():\n if request.method == 'GET':\n tel = request.args.get('tel')\n contact = io_client.get_contacts(urn=['tel:+52' + tel]).all()\n if contact:\n return jsonify({\"existe\": \"Si\"}), 201\n return jsonify({\"existe\": \"No\"}), 404", "def find_entry(key):\n found_list = []\n db = sh.open(the_phone_book_name, flag='c', writeback=True)\n for k in db:\n name = str(k).lower()\n phone = str(db[k])\n if (name.find(key.lower())) >= 0 or (phone.find(key.lower()) >= 0):\n person = Person()\n person.name = k\n person.phone = db[k]\n found_list.append(person)\n display_list(found_list)\n db.close()", "def search_contact(request, **kwargs):\n limit = int(request.GET.get('limit', constants.DEFAULT_LIMIT))\n offset = int(request.GET.get('offset', constants.DEFAULT_OFFSET))\n search_term = request.GET.get('search_term')\n contact = private.Contact()\n data = contact.fetch_list(limit, offset, search_term)\n return JsonResponse({'objects': data})", "def list_contacts(self, prefix):\n sub_trie = self.find(prefix.lower())\n _crawl_trie(sub_trie, prefix)", "def do_search(self, line):\n\t\t# if isinstance(self.cl, Book):\n\t\t# \tst = time()\n\t\t# \tprint(\"\\nSearch results for: \",str(line))\n\t\t# \tfor i in self.cl.search_contact(str(line)):\n\t\t# \t\tprint(i)\n\t\t# \tprint('Time elapsed: {:10f}'.format(time()-st))\n\t\t# else:\n\t\t# \tprint(\"To search contacts you need to open or create a book.\")\n\t\tif isinstance(self.cl, Book):\n\t\t\tst = time()\n\t\t\tprint(\"\\nSearch results for: \",str(line))\n\t\t\tw = []\n\t\t\tfor i in range(0,len(self.cl.data),10000):\n\t\t\t\tw.append(self.cl.data[i:i+10000])\n\t\t\tworkers = []\n\t\t\tfor i in w:\n\t\t\t\tworkers.append(Process(target=self.cl.search_contact, args=(i, str(line))))\n\t\t\tfor worker in workers:\n\t\t\t\tworker.start()\n\t\t\tfor worker in workers:\n\t\t\t\tworker.join()\n\t\t\tprint('Time elapsed: {:10f}'.format(time()-st))\n\t\telse:\n\t\t\tprint(\"To search contacts you need to open or create a book.\")", "def search(self, value):\n pass", "def do_fsearch(self, line):\n\t\tif isinstance(self.cl, Book):\n\t\t\tst = time()\n\t\t\tprint(\"\\nSearch results for: \",str(line))\n\t\t\tfor i in self.cl.fsearch_contact(str(line)):\n\t\t\t\tprint(i)\n\n\t\t\tprint('Time elapsed: {:10f}'.format(time()-st))\n\t\telse:\n\t\t\tprint(\"To search contacts you need to open or create a book.\")", "def __ui_search_persons_by_phone_number(self):\n searched_phone_number = input(\"Introduce the phone number: \").strip().lower()\n if searched_phone_number == \"\":\n print(\"You cannot search persons by an empty phone number!\\n\")\n return\n\n searched_persons = 
self.__person_service.find_persons_by_phone_number(searched_phone_number)\n\n if len(searched_persons) == 0:\n print('There is no person whose phone number matches with \"{}\"!\\n'.format(searched_phone_number))\n else:\n print(\"\")\n for person in searched_persons:\n print(person)\n print(\"\")", "def list_contact(name):\n db = get_db()\n name = hashlib.sha256(name).hexdigest()\n \n if name in db:\n info = db[name]\n print logger.ok(\"\"\"\n Contact Information:\n Name: %s\n Phone Number: %s\n Email Address: %s\n \"\"\" % (info['name'], info['phone'], info['email']))\n else:\n sys.exit(logger.fail('fatal: contact does not exist'))", "def list_contacts(self):\n return self.contacts", "def get_contacts(self):\n contacts = Membership.objects.filter(entity = self, key_contact = True).order_by('importance_to_entity')\n return contacts", "def Collection_search_name(C:list, name:str) -> list:\r\n restaurants = []\r\n for r in C:\r\n for dish in r.menu:\r\n if name in dish.name:\r\n restaurants.append(r)\r\n return restaurants", "def search_for_email_given_job(job_description: str, contacts: str):\n # create an empty list of the contacts\n contacts_list = []\n # --> refer to the file called inputs/contacts.txt to learn more about\n # the format of the comma separated value (CSV) file that we must parse\n # --> iterate through each line of the file and extract the current job\n for contact_line in csv.reader(\n contacts.splitlines(),\n quotechar='\"',\n delimiter=\",\",\n quoting=csv.QUOTE_ALL,\n skipinitialspace=True,\n ):\n # TODO: extract the current job for the contact on this line of the CSV\n # TODO: the job description matches and thus we should save it in the list\n # return the list of the contacts who have a job description that matches\n return contacts_list", "def do_multi_search(self, line):\n\t\tif isinstance(self.cl, Book):\n\t\t\tprint(\"\\nSearch results for: \",str(line))\n\t\t\tt = time()\n\t\t\tself.cl.pool_str = str(line)\n\t\t\tpool = Pool(4)\n\t\t\tpool.imap_unordered(self.cl.multi_search_contact, self.cl.data, chunksize=50000)\n\t\t\tpool.close()\n\t\t\tpool.join()\n\t\t\tprint(\"time elapsed: {:10f}\".format(time()- t))\n\t\telse:\n\t\t\tprint(\"To search contacts you need to open or create a book.\")", "def search(self, value):\n return self._search(self.head, value)", "def getcontacts():\n contacts = {}\n\n try:\n #get list of contact ids\n contactids = r.smembers(\"contacts\")\n\n #for each contact id get data\n for contactid in contactids:\n contacts.update(_getcontact(str(contactid)))\n return contacts\n except:\n print \"Unexpected error:\", sys.exc_info()[0]\n raise", "def mutt_search(self, term):\n attrs = (\"email_address\", \"name\", \"otherinfo\", \"extrainfo\")\n ret = list(\n filter(lambda aitem: any(\n term in getattr(aitem, attr, \"\") for attr in attrs\n ), self.addresses)\n )\n return ret", "def display_contact(self):\n contacts = \"\".join(str(contact) for contact in self.contact_list)\n print(contacts)", "def get(self, field: str, value: str):\n data = {\n 'query': {\n 'object': 'CONTACT',\n 'select': {\n 'field': [\n 'RECORDNO',\n 'CONTACTNAME',\n 'COMPANYNAME',\n 'FIRSTNAME',\n 'LASTNAME',\n 'INITIAL',\n 'PRINTAS',\n 'TAXABLE',\n 'MAILADDRESS.ADDRESS1'\n ]\n },\n 'filter': {\n 'equalto': {\n 'field': field,\n 'value': value\n }\n },\n 'pagesize': '2000'\n }\n }\n\n return self.format_and_send_request(data)['data']", "def Run(self):\n return self.ListAllContacts()", "def do_search(self, line):\n\t\tif not(self.db is None):\n\t\t\tstart = 
time()\n\t\t\tresult = self.db.contact.find({'$or':[\n\t\t\t\t\t{'first_name': {'$regex':line, '$options':'i'}},\n\t\t\t\t\t{'surname': {'$regex':line, '$options':'i'}},\n\t\t\t\t\t{'company': {'$regex':line, '$options':'i'}},\n\t\t\t\t\t{'address': {'$regex':line, '$options':'i'}},\n\t\t\t\t\t{'telephone': {'$regex':line, '$options':'i'}},\n\t\t\t\t\t{'email': {'$regex':line, '$options':'i'}},\n\t\t\t\t\t{'id_': {'$regex':line, '$options':'i'}}\n\t\t\t\t]})\n\t\t\tfor i in result:\n\t\t\t\tpprint.pprint(i)\n\t\t\tprint(\"Time elapsed: {}\".format(time()-start))\n\t\telse:\n\t\t\tprint(\"You must open the existing database or create new one.\")", "def find(self, start: ghidra.program.model.address.Address, values: List[int]) -> ghidra.program.model.address.Address:\n ...", "def lookup(name, phonebook):\n\n phonebook_data = read_phonebook(phonebook)\n\n match = False\n for entry_name in phonebook_data:\n if name.lower() in entry_name.lower():\n match = True\n print entry_name, phonebook_data[entry_name]\n\n if not match:\n print \"No matches found.\"", "def get_contacts(userid):\n return 'get contacts - ' + userid", "def get_contacts_list(self):\n return [(id + 1, contact) for id, contact in enumerate(self.contact_list)]", "def search_for_customer(f_name: str, l_name: str):\n return cr.search_for_customer(f_name=f_name, l_name=l_name)", "def search(trie, query):\n try:\n trie.list_contacts(query)\n for value, _ in distance_words(result, query):\n print(value)\n except Exception:\n logging.debug(traceback.format_exc())\n print(\"Not Found!\")", "def getPeopleInAddressBook(group_name=None):\n ab = ABAddressBook.sharedAddressBook()\n people = None\n if not group_name:\n people = ab.people()\n else:\n for group in ab.groups():\n if group.name() == group_name:\n people = group.members()\n if people == None:\n print \"No contacts could be found for given group\"\n return _clist(people)", "def find(self, start: ghidra.program.model.address.Address, value: int) -> ghidra.program.model.address.Address:\n ...", "def search(self,name=None):\n\t\taddresses = discover_devices()\n\t\t#if len(addresses) == 0:\n\t\t#\treturn None\n\t\tnames = []\n\t\tfor adr in addresses:\n\t\t\tnames.append(lookup_name(adr))\n\t\t\tif name != None and name == names[-1]:\n\t\t\t\treturn adr\n\n\t\treturn zip(addresses,names)", "def receiveContactList(self, contactList):", "def view_contacts(self):\n with open(self.filename, \"r\") as contactsFile:\n contacts = self.display_contact(contactsFile.readlines())\n\n if not contacts:\n return self.msgbox(\"No contacts found.\")\n\n self.msgbox(msg=\"\\n\".join(contacts), title=\"Showing All Contacts\")", "def all_in_contact(cls, contact_id: int):\n for contact_tag in cls.get_all_in(\"contacts\", contact_id):\n yield contact_tag", "def contact_list(self):\n return self._contact_list", "def task_3_find_item_via_value(data: DT, value) -> DT:\n find = [find for find in data for n in find.values() if n == value]\n return find", "def search(self, val):\n search_through = self.head\n while search_through:\n if val == search_through.data:\n return search_through\n else:\n search_through = search_through.next\n return search_through", "def search_people_1(search_type, search_value):\n try:\n # Search Types:\n # First Name, Last Name, Address 1, Address 2, City, State, Zip Code, Phone, Email, Identification\n # COLLATE NOCASE\n # column place holder variables are needed for coalesce to work right.\n first_name = None\n last_name = None\n address_line_1 = None\n address_line_2 = None\n city 
= None\n state = None\n zip_code = None\n\n conn = sqlite3.connect(settings.database_name)\n conn.row_factory = sqlite3.Row\n c = conn.cursor()\n c.execute(\"PRAGMA foreign_keys = ON\")\n\n # with name search we use coalesce and search for only one value at a time,\n # which ever is passed first name or last name. This means our WHERE clause effectively becomes one of two cases\n # Case 1: WHERE firstname = search_value AND lastname = lastname\n # Case 1: WHERE firstname = firstname AND lastname = search_value\n # This pattern is repeated for the remaining searches where more than one parameter is expected.\n if search_type in {\"First Name\", \"Last Name\"}:\n if search_type == \"First Name\":\n first_name = search_value\n if search_type == \"Last Name\":\n last_name = search_value\n\n c.execute(\"SELECT DISTINCT p.personid, p.firstname, p.lastname, p.middleinitial, p.nickname, \"\n \"p.dateofbirth, p.dateofdeath \"\n \"FROM person p \"\n \"WHERE p.firstname = coalesce(?, p.firstname) COLLATE NOCASE \"\n \"AND p.lastname = coalesce(?, p.lastname) COLLATE NOCASE;\", (first_name, last_name))\n\n if search_type in {\"Address 1\", \"Address 2\", \"City\", \"State\", \"Zip Code\"}:\n if search_type == \"Address 1\":\n address_line_1 = search_value\n if search_type == \"Address 2\":\n address_line_2 = search_value\n if search_type == \"City\":\n city = search_value\n if search_type == \"State\":\n state = search_value\n if search_type == \"Zip Code\":\n zip_code = search_value\n\n c.execute(\"SELECT DISTINCT p.personid, p.firstname, p.lastname, p.middleinitial, p.nickname, \"\n \"p.dateofbirth, p.dateofdeath \"\n \"FROM person p \"\n \"LEFT JOIN address a on a.personid = p.personid \"\n \"WHERE a.addressline1 = coalesce(?, a.addressline1) COLLATE NOCASE \"\n \"AND a.addressline2 = coalesce(?, a.addressline2) COLLATE NOCASE \"\n \"AND a.city = coalesce(?, a.city) COLLATE NOCASE \"\n \"AND a.state = coalesce(?, a.state) COLLATE NOCASE \"\n \"AND a.zipcode = coalesce(?, a.zipcode) COLLATE NOCASE;\", (address_line_1, address_line_2,\n city, state, zip_code))\n\n if search_type == \"Phone\":\n c.execute(\"SELECT DISTINCT p.personid, p.firstname, p.lastname, p.middleinitial, p.nickname, \"\n \"p.dateofbirth, p.dateofdeath \"\n \"FROM person p \"\n \"JOIN contact c on c.personid = p.personid \"\n \"JOIN phone ph on ph.contactid = c.contactid \"\n \"WHERE ph.areacode || ph.exchange || ph.trunk = ?;\", (re.sub(\"[^0-9]\", \"\", search_value),))\n\n if search_type == \"Email\":\n c.execute(\"SELECT DISTINCT p.personid, p.firstname, p.lastname, p.middleinitial, p.nickname,\"\n \"p.dateofbirth, p.dateofdeath \"\n \"FROM person p \"\n \"JOIN contact c on c.personid = p.personid \"\n \"JOIN email e on e.contactid = c.contactid \"\n \"WHERE e.emailaddress = ? COLLATE NOCASE;\", (search_value,))\n\n if search_type == \"Identification\":\n c.execute(\"SELECT DISTINCT p.personid, p.firstname, p.lastname, p.middleinitial, p.nickname, \"\n \"p.dateofbirth, p.dateofdeath \"\n \"FROM person p \"\n \"JOIN identification i on i.personid = p.personid \"\n \"WHERE i.identificationnumber = ? 
COLLATE NOCASE;\", (search_value,))\n\n p = []\n if search_type == \"All\":\n p = read_people()\n else:\n for row in c:\n _person = Person()\n _person.person_id = row[\"personid\"]\n _person.first_name = row[\"firstname\"]\n _person.last_name = row[\"lastname\"]\n _person.middle_initial = row[\"middleinitial\"]\n _person.nick_name = row[\"nickname\"]\n _person.date_of_birth = row[\"dateofbirth\"]\n _person.date_of_death = row[\"dateofdeath\"]\n p.append(_person)\n conn.close()\n return p\n except:\n return []", "def search_for_clients(client_list, search_term):\n matched_clients = []\n for client in client_list:\n if client.first == search_term or client.last == search_term or \\\n client.job == search_term or client.company == search_term:\n matched_clients.append(client)\n return matched_clients", "def get(self):\n args = GET_PARSER.parse_args()\n print(f'args={args}')\n\n return Contacts().get_all(\n args[\"phonetypeOne\"],\n args[\"phonetypeTwo\"],\n args[\"phonetypeThree\"],\n args[\"firstName\"],\n args[\"lastName\"],)", "def search_1(addresses: list, street_number: str, street_name: str) -> list:\n number_matches = set()\n name_matches = set()\n for address in addresses:\n if not street_number or street_number in address: number_matches.add(address)\n if not street_name or street_name in address: name_matches.add(address)\n intersection = number_matches & name_matches\n return list(intersection)", "def contacts(request):\n User = get_user_model()\n ids = set(request.user.chatmessage_set.all().values_list(\"recipients\", flat=True))\n context = {\n 'contacts': User.objects.filter(pk__in=ids)\n }\n return render(request, \"chat/contacts.html\", context)", "def Collection_search_by_name(C: list, name: str) -> list:\r\n result = [ ]\r\n for r in C:\r\n if r.name == name:\r\n result.append(r)\r\n return result", "def search_people_2(search_type, search_value):\n try:\n # Search Types:\n # First Name, Last Name, Address 1, Address 2, City, State, Zip Code, Phone, Email, Identification\n query_string_name =\\\n \"SELECT DISTINCT p.personid, p.firstname, p.lastname, p.middleinitial, p.nickname, \"\\\n \"p.dateofbirth, p.dateofdeath \"\\\n \"FROM person p \"\\\n \"WHERE {0} LIKE ? COLLATE NOCASE;\"\n query_string_address =\\\n \"SELECT DISTINCT p.personid, p.firstname, p.lastname, p.middleinitial, p.nickname, \"\\\n \"p.dateofbirth, p.dateofdeath \"\\\n \"FROM person p \"\\\n \"LEFT JOIN address a on a.personid = p.personid \"\\\n \"WHERE {0} LIKE ? 
COLLATE NOCASE \"\n\n conn = sqlite3.connect(settings.database_name)\n conn.row_factory = sqlite3.Row\n c = conn.cursor()\n c.execute(\"PRAGMA foreign_keys = ON\")\n final_query = \"\"\n if search_type in {\"First Name\", \"Last Name\"}:\n if search_type == \"First Name\":\n final_query = query_string_name.format(\"p.firstname\")\n if search_type == \"Last Name\":\n final_query = query_string_name.format(\"p.lastname\")\n c.execute(final_query, (\"%\"+search_value+\"%\",))\n\n if search_type in {\"Address 1\", \"Address 2\", \"City\", \"State\", \"Zip Code\"}:\n if search_type == \"Address 1\":\n final_query = query_string_address.format(\"a.addressline1\")\n c.execute(final_query, (\"%\"+search_value+\"%\",))\n if search_type == \"Address 2\":\n final_query = query_string_address.format(\"a.addressline2\")\n c.execute(final_query, (\"%\"+search_value+\"%\",))\n if search_type == \"City\":\n final_query = query_string_address.format(\"a.city\")\n c.execute(final_query, (\"%\"+search_value+\"%\",))\n if search_type == \"State\":\n final_query = query_string_address.format(\"a.state\")\n c.execute(final_query, (\"%\"+search_value+\"%\",))\n if search_type == \"Zip Code\":\n final_query = query_string_address.format(\"a.zipcode\")\n c.execute(final_query, (\"%\"+search_value+\"%\",))\n\n if search_type == \"Phone\":\n c.execute(\"SELECT DISTINCT p.personid, p.firstname, p.lastname, p.middleinitial, p.nickname, \"\n \"p.dateofbirth, p.dateofdeath \"\n \"FROM person p \"\n \"JOIN contact c on c.personid = p.personid \"\n \"JOIN phone ph on ph.contactid = c.contactid \"\n \"WHERE ph.areacode || ph.exchange || ph.trunk LIKE ?;\", (\"%\"+search_value+\"%\",))\n\n if search_type == \"Email\":\n c.execute(\"SELECT DISTINCT p.personid, p.firstname, p.lastname, p.middleinitial, p.nickname,\"\n \"p.dateofbirth, p.dateofdeath \"\n \"FROM person p \"\n \"JOIN contact c on c.personid = p.personid \"\n \"JOIN email e on e.contactid = c.contactid \"\n \"WHERE e.emailaddress LIKE ? COLLATE NOCASE;\", (\"%\"+search_value+\"%\",))\n\n if search_type == \"Identification\":\n c.execute(\"SELECT DISTINCT p.personid, p.firstname, p.lastname, p.middleinitial, p.nickname, \"\n \"p.dateofbirth, p.dateofdeath \"\n \"FROM person p \"\n \"JOIN identification i on i.personid = p.personid \"\n \"WHERE i.identificationnumber LIKE ? 
COLLATE NOCASE;\", (\"%\"+search_value+\"%\",))\n\n p = []\n if search_type == \"All\":\n p = read_people()\n else:\n for row in c:\n _person = Person()\n _person.person_id = row[\"personid\"]\n _person.first_name = row[\"firstname\"]\n _person.last_name = row[\"lastname\"]\n _person.middle_initial = row[\"middleinitial\"]\n _person.nick_name = row[\"nickname\"]\n _person.date_of_birth = row[\"dateofbirth\"]\n _person.date_of_death = row[\"dateofdeath\"]\n p.append(_person)\n conn.close()\n return p\n except Exception as exc:\n aexc = exc\n return []", "def search_by_name(self, name):\r\n return self.__filter(self.get_all_persons(), lambda x: name.lower().strip() in x.name.lower().strip())", "def get_contacts_list(self):\n contacts = self.driver.find_elements_by_class_name(\"_1wjpf\")\n s= [contact.text for contact in contacts] #extracts chats and last messsages\n print (\"get contacts: \"+str(s)) #print only chat names\n return s[::2] #returns only chat names", "def search_customer(login=\"\", name=\"\", phone=\"\", email=\"\", permission=\"\"):\n with MY_CONNECTION as connection:\n cursor = connection.cursor()\n cursor.execute(\n \"\"\"\n SELECT id_customer, login, customer_name, phone, email, perm\n FROM Customers\n WHERE login=? OR customer_name=? OR phone=? OR email=? or perm=?\n \"\"\",\n (login, name, phone, email, permission))\n return cursor.fetchall()", "def get_chromosome_from_list(self,_class,index):\n search_list = self.chromo_list[index]\n #List doesnt exist\n if search_list is None:\n return None\n #List exists\n else:\n search_chromosome = None\n for chromosomes in search_list:\n #Class is found\n if chromosomes._class is _class:\n search_chromosome = chromosomes\n break\n #Class is not found\n else:\n search_chromosome = None\n \n return search_chromosome", "def get_contacts(self):\n\n\t\treturn self.__contacts", "def get_queryset(self):\n return self.request.user.contacts.all()", "def search(self, find_val):\n return False", "def search_student(student):\n result=[]\n for name,age in alumnos.items():\n if student.lower() in name.lower():\n result.append(name)\n\n print(f\"Result {result}\")\n return result", "def search(self, **kwargs):\n ret = self.addresses\n for key, val in kwargs.items():\n # Slightly odd syntax setting default values for key and val so that\n # v and k are not leaky cell variables.\n ret = list(\n filter(lambda aitem, v=val, k=key: v in getattr(aitem, k, \"\"), ret)\n )\n if not ret:\n raise KeyError(\"No addresses found matching criteria.\")\n return ret", "def contacts(self):\r\n return contacts.Contacts(self)", "def search_user(message, search):\n found = []\n search = search.lower()\n users = hf.get_users()\n for user in users:\n if search in user['name'].lower():\n found.append('{} ({})'.format(user['name'], user[\"id\"]))\n if len(found) == 0:\n message.reply('No user found by that key: {}.'.format(search))\n return\n message.reply('Users found: {}'.format(', '.join(found)))", "def linearsearch(input, value):\n count = 0\n for i in input:\n if (value == i):\n count += 1\n if count > 0:\n return \"Value, {0}, is in the list\".format(value)\n else:\n return \"Value, {0}, cannot be found\".format(value)", "def get_names(book, phone):\n # поиск в словаре\n i_min = 0\n i_max = len(book)\n i = math.ceil(i_max / 2)\n\n while book[i][0]!=phone:\n #print(i, i_min, i_max, phone, book[i][0])\n #input()\n\n if book[i][0]==phone:\n return book[i][1]\n\n elif book[i][0] < phone:\n i_min = i\n i = i_min + math.ceil((i_max - i_min) / 2)\n\n elif book[i][0] > 
phone:\n i_max = i\n i = i_min + math.ceil((i_max - i_min) / 2)\n else:\n print(\"что-то пошло не так\")\n return None\n\n if i==i_min or i==i_max:\n return None\n\n if book[i][0]==phone:\n return book[i][1]\n\n return None", "def matchloc(alist,val): \n return [ilc for ilc,jlc in enumerate(alist) if jlc==val]", "def search():\n student_to_find=request.args.get(\"student\", None)\n print(f\"A buscar: {student_to_find}\")\n student_list=search_student(student_to_find)\n return render_template(\"search.html\",student_list_result=student_list)", "def book_search(search_point: str, info: str, data: list) -> list:\n result = []\n for i in range(len(data)):\n if search_point == \"shelf\":\n try:\n if int(info) == int(data[i][search_point]):\n result.append(data[i])\n except ValueError:\n if info in data[i][search_point].lower():\n result.append(data[i])\n else:\n if info in data[i][search_point].lower():\n result.append(data[i])\n return result", "def find(self, key, condition) -> list:\n pass", "def fetch_contacts(owner_account_id):\n resp = oauth.tapkey.get(f\"Owners/{owner_account_id}/Contacts?$select=id,identifier\")\n contacts = resp.json()\n return contacts", "def test_get_filter_effective_contacts(self):\n data = {\"type_contact\": 1}\n response = self.client.get(reverse('contacts-filter'), data)\n # import pdb; pdb.set_trace()\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data[\"count\"], 2)", "def search(self, btn):\n srec = [self.tbxName.get_text(),self.tbxTel.get_text(),self.tbxEmail.get_text()]\n self.cursor.execute(\"\"\"select * from Kontakty where Nazwa=? or Nr_Tel=? or Email=? \"\"\",srec)\n wynik = self.cursor.fetchone()\n if wynik is not None:\n print(wynik)\n self.tbxName.set_text(wynik[0])\n self.tbxTel.set_text(wynik[1])\n self.tbxEmail.set_text(wynik[2])\n self.update()\n self.show_contacts()", "def __ui_search_persons_by_name(self):\n searched_name = input(\"Introduce the name: \").strip().lower()\n if searched_name == \"\":\n print(\"You cannot search persons by an empty name!\\n\")\n return\n\n searched_persons = self.__person_service.find_persons_by_name(searched_name)\n\n if len(searched_persons) == 0:\n print('There is no person whose name contains \"{}\"!\\n'.format(searched_name))\n else:\n print(\"\")\n for person in searched_persons:\n print(person)\n print(\"\")", "def search_by_phone_number(self, phone_number):\r\n if len(re.findall(\"[^0-9-+ ]+\", phone_number)) or len([c for c in phone_number if c == '+']) > 1:\r\n raise PersonPhoneNumberException(\"Invalid phone number search input. 
Can only contain digits, hyphens,\"\r\n \"spaces, and a plus sign(+).\")\r\n phone_number = phone_number.replace(' ', '')\r\n phone_number = phone_number.replace('-', '')\r\n phone_number = phone_number.replace('+4', '')\r\n return self.__filter(self.get_all_persons(), lambda x: phone_number in x.phone_number.replace(' ', ''))", "def contactListClicked(self):\n \n contacts = self.userList.getSelectedItems()\n self.mergeButton.setEnabled(contacts != None and len(contacts) > 1)\n \n if contacts != None and len(contacts) == 1:\n self.messageList.filterByContact(contacts[0])\n else:\n self.messageList.removeFilter()", "def search(self, data):\n index = self.hash_function(data)\n return self.objects_list[index].search_item(data)", "def search_for_customer(self, name):\n customers_list = self.get_customers()\n return next((customer for customer in customers_list if customer.get('name') == name), {'name': None, 'parent':None, 'active': None, 'link': None })", "def search(self, query):", "def activitySearch (listAct,activity):\n \n for act in listAct:\n if (act.name == activity.name): \n return True", "def getContactByName(self, name):\n for contact in self.contacts:\n if name == contact.name:\n return contact\n\n return None", "def get_all(self):\n total_contacts = []\n get_count = {\n 'query': {\n 'object': 'CONTACT',\n 'select': {\n 'field': 'RECORDNO'\n },\n 'pagesize': '1'\n }\n }\n\n response = self.format_and_send_request(get_count)\n count = int(response['data']['@totalcount'])\n pagesize = 2000\n offset = 0\n for i in range(0, count, pagesize):\n data = {\n 'query': {\n 'object': 'CONTACT',\n 'select': {\n 'field': [\n 'RECORDNO',\n 'CONTACTNAME',\n 'COMPANYNAME',\n 'FIRSTNAME',\n 'LASTNAME',\n 'INITIAL',\n 'PRINTAS',\n 'TAXABLE',\n 'MAILADDRESS.ADDRESS1'\n ]\n },\n 'pagesize': pagesize,\n 'offset': offset\n }\n }\n contacts = self.format_and_send_request(data)['data']['CONTACT']\n total_contacts = total_contacts + contacts\n offset = offset + pagesize\n return total_contacts", "def search(self, key, headers=Headers()):", "def person_in_list(position: OrderedDict, lst: List[OrderedDict]):\n for p in filter(lambda x: x[\"person\"] == position[\"person\"], lst):\n return p\n return None", "def get_a_contact(self, uid):\n self.init_db(self._testing)\n\n query = \"SELECT {} FROM {} WHERE (id=?) 
ORDER BY id;\".format(\n \", \".join(Contact.columns_with_uid), Contact.table_name)\n\n data = self.db.conn.execute(query, (uid,))\n\n return [Contact(*item) for item in data]", "def show_contacts():\n data_list = queries2.contacts()[0]\n table_titles = queries2.contacts()[1]\n title = \"Contacts\"\n return render_template('pages.html', data_list=data_list, title=title, table_titles=table_titles)", "def get(self, set=''):\n params = {}\n if set: params['set'] = set\n\n request = self._connection.get('contacts.json', params=params)\n if request.status_code != 200:\n raise Exception('status code {0}: cannot get contacts'.format(request.status_code))\n return [User.parse(self._connection, each) for each in request.json()]", "def search_user(message, search):\n found = []\n search = search.lower()\n for userid, user in iteritems(message._client.users):\n if search in user['name'].lower():\n found.append('{} ({})'.format(user['name'], userid))\n if len(found) == 0:\n message.reply('No user found by that key: {}.'.format(search))\n return\n message.reply('Users found: {}'.format(', '.join(found)))", "def task_3_find_item_via_value(data: DT, value) -> DT:\n return [dic for dic in data if value in dic.values()]", "def shortsearch(term,location):\n results = search(term,location)['listings']\n result = []\n for business in results:\n result.append([business['id'],business['name'],\"Yellow Pages\"])\n return result", "def search(self, filter):\n return [note for note in self.notes if note.match(filter)]", "def get_contacts(self):\n feet = [\"REAR_RIGHT_FOOT\", \"REAR_LEFT_FOOT\",\n \"FRONT_RIGHT_FOOT\", \"FRONT_LEFT_FOOT\"]\n contacts = np.zeros(4, dtype=np.float32)\n for i, foot in enumerate(feet):\n if self.supervisor.getFromDef(foot).getNumberOfContactPoints() > 0:\n contacts[i] = 1.0\n return contacts", "def search(self, term):", "def findConnections(userToLookForConnections):\n import json\n with open(\"filter.json\", \"r\") as opened:\n filtering = json.load(opened) #Filter that contains all the excellent players from 2011 to 2017. 
Type: DICT\n for teamName, squadList in filtering.items():\n if userToLookForConnections in squadList:\n print(\"ciao\")\n #crea arco\n return None", "def search_by_email(self, request, **kwargs):\n self.method_check(request, allowed=['get'])\n self.throttle_check(request)\n\n keyword = request.GET['keyword']\n members = Member.objects.filter(email__icontains=keyword)\n\n bundles = []\n\n for member in members:\n bundle = self.build_bundle(obj=member, request=request)\n bundles.append(self.full_dehydrate(bundle, for_list=True))\n\n return self.create_response(request, bundles)", "def gremlin_contact_maps(dist):\n\n\tprint dist\n\tcontact_cutoff = 10\n\tgremlin = [[6,13],[9,22],[15,19],[14,18],[3,11],[34,40],[3,23],[36,40],[9,13],[25,28],[12,15],[11,23], \\\n\t\t[26,35],[12,18],[2,5],[17,21],[14,22],[6,9],[41,44],[15,18],[25,30],[9,16],[29,32],[30,33],[6,16]] \n\n\tcontacts = np.zeros(dist.shape)\n\tfor n in range(dist.shape[0]):\n\t\tfor m in range(dist.shape[1]):\n\t\t\tif dist[n][m] < contact_cutoff and (gremlin.count([n,m]) == 1 or gremlin.count([m,n]) == 1):\n\t\t\t\tcontacts[n][m] = 1\n\treturn contacts", "def test_get_contacts(self):\n pass", "def search():\n pass", "def search_by_account_number(self, account_num):\n for entry in self.entries:\n if entry['Account Number'] == int(account_num):\n self.pp_entry(entry)", "def getindex(self,name,searchfrom='name'):\n name = name.replace(':','_').lower()\n pat = re.compile(name)\n result = []\n\n for (i,elem) in enumerate(self.lat):\n if pat.search(elem[searchby]):\n result.append(i)\n return result", "def select_search(search_result: list, index: int):\n return search_result[index][0]", "def get_ldap_contact(ldap_conn, base_dn, employee_number, unique_id, attrs, cache):\r\n search_filter = '{0}={1}'.format(unique_id, employee_number)\r\n results = ldap_conn.search_s(base_dn, ldap.SCOPE_SUBTREE, search_filter, attrs)\r\n contact_found = {}\r\n if results:\r\n attrs_found = results[0][1]\r\n # cache the dn for the employee_number\r\n cache[employee_number] = results[0][0]\r\n for key in attrs:\r\n if key in attrs_found:\r\n contact_found[key] = attrs_found[key][0]\r\n else:\r\n contact_found[key] = False\r\n else:\r\n logging.warning('Cannot found employee in ldap ' + employee_number)\r\n return contact_found", "def contact(self, request, **kwargs):\n group_obj = self.get_object()\n contact_data = group_obj.contacts.all()\n if contact_data is not None:\n serializer_data = ContactSerializer(contact_data, many=True)\n return Response(serializer_data.data)\n else:\n return Response({'message': 'No details found for contact of this group'}, status=status.HTTP_404_NOT_FOUND)", "def search(self, filtr):\n return [note for note in self.notes if note.match(filtr)]", "def lookup(args):\n name = args[1]\n phonebook = args[2]\n try:\n with open(phonebook) as f:\n intermediate_variable = [line for line in f if line.index(name) >= 0]\n f.close()\n return intermediate_variable\n except IOError:\n return ['Error: no such phonebook.']\n except ValueError:\n return ['Error: %s not found.' 
% name]", "def contains(self, value):\n n = self.search(value)\n return (n.value==value, n)", "def _findAndAddContactByPhone(self, phone):\n try:\n contact = self._findAndAddContactsByPhone(phone)\n except TalkException as e:\n self.raise_error(e.reason)\n\n contact = contact.values()[0]\n\n for c in self.contacts:\n if c.id == contact.mid:\n self.raise_error(\"%s already exists\" % contact.displayName)\n return\n\n c = LineContact(self, contact)\n self.contacts.append(c)\n\n self.contacts.sort()\n return c" ]
[ "0.8009426", "0.68117666", "0.6737137", "0.6643471", "0.64197785", "0.63611305", "0.63020825", "0.6217333", "0.6197536", "0.61852074", "0.6019509", "0.60167944", "0.60021037", "0.5823502", "0.58208525", "0.57927597", "0.5778662", "0.5749409", "0.5742812", "0.5683135", "0.5647284", "0.5640702", "0.5614454", "0.560552", "0.55856794", "0.5564596", "0.5562407", "0.5545831", "0.5540595", "0.55298716", "0.54953563", "0.54812956", "0.54627997", "0.546016", "0.5453848", "0.5452514", "0.54507655", "0.5445446", "0.54303694", "0.5426671", "0.54140925", "0.5402318", "0.5398769", "0.5397479", "0.5386014", "0.5384959", "0.53782046", "0.53710717", "0.53412616", "0.5340507", "0.53401273", "0.5331999", "0.53285986", "0.52929", "0.5292655", "0.52867717", "0.5277128", "0.5264054", "0.5259463", "0.5256655", "0.5242983", "0.5236511", "0.52308863", "0.5226923", "0.52247465", "0.52215993", "0.5220016", "0.52085364", "0.52046144", "0.51974946", "0.51944876", "0.5187379", "0.51602995", "0.51600945", "0.5156715", "0.51506543", "0.5146032", "0.514016", "0.5134425", "0.51336795", "0.5104326", "0.50993097", "0.50965476", "0.50951356", "0.50852776", "0.5084831", "0.50800455", "0.5077134", "0.5068928", "0.50631976", "0.5061413", "0.50609654", "0.50607884", "0.5057491", "0.50553614", "0.5054565", "0.5053386", "0.5053036", "0.5053016", "0.5049988" ]
0.7042657
1
Sort list by a given key
def sort_list(self, key_):
    options = {
        'index': 0,
        'name' : 1,
        'surname': 2,
        'email': 3,
        'phone': 4,
    }
    if key_ in options.keys():
        key_ = options.get(key_)
    return(sorted(self.contacts, key = lambda x: x[key_]))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sort_list(list, key):\r\n list.sort(lambda x,y: cmp(key(x), key(y))) # Python < 2.4 hack\r\n return list", "def sort_by(dict_list, key):\n return sorted(dict_list, key=lambda k: k[key])", "def sort(self, key: Callable):\n self.data.sort(key=key)", "def sort(self, key: Callable):\n self.data.sort(key=key)", "def sort(self, key_func):\n pass", "def order_list_of_dicts_by_key(list_of_dicts, field_key):\r\n return sorted(list_of_dicts, key=lambda item: int(item[field_key]))", "def _sort_by(key):\n\n @staticmethod\n def sort_by(p_list, reverse=False):\n \"\"\"\n :rtype: typing.Iterable[Path]\n \"\"\"\n return sorted(\n p_list,\n key=lambda p: getattr(p, key),\n reverse=reverse,\n )\n\n return sort_by", "def keyListSort(keyList):\n keyList.sort(key=lambda y: y.GetName().lower())", "def sorted_nicely(l, key):\n convert = lambda text: int(text) if text.isdigit() else text\n alphanum_key = lambda item: [ convert(c) for c in re.split('([0-9]+)', key(item)) ]\n return sorted(l, key = alphanum_key)", "def SortList(self, key: callable = str.lower):\n temp_list = self.Items\n temp_list.sort(key=key)\n # delete contents of present listbox\n self.delete(0, Tags.End.value)\n # load listbox with sorted data\n for item in temp_list:\n self.insert(Tags.End.value, item)", "def sortList(lst, reverse=False, key=None):\n return sorted(lst, key=key, reverse=reverse)", "def sorted(cls:L, key=None, reverse=False):\n if isinstance(key,str): k=lambda o:getattr(o,key,0)\n elif isinstance(key,int): k=itemgetter(key)\n else: k=key\n return L(sorted(cls.items, key=k, reverse=reverse))", "def keysort(*args, **kwargs): # real signature unknown\n pass", "def sort_nicely(alist, dict_key=None):\n convert = lambda text: int(text) if text.isdigit() else text\n if dict_key is None:\n alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]\n else:\n alphanum_key = operator.itemgetter(dict_key)\n alist.sort(key=alphanum_key)", "def sortListWithHash(list,order_by,order_by_hash,default,desc):\n if order_by_hash.has_key(order_by):\n return sortList(list,order_by_hash[order_by],\"\",desc)\n else:\n return sortList(list,order_by_hash[default],\"\",desc)", "def sortListWithHash(list,order_by,order_by_hash,default,desc):\n if order_by_hash.has_key(order_by):\n\treturn sortList(list,order_by_hash[order_by],\"\",desc)\n else:\n\treturn sortList(list,order_by_hash[default],\"\",desc)", "def natsort(lst):\n lst.sort(key=natsort_key)", "def sort_list(self,list_):\r\n list_.sort()", "def sort(self, key: str):\n return self._select_interface(self._rc_sort, self._http_sort, key)", "def sort_key(self):\n ...", "def order(list, cmp=None, key=None, reverse=False):\n if cmp and key:\n f = lambda i, j: cmp(key(list[i]), key(list[j]))\n elif cmp:\n f = lambda i, j: cmp(list[i], list[j])\n elif key:\n f = lambda i, j: int(key(list[i]) >= key(list[j])) * 2 - 1\n else:\n f = lambda i, j: int(list[i] >= list[j]) * 2 - 1\n return sorted(range(len(list)), cmp=f, reverse=reverse)", "def sort_nicely(l):\r\n\tl.sort(key=alphanum_key)", "def dic_sort(list_of_dicts, key):\n for passnum in range(len(list_of_dicts) - 1, 0, -1):\n is_sorted = True\n for idx in range(passnum):\n if list_of_dicts[idx][key] > list_of_dicts[idx + 1][key]:\n temp = list_of_dicts[idx]\n list_of_dicts[idx] = list_of_dicts[idx + 1]\n list_of_dicts[idx + 1] = temp\n is_sorted = False\n if is_sorted:\n return", "def natsort_icase(lst):\n lst.sort(key=natsort_key_icase)", "def sort_by_key(request):\n return request.param", "def sort_by_key(request):\n return 
request.param", "def sort_nicely(l):\n l.sort(key=alphanum_key)", "def sort_nicely(l):\n l.sort(key=alphanum_key)", "def recursive_sort(list_to_sort, key=0):\n length = len(list_to_sort)\n if length <= 1:\n return list_to_sort\n swaplist = list_to_sort.copy()\n for i in range(0, length - 1):\n if swaplist[i][key] > swaplist[i + 1][key]:\n (swaplist[i], swaplist[i + 1]) = \\\n (swaplist[i + 1], swaplist[i])\n return recursive_sort(swaplist[0:length - 1], key) \\\n + swaplist[length - 1:length]", "def natsorted(lst):\n return sorted(lst, key=natsort_key)", "def natsort(lst: List[str]) -> None:\n lst.sort(key=natsort_key)", "def sort_nicely(l):\n l.sort(key=alphanum_key)\n return l", "def sort_nicely(l):\n l.sort(key=alphanum_key)\n return l", "def _key_sorting(item):\n key, value = item\n if isinstance(value, Link):\n return (1, key)\n return (0, key)", "def sort_L3():\n for item in d_list:\n item.sort(key=operator.itemgetter(1))", "def order_by(self, results, key_, direction=\"ASC\"):\n\n return sorted(results, key=lambda x: x.get(key_), reverse=direction==\"DESC\")", "def sorted_nicely(ls, key, rev=False):\n def convert(text):\n return int(text) if text.isdigit() else text\n\n def alphanum_key(item):\n return [convert(c) for c in re.split('([0-9]+)', key(item))]\n\n return sorted(ls, key=alphanum_key, reverse=rev)", "def human_sort(l):\n l.sort(key=alphanum_key)\n return l", "def natsort_icase(lst: List[str]) -> None:\n lst.sort(key=natsort_key_icase)", "def NiceSort(values, key=None):\n if key is None:\n keyfunc = NiceSortKey\n else:\n keyfunc = lambda value: NiceSortKey(key(value))\n\n return sorted(values, key=keyfunc)", "def _to_order(key):\n return list(sorted(key).index(char) for char in key)", "def _sort_key(k):\n ret = []\n for s in k.common_path:\n s = (s if isinstance(s, (int, text_type)) else s.decode())\n\n if isinstance(s, text_type) and s.isnumeric() or isinstance(s, int):\n ret.append(('', -int(s)))\n else:\n ret.append((s,))\n return ret", "def sort(self, args):\n if not args:\n self.err_print('One argument required')\n return\n\n _key = args[0]\n cur = self.ui.leftwin.highlighted().data\n try:\n ind = song.tags.index(_key)\n cur.change_sort(ind)\n self.ui.rightwin.disp()\n except:\n self.err_print('\"{}\" is not a valid key to sort by'.format(_key))", "def get_sorted_list(_dict, sorted_by_key=False, reverse=True):\n if sorted_by_key:\n return sorted(_dict.items(), key=operator.itemgetter(0), reverse=reverse)\n else:\n return sorted(_dict.items(), key=operator.itemgetter(1), reverse=reverse)", "def sort_names(li, by_which):\n \n if by_which == 'first':\n li.sort(key = Name.first)\n elif by_which == 'last':\n li.sort(key = Name.last)", "def sort(self, _cmp=None, key=None):\n if len(self) == 0:\n return\n\n if _cmp is not None:\n from functools import cmp_to_key\n from sage.misc.superseded import deprecation\n deprecation(21145, \"Please use 'key' to sort.\")\n self.__x.sort(key=cmp_to_key(_cmp))\n return\n\n if key is not None:\n self.__x.sort(key=key)\n return\n\n a = self.__x[0][0]\n sort_key = None\n if hasattr(a, 'dimension'):\n try:\n a.dimension()\n\n def sort_key(f):\n return (f[0].dimension(), f[1], f[0])\n except (AttributeError, NotImplementedError, TypeError):\n pass\n elif hasattr(a, 'degree'):\n try:\n a.degree()\n\n def sort_key(f):\n return (f[0].degree(), f[1], f[0])\n except (AttributeError, NotImplementedError, TypeError):\n pass\n\n if sort_key is None:\n\n def sort_key(f):\n return f[0]\n\n self.__x.sort(key=sort_key)", "def sort(self, key=None, 
reverse=False):\n self.log('sort()')\n self.contents.sort(key=key, reverse=reverse)\n return None", "def sort(self):\r\n self.list.sort(key=lambda x: ''.join(x))", "def insertionsort(A:list) -> \"void\":\n\tfor j in range(1, len(A)):\n\n\t\tkey = A[j]\n\t\ti = j - 1\n\n\t\twhile i >= 0 and A[i] > key:\n\t\t\tA[i+1] = A[i]\n\t\t\ti = i - 1\n\n\t\tA[i+1] = key", "def sort_by(processes, key, reverse=False):\n return sorted(processes, key=lambda process: process[key], reverse=reverse)", "def heap_sort(alist: list, key=None) -> list:\n newList = List()\n hp = BinaryHeap(func=key)\n\n for item in alist:\n hp.heappush(item)\n\n for _ in range(len(alist)):\n newList.append(hp.heappop())\n\n return newList", "def sortn(xs):\n return sorted(xs, key=sortnkey)", "def insertion_sort(items, key):\n # if order == \"reverse\":\n # compare = operator.lt\n # elif order == \"normal\":\n # compare = operator.gt\n global COMPARE\n\n # Repeat until all items are in sorted order\n for index in range(len(items)):\n iterator = index\n\n # Take first unsorted item\n while COMPARE(key(items[iterator-1]), key(items[index])) and iterator > 0:\n iterator -= 1\n # Insert it in sorted order in front of items\n sorteditem = items.pop(index)\n items.insert(iterator, sorteditem)\n\n return items", "def sort(self, key: Callable[[T], V]=None, reverse: bool=False) -> 'List[T]':\n return sorted(self.array, key=key, reverse=reverse)", "def sorted_todos(todo_list, keys, reverses=None):\n if not reverses:\n reverses = [False] * len(keys)\n sorted_todos = todo_list\n for ikey, key in enumerate(keys):\n sorted_todos = sorted(sorted_todos, key=attrgetter(key), reverse=reverses[ikey])\n return sorted_todos", "def sort_n(lists, key=None, reverse=False):\n lists = [list(x) for x in lists]\n if key is None:\n return unzip(sorted(zip(*lists), reverse=reverse))\n else:\n return unzip(sorted(zip(*lists), key=lambda x: key(x[0]), reverse=reverse))", "def sort_sequence_by_key(sequence, key_name, reverse=False):\n def _sorting_fn(item):\n # using this fn ensures that 'sort_sequence_by_key' will work\n # for a list of dictionaries or a list of objects\n # (the latter is a special use-case; a QS can use the '.order_by' filter, but an actual list of models cannot)\n try:\n return item.get(key_name)\n except AttributeError:\n return getattr(item, key_name)\n\n sorted_sequence = sorted(\n sequence,\n key=lambda item: _sorting_fn(item),\n reverse=reverse,\n )\n return sorted_sequence", "def sort(self):\n self.list.sort(key=lambda x: ''.join)", "def sort_1(l):\n pass", "def insertion_sort(my_list):\n\n # Start at the second element (pos 1).\n # Use this element to insert into the\n # list.\n for key_pos in range(1, len(my_list)): # n\n\n # Get the value of the element to insert\n key_value = my_list[key_pos]\n\n # Scan from right to the left (start of list)\n scan_pos = key_pos - 1\n\n # Loop each element, moving them up until\n # we reach the position the\n while (scan_pos >= 0) and (my_list[scan_pos] > key_value): # n/4, total of n squared / 4\n my_list[scan_pos + 1] = my_list[scan_pos]\n scan_pos = scan_pos - 1\n\n # Everything's been moved out of the way, insert\n # the key into the correct location\n my_list[scan_pos + 1] = key_value", "def insort(\n a: t.List[T],\n x: T,\n lo: int = 0,\n hi: t.Optional[int] = None,\n key: t.Callable[..., t.Any] = lambda el: el,\n) -> None:\n lo = bisect_right(a, x, lo, hi, key)\n a.insert(lo, x)", "def _mySort(self, alist):\n return sorted(alist, key=lambda x: (x[0].isdigit(), x.lower()))", "def arrange(self, 
card_key):\r\n self._cards_on_hand.sort(key=card_key)", "def _ordered_dictionary_sort(d, key=None):\n\n items = [(k, d[k]) for k in sorted(d, key=key)]\n\n d.clear()\n\n d.update(items)", "def pyargsort(seq,cmp=None,key=lambda x:x):\n return sorted(list(range(len(seq))),key=lambda x:key(seq.__getitem__(x)),cmp=None)", "def sort_by(self, param):\n sorted(self.books_all, key=lambda k: k[param])\n return self.books_all", "def sortby(self):\n ...", "def sort(self, cmp=None, key=None, reverse=False):\n o = order(list(self), cmp, key, reverse)\n # Modify the table in place, more than one variable may be referencing it:\n r=list(self._table); [self._table.__setitem__(i2, r[i1]) for i2, i1 in enumerate(o)]", "def sort(self, *args, **kargs):\n list.sort(self, *args, **kargs)\n self.emit('modified')", "def counting_sort(l, keyRange=None):\n\n if keyRange == None:\n keyRange = max(l) + 1\n countList = [0]*keyRange\n accumulateList = [0]*keyRange\n\n for key in l:\n countList[key] = countList[key] + 1\n for i in range(1, keyRange):\n accumulateList[i] = countList[i-1] + accumulateList[i-1]\n\n sorted_l = [None]*len(l)\n for j in range(len(l)):\n key = l[j]\n index = accumulateList[key]\n sorted_l[index] = key\n accumulateList[key] = accumulateList[key] + 1\n\n return sorted_l", "def sort_by_name(list_to_sort):\n return sorted(\n list_to_sort,\n key=lambda k: k['Name'].lower()\n )", "def natural_sorted(iterable, key=None, reverse=False):\n prog = re.compile(r\"(\\d+)\")\n\n def alphanum_key(element):\n \"\"\"Split given key in list of strings and digits\"\"\"\n return [int(c) if c.isdigit() else c for c in prog.split(element[0])]\n\n return sorted(iterable, key=alphanum_key, reverse=reverse)", "def sort(self, *keys):\n s = self._clone()\n s._sort = []\n for k in keys:\n if isinstance(k, str) and k.startswith('-'):\n k = {k[1:]: {\"order\": \"desc\"}}\n s._sort.append(k)\n return s", "def sort_list_by_president_order(pronoun_proportion_list):\n return sorted(pronoun_proportion_list, key=lambda (k,d,v): (d,k,v))", "def sort_byint(keys):\n keys = list(keys)\n def byint(a, b):\n try:\n return cmp(int(a), int(b))\n except TypeError:\n return cmp(a, b)\n keys.sort(byint)\n keys.reverse()\n return keys", "def diffsort(self, key):\n # Append newlines because difflib works better with them\n a = [s + '\\n' for s in self.d[key]]\n b = sorted(a, key=str.lower)\n return difflib.unified_diff(a, b, fromfile=key+' unsorted',\n tofile=key+' sorted')", "def sort_nicely(l): \n import re\n convert = lambda text: int(text) if text.isdigit() else text \n alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ] \n return sorted(l, key=alphanum_key)", "def sort_data(data):\n data.sort(key=itemgetter(3,2))\n return data", "def humanSort(l): \n convert = lambda text: int(text) if text.isdigit() else text \n alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ] \n l.sort( key=alphanum_key )", "def _sorted_items(x):\n return sorted(x.items(), key=lambda x: x[0])", "def sort_list(directory_list: List[str], charbefore:int = 20, extension:str = '.bin') -> List[str]:\n def func(x):\n charafter = -9 if extension =='.json' else -4\n # print(\"func: \", x[:charbefore]+x[charbefore:][:charafter].zfill(3))\n return x[:charbefore]+x[charbefore:][:charafter].zfill(3)\n \n return sorted(directory_list,key=func)", "def wsort(self, key: Callable[[T], V]=None, reverse: bool=False) -> '_[T]':\n return _(sorted(self.array, key=key, reverse=reverse))", "def human_sort( l ):\n convert = lambda text: 
int(text) if text.isdigit() else text\n alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]\n alphanum_key = None\n try:\n l.sort( key=alphanum_key )\n except TypeError:\n l.sort()\n return l", "def natsorted_icase(lst):\n return sorted(lst, key=natsort_key_icase)", "def sort_contacts(contacts):\n \n key_list = list(contacts.keys()) #get keys\n key_list.sort() #sort key_list\n sorted_list = [] #initialize sorted list\n for key in key_list:\n contact = (key, contacts[key][0], contacts[key][1]) #create tuple\n sorted_list += [contact] #add tuple to list\n \n return(sorted_list)", "def sorted_items_from_pages(cls, pages, item_key, sort_key):\n items = []\n for page in pages:\n items.extend(page[item_key])\n result = sorted(items, key=operator.itemgetter(sort_key))\n return result", "def _sort(self, row):\n if not self._head:\n self._head = self._create_head(row)\n if self._args.head:\n return row\n\n if 'key' not in self._state:\n self._state['key'] = self._replace_fields(self._args.key)\n\n r = list(map(self._convert, row))\n self._sorting_insert(self._result, r, key=lambda r: eval(self._state['key']))", "def sortedItems (dict):\n items = dict.items ()\n items.sort ()\n return items", "def _sorted(unsorted_iterable, key=None, reverse=False):\n\tunsorted_list = list(unsorted_iterable)\n\tsort_inplace(unsorted_list, key=key)\n\tif reverse:\n\t\tunsorted_list.reverse()\n\treturn unsorted_list", "def sortByValue(d):\r\n items=d.items()\r\n backitems=[ [v[1],v[0]] for v in items]\r\n backitems.sort(); backitems.reverse()\r\n return [ backitems[i][1] for i in range(0,len(backitems))]", "def sorted_nicely( l ): \n convert = lambda text: int(text) if text.isdigit() else text \n alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ] \n return sorted(l, key = alphanum_key)", "def sort(self, value_key=None, ascending=True):\r\n\t\tsorted_indexes = MultiPointData.sort(self, value_key=value_key, ascending=ascending)\r\n\t\tself.sdr = np.array(self.sdr)[sorted_indexes]\r\n\t\treturn sorted_indexes", "def titleSort(dictList):\n\tres = sorted(dictList, key=lambda k: getSortTitle(k))\n\treturn res", "def sort_0(l):\n l.sort()", "def sort_probs(probs_list):\n return sorted(probs_list, key=lambda x: x[1])", "def _sort_nodes(cls: Type, lst: List[Dict[str, Any]],\n by: str = 'item_title'):\n assert type(lst) == list\n lst.sort(key=lambda n: n[by])\n for n in lst:\n if 'nodes' in n:\n cls._sort_nodes(n['nodes'], by)", "def reverse_insort(\n a: t.List[T],\n x: T,\n lo: int = 0,\n hi: t.Optional[int] = None,\n key: t.Callable[..., t.Any] = lambda el: el,\n) -> None:\n lo = bisect_right(a, x, lo, hi, key, reversed=True)\n a.insert(lo, x)", "def sort(self):\n self.keys = sorted(self.keys)\n self.keys.reverse()", "def sort_prices(list_of_tuples):\n list_of_tuples.sort(key = get_price, reverse = True)\n return list_of_tuples", "def sorted_insert(x, l: list, key=lambda x: x, lo=0, hi=None):\n\n if lo < 0:\n raise ValueError('lo must be non-negative')\n if hi is None:\n hi = len(l)\n while lo < hi:\n mid = (lo+hi)//2\n if key(l[mid]) < key(x): lo = mid+1\n else: hi = mid\n l.insert(lo, x)\n\n return lo" ]
[ "0.85183585", "0.76804423", "0.74490815", "0.74490815", "0.7327094", "0.7177161", "0.70513463", "0.70497483", "0.7033415", "0.7021332", "0.6926903", "0.6830539", "0.68296474", "0.6816898", "0.6805124", "0.6768381", "0.6680146", "0.6676333", "0.66692996", "0.66565144", "0.65715444", "0.6530122", "0.64515483", "0.64502275", "0.64297235", "0.64297235", "0.6422089", "0.6422089", "0.6421461", "0.6420181", "0.64092445", "0.6408514", "0.6400567", "0.63771427", "0.63577485", "0.6324941", "0.63208354", "0.6267691", "0.62617856", "0.6232988", "0.61906064", "0.61898553", "0.61729014", "0.61493295", "0.6110121", "0.61075735", "0.61011046", "0.6092804", "0.60815775", "0.6079926", "0.6076066", "0.6072105", "0.6040751", "0.60235554", "0.6011926", "0.60110635", "0.59992963", "0.59897053", "0.5976417", "0.5961289", "0.5961281", "0.5958", "0.59460825", "0.5934906", "0.59284085", "0.59213454", "0.5916587", "0.59151375", "0.591067", "0.5909579", "0.59054184", "0.5892194", "0.5890888", "0.5875167", "0.5859434", "0.585568", "0.5852201", "0.5850092", "0.5848202", "0.5839417", "0.5826142", "0.58097595", "0.57983243", "0.5791872", "0.5788923", "0.5788806", "0.57858264", "0.5782107", "0.57678264", "0.57659596", "0.57652414", "0.5764083", "0.576049", "0.5732237", "0.5719675", "0.5715155", "0.57063025", "0.5706004", "0.5704745", "0.5704253" ]
0.73547125
4
Method to present data in a pretty way
def present_data(self, data=None):
    print('--------------------------------------------------------------------------')
    print('{:<10}{:<10}{:<15}{:<17}{:<17}'.format(
        'index', 'name', 'surname', 'email', 'phone'))
    print('--------------------------------------------------------------------------')
    data = data if data else self.contacts
    for contact in data:
        print('{:<10}{:<10}{:<15}{:<17}{:<17}'.format(
            contact[0], contact[1], contact[2], contact[3], contact[4]))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show_data(self, ):\r\n return print('society_name : {}\\n'\r\n 'flat : {}\\n'\r\n 'house_no : {}\\n'\r\n 'no_of_members : {}\\n'\r\n 'income : {}\\n '\r\n .format(self.society_name, self.flat, self.house_no, self.no_of_members, self.income))", "def show_data():", "def PrettyPrint(self):\r\n print(self.data)\r\n return", "def print_pretty(self, data):\n length = max(map(lambda x: len(x), data.keys()))\n print '+-------------------------------------+'\n print '| Company Name | Year | Month | Value |'\n print '+-------------------------------------+'\n for key, value in data.items():\n print '| %s | %s | %s | %s |' % (key, \\\n value['year'], value['month'], value['value'])\n print '+-------------------------------------+'", "def pprint(self, data):\n self._assert(data)\n data = self._render(data) # make elements ascii\n fmats = self._fmats(data) # get array of padding formats)\n for row in data:\n print(fmats.format(*row))", "def show(self):\n if self.is_empty():\n print('[]')\n return\n line = '['\n for item in self._data:\n line += '(' + str(item._key) + ', ' + str(item._value) + '), '\n line = line[:-2] + ']'\n print(line)", "def display(\n self, \n data: Dict[str, Any] = {},\n is_stacked: bool = False\n ) -> Dict[str, str]:\n return self.render_record_metadata(data, is_stacked=is_stacked)", "def display_data(data):\n\n index = 0\n for details in data:\n index += 1\n print(\"{5:1}{0}. {1:10} in {2:15} priority {3:>3}\".format(index, *details))", "def pprint(self):\n\t\tPrettyPrintUnicode().pprint(self.data)", "def displayData(cls):\n return (\n \"paramName\",\n \"autoFollow\",\n \"lowerDisplay\",\n \"upperDisplay\",\n \"binCount\",\n \"xscale\",\n \"yweight\"\n )", "def show_info(self): \n color= Fore.WHITE\n print(f\"\"\" {color} \nNombre: {self.name} \nRuta: {self.route }\nFecha de salida: {self.departure_date}\"\"\")\n print(\"<\"*8, \">\"*8)\n print(\"El precio por habitacion es:\")\n for key, value in self.prize.items():\n color_value= (Fore.GREEN + str(value))\n color_key= Fore.WHITE + \"Habitacion\" + \" \" + key\n print(f\"\"\" {color_key} : {color_value}$ \"\"\")\n \n print(Fore.WHITE + \"<\"*8, \">\"*8)\n for floor, info in self.floors_info.items():\n piso=(Fore.WHITE + floor)\n print(f\" {piso}:{info} \")\n \n \n print(\"<\"*8, \">\"*8)\n print(\"Capacidad por tipo de habitacion: \")\n for key, value in self.room_capacity.items():\n print(f\"Habitacion {key}: {value} personas \",\"\\t\")\n return \"\"", "def print_data_list(self):\n print('\\n{0}'.format(self.webDataFrame))", "def __str__(self):\n \n result = [\"rows: \" + str(self.rows),\n \"columns: \"+str(self.columns),\n \"data: \"+str(self.data)]\n return \"\\n\".join(result)", "def display(self):\n print(\n f'\\t\\t {self.name.upper()} {self.potency[0]}{self.potency[1]}\\t\\t'\n f' {self.dose_qty[0]} {self.dose_qty[1]} {self.dose[0]} {self.dose[1].upper()}')", "def __str__(self) -> str:\n if self.data is not None:\n list_of_params = []\n for key, data_dict in self.data.to_dict(orient=\"index\").items():\n data_dict[\"index\"] = key\n list_of_params.append(data_dict)\n formated_list_of_params = self.format_params(list_of_params)\n return f\"\\n{tabulate(formated_list_of_params, headers='keys', tablefmt='fancy_grid')}\"\n else:\n return \"Empty DataFrame\"", "def _repr_html_(self):\n params = OrderedDict()\n params[\"Name\"] = self.name\n params[\"Description\"] = self.description\n params[\"Ns\"] = self.Ns\n params[\"Ni\"] = self.Ni\n params[\"Kinetic Parameter\"] = self.kinetic_parameter_type\n params[\"Kinetic 
Parameter Value\"] = self.kinetic_parameter_value \n \n header = \"<table>\"\n footer = \"</table>\"\n html = \"\"\n\n for key, val in params.items():\n html += \"<tr><td>{0}</td><td>{1}</td></tr>\".format(key, val)\n\n return header + html + footer", "def organize_data(self, data):\n presentation = \"\"\n\n offices = data['offices']\n living = data['living']\n\n presentation += \"OFFICE \\n\\n\"\n if len(offices) == 0:\n presentation += \"No Office allocations\\n\"\n\n else:\n presentation += \"Office Allocations\\n\"\n for office in offices:\n presentation += office['room'].capitalize() + \"\\n\"\n presentation += \"Members: \\n\"\n presentation += ','.join(office['names'])\n\n presentation += \"LIVING SPACES \\n\\n\"\n if len(offices) == 0:\n presentation += \"No Living space allocations\\n\"\n\n else:\n presentation += \"Office Allocations\\n\"\n for space in living:\n presentation += living['room'].capitalize() + \"\\n\"\n presentation += ','.join(living['names'])\n\n return presentation", "def __display(self) -> None:\n ligne = 0\n for key, value in self.values.items():\n self.my_data(self.master, key, value, ligne, 0, 1, 1, 2, 2)\n ligne += 2", "def __str__(self):\n data_string = \"\"\n for list_el in self.data_list:\n for inner_list_el in list_el:\n data_string += str(inner_list_el)\n data_string += \"\\t\"\n data_string += \"\\n\"\n return data_string", "def pretty_view(self):\n return self.pretty_response()", "def pprint(self):\n # just here for defining the interface; work is done in subclasses\n pass", "def printData (data):\n print(str(len(data)) + '\\t' + str(data))", "def viewdata(data):\n\n print('_' * 50)\n print('Number of Results: ' + str(data[0]['numResults']))\n print('\\nSearchURL: ' + data[0]['searchURL'])\n print('_' * 50)\n\n i = 1\n for m in data[1]:\n print(str(i) + '. 
')\n for n in m:\n print(str(n) + ': ' + str(m[n]))\n i += 1\n print('\\n')", "def __repr__(self):\n\n\t\t# Preparing variables\n\t\tl_s_content = [\t\t# List containing the content to print\n\t\t\t\"> The structure object :\"\n\t\t]\n\n\t\t# PDB fields\n\t\tl_s_content.append(\"s_name : {}\".format(self.s_name))\n\n\t\t# Structural fields\n\t\tl_s_content.append(\"i_atom_count : {}\".format(self.i_atom_count))\n\t\tl_s_content.append(\"a_atoms : {}\".format(len(self.a_atoms)))\n\n\t\t# Grid fields\n\t\tl_s_content.append(\"b_loaded : {}\".format(self.b_loaded))\n\t\tl_s_content.append(\"a_grid : {}\".format(self.a_grid.size))\n\n\t\treturn \"\\n\".join(l_s_content)\t\t# Returns the content to show", "def the_display(self):\r\n return f\"\"\"\r\n {self.display[0]}\\n\r\n {self.display[1]}\\n\r\n {self.display[2]}\\n\r\n {self.display[3]}\\n\r\n {self.display[4]}\\n\r\n \"\"\"", "def display_data(self, title: str, subtitle: str = \"\\n\", datas: list = None):\n self.clean()\n print(f\"{title}\")\n print(f\"{subtitle}\\n\")\n for data in datas:\n print(f\"\\t{data}\")\n print(\"\\n\" * 2)\n self.stand_by_msg(\"\")", "def display(self):\n statement = f\"\"\"\n ------\n By {self.prescribed_by.name.upper()}\n ------\n Patient Detail!\n Name: {self.prescribed_to.name.capitalize()}\n Age: {self.prescribed_to.age}\n Gender: {self.prescribed_to.gender}\n Prescribed Medicines!\"\"\"\n print(statement)\n self.display_cure()", "def print_info(self):\n\n n_metabolites = len(self.metabolites)\n n_reactions = len(self.reactions)\n n_constraints = len(self.constraints)\n n_variables = len(self.variables)\n\n info = pd.DataFrame(columns=['value'])\n info.loc['name'] = self.name\n info.loc['description'] = self.description\n info.loc['num constraints'] = n_constraints\n info.loc['num variables'] = n_variables\n info.loc['num metabolites'] = n_metabolites\n info.loc['num reactions'] = n_reactions\n info.index.name = 'key'\n\n print(info)", "def prettify_details(data):\n new = []\n if \"terminaltables\" in sys.modules:\n for key, value in data.items():\n if key.startswith(\"__\"):\n continue\n if isinstance(value, (int, float)) and not isinstance(value, bool):\n new.append((key, \"{:15,.2f}\".format(value)))\n else:\n new.append((key, value))\n table = terminaltables.DoubleTable(new)\n table.inner_heading_row_border = False\n table.justify_columns[1] = 'right'\n return table.table.replace(\"\\n\", \"<br />\")\n else:\n formatted = json.dumps({k: v for k, v in data.items()\n if not k.startswith(\"__\")}, indent=4)\n new = formatted[2:-2].replace(\"\\n\", \"<br />\")\n return new", "def _repr_html_(self):\n return self.data.to_html()", "def display(self):\r\n msg = \"{wName:{n %s\\n\" % self.data.get(\"name\")\r\n plot = self.data.get(\"plot\")\r\n if plot:\r\n plot = Plot.objects.get(id=plot)\r\n msg += \"{wPlot:{n %s\\n\" % plot\r\n msg += \"{wMain Host:{n %s\\n\" % self.owner\r\n hosts = PlayerOrNpc.objects.filter(id__in=self.data.get(\"hosts\", []))\r\n if hosts:\r\n msg += \"{wOther Hosts:{n %s\\n\" % \", \".join(str(ob) for ob in hosts)\r\n msg += \"{wPublic:{n %s\\n\" % (\r\n \"Public\" if self.data.get(\"public_event\", True) else \"Private\"\r\n )\r\n msg += \"{wDescription:{n %s\\n\" % self.data.get(\"desc\")\r\n msg += \"{wDate:{n %s\\n\" % self.data.get(\"date\")\r\n location = self.data.get(\"location\")\r\n if location:\r\n from typeclasses.rooms import ArxRoom\r\n\r\n try:\r\n location = ArxRoom.objects.get(id=location)\r\n except ArxRoom.DoesNotExist:\r\n location = None\r\n 
self.data[\"location\"] = None\r\n msg += \"{wLocation:{n %s\\n\" % location\r\n plotroom = self.data.get(\"plotroom\")\r\n if plotroom:\r\n plotroom = PlotRoom.objects.get(id=plotroom)\r\n msg += \"{wPlotroom:{n %s\\n\" % plotroom\r\n msg += \"{wLargesse:{n %s\\n\" % dict(RPEvent.LARGESSE_CHOICES).get(\r\n self.data.get(\"celebration_tier\", 0)\r\n )\r\n gms = PlayerOrNpc.objects.filter(id__in=self.data.get(\"gms\", []))\r\n if gms:\r\n msg += \"{wGMs:{n %s\\n\" % \", \".join(str(ob) for ob in gms)\r\n msg += \"{wRisk:{n %s\\n\" % dict(RPEvent.RISK_CHOICES).get(\r\n self.data.get(\"risk\", RPEvent.NORMAL_RISK)\r\n )\r\n orgs = PlayerOrNpc.objects.filter(id__in=self.data.get(\"orgs\", []))\r\n if orgs:\r\n msg += \"{wOrg invitations:{n %s\\n\" % \", \".join(\r\n str(org) for org in self.orgs\r\n )\r\n invites = PlayerOrNpc.objects.filter(id__in=self.data.get(\"invites\", []))\r\n if invites:\r\n msg += \"{wInvitations:{n %s\\n\" % \", \".join(str(ob) for ob in invites)\r\n actions = self.data.get(\"actions\", [])\r\n if actions:\r\n msg += \"{wRelated Actions:{n %s\\n\" % \", \".join(str(ob) for ob in actions)\r\n return msg", "def print_data(self):\n total_score = 0.0\n\n title_game = 'Game'\n title_word = 'Word'\n title_word_status = 'Word Status'\n title_bad_guesses = 'Bad Guesses'\n title_missed_letters = 'Missed Letters'\n title_total_score = 'Total score'\n\n if not record_word:\n print(\"No words played.\")\n else:\n print('%-5s %-10s %-12s %-5s %-5s %s' %(title_game,title_word, title_word_status, title_bad_guesses, title_missed_letters,title_total_score))\n print('---- ---- ------------ ----------- -------------- -----------')\n for x in range(len(record_word)):\n print('%-5s %-10s %-13s %-11s %-13s %.2f'%(record_game[x],record_word[x],record_word_status[x],record_bad_guesses[x],record_missed_letters[x],record_total_score[x]))\n\n for x in range(len(record_total_score)):\n total_score = total_score + record_total_score[x]\n\n print('\\nFinal Score: %.2f' %total_score)", "def __str__(self):\n txt = ''\n if self.PrintHeader:\n txt = \" |\" + \"|\".join(sorted(self.rows[0].keys())).expandtabs() + \"|\"\n txt += \"\\n\"\n txt += \"|-\"\n for r in self.rows:\n txt += \"\\n|\"\n txt += \"|\".join([str(uround(r[key] , 2) if isinstance(r[key], (int, long, float, complex , Variable,AffineScalarFunc )) else r[key]) for key in sorted(self.rows[0].keys())]) + \"|\"\n txt += \"\\n|-\"\n if self.PrintSum:\n txt += \"\\n\"\n sumRow = self.GetSumRow()\n txt += \"| |\" + \"|\".join( [str(uround(sumRow[key] , 2) if isinstance(sumRow[key], (int, long, float, complex , Variable ,AffineScalarFunc )) else sumRow[key]) for key in sorted(self.rows[0].keys())[1:]] ) + \"|\"\n\n return txt", "def printpretty(self):\n print(self.string_rep())", "def printObj(self):\n return 'patient_id:{}, medication:{}, frequency:{}, start_dt:{},'\n 'end_dt:{}, noti_type:{}'.format(\n self.patients.data,\n self.medication.data,\n self.frequency.data,\n self.start_dt,\n self.end_dt.data,\n self.noti_type.data)", "def _showdata(self, prec=4):\n print('nh {0:d} nslices {1:d} nbl {2:d} ncp {3:d} nca {4:d} '.format(\n self.nh, self.nslices, self.nbl, self.ncp, self.nca), end=\"\")\n print(\"observables in np arrays with {:d} rows\".format(self.nslices))\n\n if len(self.observables) == 4:\n print('nca', self.nca)\n else:\n print()\n np.set_printoptions(precision=prec)\n\n print(self.fp.shape, \"fp (degrees, but stored internally in radians):\\n\",\n self.fp*self.degree, \"\\n\")\n print(self.fa.shape, \"fa:\\n\", self.fa, 
\"\\n\")\n\n print(self.cp.shape, \"cp (degrees, but stored internally in radians):\\n\",\n self.cp*self.degree, \"\\n\")\n if len(self.observables) == 4:\n print(self.ca.shape, \"ca:\\n\", self.ca, \"\\n\")\n # print(self.info4oif_dict)\n\n print(\"hole centers array shape:\", self.ctrs.shape)\n\n print(len(self.bholes), \"baseline hole indices\\n\", self.bholes)\n print(self.bls.shape, \"baselines:\\n\", self.bls)\n\n print(self.tholes.shape, \"triple hole indices:\\n\", self.tholes)\n print(self.tuv.shape, \"triple uv vectors:\\n\", self.tuv)\n\n print(self.qholes.shape, \"quad hole indices:\\n\", self.qholes)\n print(self.quvw.shape, \"quad uvw vectors:\\n\", self.quvw)", "def output(self):\n print \"Name:\", self.name\n print \"City:\", self.city\n print \"Country:\", self.country\n print \"Number of Reviews:\", len(self.sentiments)\n print \"Old Reviews (Stars):\", self.stars_avg\n print \"Old Reviews (%):\", self.stars_avg/5\n print \"New Rating (Stars)\", self.new_rating*5\n print \"New Rating (%):\", self.new_rating", "def overview(data):\n\n printer.table(['Name', 'El', 'Invariom name', 'Model compound'], head=True)\n for atom in data.iter_atoms(True):\n printer.table([atom.name, atom.element, atom.invariom_name, atom.invariom.molecule.name])\n printer.table(done=True)", "def __str__(self):\n print_info = f\"\\nStudent ID: {self._id}, Name: {self._name}, \" \\\n f\"Year: {self._year} \\nPhone: {str(self._phone)}, \" \\\n f\"Address: {str(self._address)} \" \\\n f\"\\nClasses: {str(self._classes)}\" \\\n f\"\\nBirth Date: {self._date}\"\n return print_info", "def __str__(self):\n r = []\n for item in sorted(self._data.keys()):\n correct, incorrect = self._data[item][True], self._data[item][False]\n acc = correct / (correct + incorrect)\n s = f\"{item:4} | Accuracy: {acc:.2f}% (diff {'+' if acc-item >=0 else ''}{acc-item:.2f}%) | correct: {correct:2}, incorrect: {incorrect:2}\" \n r.append(s)\n\n return \"\\n\".join(r)", "def __str__(self):\n tabuleiro = prettytable.PrettyTable(header=False)\n for linha in self.tabuleiro:\n tabuleiro.add_row(linha)\n return str(tabuleiro)", "def pprint(self):\n print(self.pprint_str())", "def print_data_members(self):\n keyVals = []\n for name in self.data_code['dataNames']:\n vals = getattr(self, name + 's')\n keyVals.append(vals)\n #print \"%ss = %s\" %(name,vals)\n\n msg = ''\n for name in self.data_code['dataNames']:\n msg += '%-10s ' % name\n msg += '\\n'\n\n nModes = len(keyVals[0])\n for i in xrange(nModes):\n for vals in keyVals:\n msg += '%-10g ' % vals[i]\n msg += '\\n'\n return msg + '\\n'", "def display_data(self):\n # type: () -> dict\n return {}", "def __str__(self):\n data_string = \"\"\n for i in range(self.height):\n row = \"\"\n for j in range(self.width):\n row += \"|\" + self.data[i][j]\n row += \"|\\n\"\n data_string += row\n data_string += \"-\" * (2 * self.width + 1)\n data_string += \"\\n\"\n\n # Column numbers are labeled modulo 10 to keep the characters\n # aligned correctly\n for i in range(self.width):\n data_string += \" \" + str(i % 10)\n\n return data_string", "def print_detailed_summary(data):\n longest_name = 0\n # Determine longest name for width formatting\n for item in data:\n if len(item[1]) > longest_name:\n longest_name = len(item[1])\n # Print formatted data\n for item in data:\n print(\"{} is taught by {:{width}} and has {:>3} students\".format(item[0], item[1], item[2], width=longest_name))", "def python_data_printer(cur):\n # Print a header.\n for fieldDesc in cur.description:\n print 
(fieldDesc[fdb.DESCRIPTION_NAME].ljust(fieldDesc[fdb.DESCRIPTION_DISPLAY_SIZE]),end=' ')\n print('')\n for fieldDesc in cur.description:\n print (\"-\" * max((len(fieldDesc[fdb.DESCRIPTION_NAME]),fieldDesc[fdb.DESCRIPTION_DISPLAY_SIZE])),end=' ')\n print('')\n # For each row, print the value of each field left-justified within\n # the maximum possible width of that field.\n fieldIndices = range(len(cur.description))\n for row in cur:\n for fieldIndex in fieldIndices:\n fieldValue = row[fieldIndex]\n if not isinstance(fieldValue,types.StringTypes):\n fieldValue = str(fieldValue)\n if isinstance(fieldValue,types.UnicodeType):\n fieldValue = fieldValue.encode('utf8')\n fieldMaxWidth = max((len(cur.description[fieldIndex][fdb.DESCRIPTION_NAME]),cur.description[fieldIndex][fdb.DESCRIPTION_DISPLAY_SIZE]))\n print (fieldValue.ljust(fieldMaxWidth),end=' ')\n print('')", "def pretty_print(self):\n pt = PrettyTable()\n for i in self.files_summary:\n pt.field_names = [\"File Name\", \"Classes\", \"Functions\", \"Lines\", \"Characters\"]\n pt.add_row(list([i, self.files_summary[i][\"class\"], self.files_summary[i][\"function\"], self.files_summary[i][\"line\"], self.files_summary[i][\"char\"]]))\n print(pt) #Using a Print statement here because i tried to return self.pt and it didnt give me anything but the print works", "def display(self):\r\n return str((self.last_name + \", \" + self.first_name+\": \" + self.phone_number + \"\\n\" + self.address + \"\\nStart Date: \" +\r\n self.start_date.strftime(\"%m\") + \"/\" + self.start_date.strftime(\"%d\") +\r\n \"/\" + self.start_date.strftime(\"%Y\")+\"\\nSalary: $\" + str(self.salary)))", "def print(self):\n print(self.pretty_str())", "def _show(self, indent = 0):\n print(\" \"*indent, \"Name:\", self.name)\n print(\" \"*indent, \"Description:\", self.description)", "def show_all_information(self):\n return self.__dict__\n # print(self.first_name)\n # print(self.last_name)\n # print(self.age)\n # print(self.name)\n # print(self.gender)\n # print(self.number_of_children)", "def __repr__(self):\n (sections, section_titles) = self._get_summary_struct()\n return _toolkit_repr_print(self, sections, section_titles, width=30)", "def display_simple(self):\n print(\"\") \n print(\"Date: {}\".format(self.date))\n print(\" Task name: {}\".format(self.task_name))\n print(\" Time spent: {} minutes\".format(self.time_spent))\n print(\" Notes: {}\".format(self.notes))\n print(\" Task number: {}\".format(self.task_number))\n print(\"\")", "def __str__(self):\n # Set up title\n r = '{:20.19} {:>10} {:>10}\\n'\n t = r.format(self.name, 'Days', 'FRBs')\n line = '-'*len(t.split('\\n')[-2].strip()) + '\\n'\n t += line\n\n # Format rates\n rdays = round(self.days, 3)\n t += r.format('In population', rdays, round(self.tot()))\n t += r.format('Detected', rdays, round(self.det, 3))\n t += r.format('Too late', rdays, round(self.late, 3))\n t += r.format('Too faint', rdays, round(self.faint, 3))\n t += r.format('Outside survey', rdays, round(self.out, 3))\n t += r.format('/Gpc^3', 365.25, round(self.vol, 3))\n t += r.format('Expected', round(self.exp, 4), 1)\n t += line\n\n return pprint(t, output=False)", "def display(self):\n self.display_divider()\n self.display_row(self.column_names)\n self.display_divider()\n for row in self.rows:\n self.display_row(row)\n self.display_divider()", "def display(self):\n # type: ()->None\n print('============')\n for key, value in self._ifAttributes.items():\n if isinstance(value, list):\n print(key + ': ')\n for item in value:\n print('\\t' + 
item)\n elif isinstance(value, dict):\n print(key + ': ')\n for item in value.keys():\n print('\\t' + item + ': ' + value[item])\n else:\n print(key + ': ' + str(value))\n print('============')", "def print_individuals(self):\n pt = PrettyTable()\n pt.field_names = ['ID', 'Name', 'Gender', 'Birthday', 'Age', 'Alive', 'Death', 'Child', 'Spouse']\n for i in self.individuals.values():\n pt.add_row(i.get_values())\n print(pt)", "def __repr__(self):\n indent = len(self.type) + 2\n jstr = ',\\n' + ' ' * indent\n\n props = self._display_properties()\n\n params = jstr.join('{:}={:}'.format(p, summary(self[p],\n indent=indent))\n for (p, dp) in props)\n return '<{}({:})>'.format(self.type, params)", "def render(self):\n return ('Application: ' + self.application, 'Data: ' + self.data)", "def print_table(self):\n print(\"%-12s%-12s%-12s%-12s%-12s\" % (\"index\",\"balance\",\"payment\",\"interest\",\"amortization\"))\n print(\"-------------------------------------------------------------\")\n for i in self.table[\"index\"]:\n print(\"%-12i%-12i%-12i%-12i%-12i\" % (self.table[\"index\"][i],self.table[\"balance\"][i]\\\n ,self.table[\"payment\"][i],self.table[\"interest\"][i],\\\n self.table[\"amortization\"][i]))", "def show(self, lst=None):\n\n def f(v):\n if np.size(v) == 1:\n return str(v)\n elif np.size(v) > 3:\n return str(np.shape(v))\n elif np.ndim(v) > 1:\n return str(np.shape(v))\n else:\n return str(v)\n\n def buffer(l, m, n=25):\n end = len(l) - 1\n buffered = []\n for i in range(m):\n if i > end:\n buffered.append(\"\".ljust(n))\n else:\n buffered.append(l[i].ljust(n))\n return buffered\n\n lst = self if lst is None else lst\n out = [IND.ljust(7) + INDEP.ljust(60) + DEP.ljust(60)]\n for row in lst:\n ind = [str(row[IND])]\n dep = [k + \": \" + f(v) for k, v in row[DEP].items()]\n indep = [k + \": \" + f(v) for k, v in row[INDEP].items()]\n m = max(len(dep), len(indep), 1)\n ind = buffer(ind, m, 7)\n dep = buffer(dep, m, 60)\n indep = buffer(indep, m, 60)\n for a, b, c in zip(ind, indep, dep):\n out.append(a + b + c)\n out.append(\"\")\n return \"\\n\".join(out)", "def print_data(place):\n raise NotImplementedError", "def __repr__(self):\n\n (sections, section_titles) = self._get_summary_struct()\n return _tkutl._toolkit_repr_print(self, sections, section_titles, width=30)", "def print_details(self):\n print(\"[{}]\".format(self.name))\n print(\"ID: \" + str(self.id))\n print(\"name: %s\" % self.name)\n print(\"URL: %s\" % self.url)\n print(\"CPUs: \" + str(self.cpus) + \" cores\")\n print(\"Mem: \" + self.memory_str)\n print(\"Tasks: \" + str(self.tasks_len))\n print(\"Uptime %s\" + self.uptime)\n print(\"Uptime Descriptive %s\" + self.uptime_descriptive)\n print(\" \")", "def __str__(self):\n s = \"Projection info:\\n\"\n s += \" #instances: \" + str(self.data_ninstances) + \"\\n\"\n s += \" data dimension: \" + str(self.data_dim) + \"\\n\"\n s += \" projection dimension: \" + str(self.projection_dim) + \"\\n\"\n s += \" data: \" + str(self.data[0]) + \"\\n\"\n s += \" \" + str(self.data[1]) + \"...\\n\"\n s += \" projection: \" + str(self.projection[0]) + \"\\n\"\n s += \" \" + str(self.projection[1]) + \"...\"\n return s", "def __str__(self):\n\n outstr = 'gear wheel data:\\n'\n # output gear data\n for date in self.data:\n outstr += date.ljust(10) + ':\\t' + str(self.data.get(date)) + '\\n'\n\n # output modification data\n if self.modifications:\n outstr += '\\nflank modifications:\\n'\n for date in self.modifications:\n outstr += date.ljust(10) + ':\\t' + 
str(self.modifications.get(date)) + '\\n'\n\n # output tooth form coordinates\n if self.formcoords:\n # upper and lower index of point-array\n outstr += '\\ntooth form coordinates:\\n'\n for coord in self.formcoords:\n outstr += str(coord[0]) + '\\t' + str(coord[1]) + '\\n'\n\n return outstr", "def __repr__(self):\n output = \"\"\n output +=\"V:\\n\"\n for row in self.V:\n output += \"\\t\"\n for el in row:\n output += str(el) + \" \" \n output += \"\\n\" \n \n output += \"\\nW:\\n\"\n for row in self.W:\n output += \"\\t\"\n for el in row:\n output += str(el) + \" \" \n output += \"\\n\"\n return output", "def display(self) -> str:\n lines, _, _, _ = self._display_aux()\n return '\\n'.join(lines)", "def __str__(self):\n print('=' * 20, \"Subject Information\", '=' * 20)\n print(\"Subject Name: {}\".format(self.name))\n print(\"Pulse Data Length for general questions\")\n print(self.pulse_length[0:20])\n print(\"Number of general Questions: {}\".format(\n len(self.pulse_data[0])))\n print(\"Pulse Data Length for video 1\")\n print(\"Number of questions for video 1: {}\".format(\n len(self.pulse_data[1])))\n print(self.pulse_length[20:40])\n print(\"Pulse Data Length for video 2\")\n print(\"Number of questions for video 2: {}\".format(\n len(self.pulse_data[0])))\n print(self.pulse_length[40:60])\n print('Label Data')\n print(self.label_data)\n print('Label Data shape: {}'.format(self.label_data.shape))\n\n return ''", "def output(self):\n \n str_title_len = 50\n str_date_len = 40\n str_purpose_len = 30\n str_price_len = 10\n str_payer_len = 20\n #str_comment_len =\n \n if len(self.title) > (str_title_len - 2):\n out_title = self.title[:str_title_len - 2] + \" |\"\n else:\n out_title = self.title + (\" \" * (str_title_len - len(self.title) - 2)) + \" |\"\n \n # if date is presented with <datetime> object, then\n # then output it in format %d.%m.%y (31.12.99)\n if type(self.date) is datetime.datetime:\n out_date = \" \" + datetime.datetime.strftime(\"%d.%m.%y\") + \" |\"\n # or output as string otherwise\n else:\n if len(self.date) > (str_date_len - 4):\n out_date = \" \" + self.date[:str_date_len - 4] + \" |\"\n else:\n out_date = \" \" + self.date + (\" \" * (str_date_len - len(self.date) - 4)) + \" |\"\n \n if len(self.purpose) > (str_purpose_len - 4):\n out_purpose = \" \" + self.purpose[:str_purpose_len - 4] + \" |\"\n else:\n out_purpose = \" \" + self.purpose + (\" \" * (str_purpose_len - len(self.purpose) - 4)) + \" |\"\n \n # enormous sums aren't supported (over 9999999 at the moment)\n if len(str(self.price)) > (str_price_len - 4):\n raise Exception\n out_price = (' ' * (str_price_len - len(str(self.price)) - 4) ) + str(self.price) + ' |'\n \n if len(self.payer) > (str_payer_len - 2):\n out_payer = \" \" + self.payer[:str_payer_len - 2]\n else:\n out_payer = \" \" + self.payer + (\" \" * (str_payer_len - len(self.payer) - 2))\n \n out_line = out_title + out_date + out_purpose + out_price + out_payer\n return out_line", "def show(self):\r\n \r\n clear() \r\n print \" \" + \"-\" * self.__width + \" \"\r\n \r\n for row in self.__buffer:\r\n rowData = \"\".join(str(i) for i in row)\r\n print \"|\" + rowData + \"|\"\r\n\r\n print \" \" + \"-\" * self.__width + \" \"\r\n self.clearBuffer()", "def display(self):\n print(f'{self.first_name} {self.last_name}, Customer#: '\n f'{self.customer_id}\\n{self.address}\\n{self.phone_number}\\n'\n f'{self.create_invoice()}')", "def printDetails(self):\n print str(self.number) + \": \" + self.title\n print \"URL: \" + self.URL\n print \"domain: \" + 
self.domain\n print \"score: \" + str(self.score) + \" points\"\n print \"submitted by: \" + self.submitter\n print \"# of comments: \" + str(self.commentCount)\n print \"'discuss' URL: \" + self.commentsURL\n print \"HN ID: \" + str(self.id)\n print \" \"", "def __repr__(self):\n\n (sections, section_titles) = self._get_summary_struct()\n\n return _toolkit_repr_print(self, sections, section_titles, width=30)", "def display_all(self):\n print(\"Price: \" + str(self.price))\n print(\"Speed: \" + str(self.speed) + \"mph\")\n print(\"Fuel: \" + self.fuel)\n print(\"Mileage: \" + str(self.mileage) + \"mpg\")\n print(\"Tax: \" + str(self.tax))\n return self", "def __repr__(self):\n dataListStrs = []\n for data in self:\n dataListStrs.append(repr(data))\n dataListStrs.append(\"None\")\n return \" -> \".join(dataListStrs)", "def bug_details_display(self,**kwargs):\n row=self.bug_data(**kwargs)\n print(\"*******************\")\n for k in row.keys():\n print(k,\":\", str(row[k]).replace(\"\\n\",\"\\n{}> \".format(k)))\n print(\"*******************\")", "def show(self): # pragma: no cover\n if self.data is None:\n raise AttributeError(\"The data must be deconvolved first !\")\n self.data.show()", "def format(self, data):", "def __str__(self):\n\n return (self._display_name + \"\\n\\t\" + self._description + \"\\n\")", "def display(self):\n for i in range(0, len(self.top_row)):\n self.top_row[i].display()\n for i in range(0, len(self.bottom_row)):\n self.bottom_row[i].display()\n for i in range(0, len(self.left_col)):\n self.left_col[i].display()\n for i in range(0, len(self.right_col)):\n self.right_col[i].display()", "def __repr__(self):\n output = ''\n for grp_id, col_list in self.data.items():\n output += grp_id + ':\\n'\n for col in col_list:\n output += ' ' * 4 + col + '\\n'\n return output", "def display(self):\n return f'{self._last_name},{self._first_name}:({self._student_id}) {self._major} gpa:{self._gpa}'", "def show_data(df):\n printmd(str(\"The Data contains **\" + str(df.shape[0])+ '** rows.'))\n printmd(\"*__Sample of the data :__*\")\n display(df.head(n=5))\n print(\"\")\n print(\"\")", "def Display(self, unused_args, result):\n util.PrettyPrint(result)", "def Display(self, unused_args, result):\n util.PrettyPrint(result)", "def display(self):\n for value, prob in self.items():\n print(value, prob)", "def __repr__(self):\n\n width = 40\n\n sections, section_titles = self._get_summary_struct()\n out = _tkutl._toolkit_repr_print(self, sections, section_titles,\n width=width)\n return out", "def __str__(self):\r\n return f\" Title : {self.title}, Author : {self.author}, Publish_year : {self.publish_year},\" \\\r\n f\" Pages : {self.pages} , Language : {self.language} , Price : {self.price} \"", "def print_data(self):\n for chain, gen in self.generations.items():\n print('Generations for chain %s: %d' % (chain, gen))\n print('Log likelihood effective size: %d' % self.loglik_effsize)\n print('Log likelihood relative difference: %f' % self.loglik_rel_diff)\n print('Max diff: %f' % self.max_diff)", "def pretty_display(self):\n\t\tpretty_space = PrettyTable()\n\t\tpretty_space.field_names = range(self.space.shape[1])\n\t\tcount = 0\n\t\tpretty_row = []\n\t\tfor cell in self.space.flat:\n\t\t\tcount = count + 1\n\t\t\tpretty_row.append(cell.state)\n\t\t\tif count >= self.space.shape[1]:\n\t\t\t\tpretty_space.add_row(pretty_row)\n\t\t\t\tcount = 0\n\t\t\t\tpretty_row = []\n\t\tprint(pretty_space)", "def print_data(self, e=0):\n #self.third_mnemo.Hide()\n #print(self.d)\n #print \"n: \" + 
str(self.n)\n #print \"data dict: \" + str(self.d)\n #print \"file: \" + self.file\n #print \"content: \" + self.txt\n #print(self.n)\n # print \"n: \" + str(self.n) + \" parent: \" + str(self.d[self.n][0]) + \" previous: \" + str(self.d[self.n][1]) + \" mext: \" + str(self.d[self.n][2]) + \" item: \" + str(self.d[self.n][3]) + \" mnemo: \" + str(self.d[self.n][4])\n pass", "def __repr__(self):\n return ''.join(f'\\ncompany: {self.company_name}\\nsize: {self.company_size}\\ncompany_founded: '\n f'{self.company_founded}\\ncompany_industry: {self.company_industry}\\ncompany_sector: '\n f'{self.company_sector}\\ncompany_type: {self.company_type}\\ncompany_rating: '\n f'{self.company_rating}\\ncompany_competitors: {self.company_competitors}\\ncompany_revenue: '\n f'{self.company_revenue}\\ncompany_headquarters: {self.company_headquarters}')", "def display(self):\r\n return str((self.last_name + \", \" + self.first_name+\": \" + self.phone_number + \"\\n\" + self.address + \"\\nStart Date: \" +\r\n self.start_date.strftime(\"%m\") + \"/\" + self.start_date.strftime(\"%d\") +\r\n \"/\" + self.start_date.strftime(\"%Y\")+\"\\nHourly Pay $\" + str('%.2f' % self.hourly_pay)))", "def __str__(self):\n number_stars = (30-len(self.name))//2\n title_line = '*'*number_stars+self.name+'*'*number_stars\n corpus = ''\n for i in range(len(self.ledger)):\n corpus += (((self.ledger[i])['description']))[0:min(23, len((self.ledger[i])['description']))].ljust(23)+(\n str(\"{:.2f}\".format(round(float((self.ledger[i])['amount']), 2)))).rjust(7)+'\\n'\n Total = 'Total: '+str(\"{:.2f}\".format((round(float(self.get_balance()), 2))))\n return title_line+'\\n'+corpus+Total", "def display(self):\r\n\t\tfor key, value in self.__dict__.items():\r\n\t\t\tprint(key.upper(), value, sep=': ')\r\n\r\n\t\tprint(\"\")", "def show(self) -> None:", "def pretty_print(self) -> PrettyTable:\n table_contain: PrettyTable = PrettyTable()\n table_contain.field_names = [\n \"File Name\", \"Classes\", \"Functions\", \"Lines\", \"Characters\"]\n for key, value in self.files_summary.items():\n table_contain.add_row([key] + list(value.values()))\n\n return table_contain", "def __str__(self):\n result = ''\n for row in range(self.getHeight()):\n for col in range(self.getWidth()):\n result += str(self.data[row][col]) + ' '\n result += '\\n'\n return result" ]
[ "0.7862266", "0.76273274", "0.7573931", "0.7204881", "0.6953377", "0.6934812", "0.68800884", "0.68433625", "0.6780841", "0.67295384", "0.6708125", "0.6696236", "0.6686045", "0.6659192", "0.6655323", "0.6651854", "0.6643173", "0.6629567", "0.661392", "0.6545845", "0.6533608", "0.65329474", "0.65290236", "0.64943075", "0.6478351", "0.64635944", "0.64634985", "0.6452469", "0.6444533", "0.64324516", "0.6415544", "0.6411605", "0.64051956", "0.64013076", "0.6401207", "0.64002514", "0.639002", "0.6389733", "0.6383639", "0.63831884", "0.6370483", "0.63700116", "0.6359389", "0.6352982", "0.6349057", "0.63476604", "0.63459736", "0.6317196", "0.63170743", "0.6312", "0.6311846", "0.6311812", "0.63052917", "0.630232", "0.62941414", "0.62939304", "0.62923896", "0.627272", "0.62718207", "0.6270575", "0.62659585", "0.6259508", "0.6257962", "0.6243098", "0.6234984", "0.62281096", "0.6226557", "0.62239003", "0.6222888", "0.62195146", "0.62159336", "0.6207596", "0.62063926", "0.62056756", "0.6202922", "0.6201013", "0.6196384", "0.6193237", "0.61911494", "0.6187416", "0.6182832", "0.6177244", "0.6174858", "0.61732197", "0.6170863", "0.61656946", "0.61656946", "0.61652607", "0.61637944", "0.6163297", "0.61605775", "0.6158318", "0.6157275", "0.61558014", "0.6155567", "0.61509377", "0.61504024", "0.61483335", "0.6146957", "0.6146161" ]
0.6941114
5
Get number of prunable parameters
def num_prunable_parameters(self) -> int: return sum(l.weight.numel() for l in self.emb_layers) + \ sum(weight.numel() for weight in self.emb_projs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def n_parameters(self):\n return self.pdm.n_parameters", "def num_params(self):", "def get_parameter_numbers(self) -> int:\n # TODO(jeikeilim): return the number of parameter list of each layers.\n n_param = sum([x.numel() for x in self.model.parameters()])\n return n_param", "def n_parameters(self):\n return sum([p.n_parameters for p in self.parameters])", "def n_parameters(self) -> int:\n return nkjax.tree_size(self.parameters)", "def count_parameters(model):\n return sum(p.numel() for p in model.parameters())", "def N(self):\n return len(self.parameters)", "def number_of_parameters(self):\n return len(self.parameters)", "def get_num_parameters(self):\n return len(self.parameters)", "def num_prunable_parameters(self) -> int:\n return sum(l.weight.numel() for l in self.out_layers) + \\\n sum(weight.numel() for weight in self.out_projs)", "def num_prunable_parameters(self) -> int:\n return sum(l.weight.numel() for l in self.out_layers) + \\\n sum(weight.numel() for weight in self.out_projs)", "def count_params(self):\n self.N = 0\n for name, param in self.model.named_parameters():\n self.N += param.numel()\n self.N_list.append(self.N)", "def count_parameters(self):\n return sum(p.numel() for p in self.parameters() if p.requires_grad)/1e6", "def count_parameters(self):\n return sum(p.numel() for p in self.parameters() if p.requires_grad)/1e6", "def get_num_params(self):\n if self.num_params is None:\n self.num_params = len(self.params)\n return self.num_params", "def params_count(model):\n return np.sum([p.numel() for p in model.parameters()]).item()", "def params_count(model):\n return np.sum([p.numel() for p in model.parameters()]).item()", "def params_count(model):\n return np.sum([p.numel() for p in model.parameters()]).item()", "def calculate_num_params(self):\n num_params = 0\n for p in self.parameters():\n num_params += p.data.view(-1).size(0)\n return num_params", "def count_parameters(model, tunable_only: bool = True) -> int:\n if tunable_only:\n return sum(p.numel() for p in model.parameters() if p.requires_grad)\n else:\n return sum(p.numel() for p in model.parameters())", "def n_parameters(self):\n return len(self._LIST_PARAMETERS)", "def num_parameters(self) -> int:\n return len(self) * self.convention.value", "def numel(self) -> int:\n return sum(p.numel() for p in self.parameters)", "def count_params():\n param_count = np.sum([np.prod(x.get_shape().as_list()) for x in tf.global_variables()])\n return param_count", "def _n_parameters(self):\n raise NotImplementedError", "def nb_parameters(net):\n return sum(p.numel() for p in net.parameters())", "def num_parameters(model):\n return sum([param.nelement() for param in model.parameters()])", "def num_params(self) -> int:\n return self._num_params", "def n_variables(self):\n return sum([p.n_variables for p in self.parameters])", "def num_parameters(self) -> int:\n if self._model:\n return self._model.num_parameters()\n return 0", "def num_parameters(self) -> int:\n if vocabulary.is_empty(self.vocab, self.config.features.configured_namespaces):\n self._LOGGER.warning(\n \"At least one vocabulary of your features is still empty! 
\"\n \"The number of trainable parameters usually depends on the size of your vocabulary.\"\n )\n return sum(p.numel() for p in self._model.parameters())", "def count_params(model):\n param_count = np.sum([np.prod(p.size()) for p in model.parameters()])\n return param_count", "def get_num_params(self):\n if self.num_params is None:\n import inspect\n argspec = inspect.getfullargspec(self.get_code())\n if argspec.varargs or argspec.varkw:\n self.num_params = -1\n else:\n self.num_params = len(argspec.args)\n return self.num_params", "def count_parameters(model):\r\n count = 0\r\n for parameter in list(model.parameters()):\r\n subcount = 1\r\n for size in list(parameter.size()):\r\n subcount *= size\r\n count += subcount\r\n return count", "def num_params(self):\n return len(self.params)", "def num_param(self):\n return len(self._parameters)", "def num_params(self):\r\n return np.sum([torch.tensor(param.shape).prod()\r\n for param in self.parameters()])", "def num_params(self):\n return np.sum([torch.tensor(param.shape).prod()\n for param in self.parameters()])", "def count_parms(self):\n min_freq = self.get_high_pass_index()\n rejection = self.rejection_at(np.arange(min_freq, self.nf))\n if rejection.ndim < 2:\n return np.sum(rejection)\n else:\n return np.sum(rejection, axis=1)", "def countParameters(self):\n return sum(p.numel() for p in self.model.parameters() if p.requires_grad)", "def _get_parameter_count(self):\n parameters_d = 5;\n size_h = self.model.size_h\n return (size_h - 1) + size_h * (\n (size_h - 1) + parameters_d + (self.model.size_aa - 1) + \n (self.model.size_ss - 1) + (self.model.size_cis - 1)\n )", "def count_params(model):\n total = 0\n for x in model.trainable_variables:\n total += np.prod(x.shape)\n return total", "def n_tracers(self):\n # Extract parameters\n pzs = self.params[0]\n return len(pzs)", "def n_tracers(self):\n # Extract parameters\n pzs = self.params[0]\n return len(pzs)", "def N(self) -> int:\n return self.params.N", "def countParam(self):\n return self.decl.args[mpi_array_calls[self.decl.name][self.pos]]", "def param_size(module:nn.Module):\n return np.sum(v.numel() for name, v in module.named_parameters() \\\n if \"auxiliary\" not in name)", "def nVariables(self):\n return len(self.variables)", "def count_parameters(also_print=True):\n total = 0\n if also_print:\n logging.info('Model Parameters:')\n for (_, v) in get_vars_to_save_and_restore().items():\n shape = v.get_shape()\n if also_print:\n logging.info('%s %s: %s', v.op.name, shape,\n format_number(shape.num_elements()))\n total += shape.num_elements()\n if also_print:\n logging.info('Total: %s', format_number(total))\n return total", "def count_parameters():\n total_parameters = 0\n for variable in tf.trainable_variables():\n # shape is an array of tf.Dimension\n name = variable.name\n shape = variable.get_shape()\n #print(shape)\n #print(len(shape))\n variable_parameters = 1\n for dim in shape:\n #print(dim)\n variable_parameters *= dim.value\n print(name, [dim for dim in shape], variable_parameters)\n total_parameters += variable_parameters\n print('Number of trainable parameters = {}'.format(total_parameters))", "def count_parameters(model):\n\treturn sum(p.numel() for p in model.parameters() if p.requires_grad)", "def n_params(model):\n \n n_params=sum([\n np.prod([tensor.size()[k] for k in range(len(tensor.size()))])\n for tensor in list(model.parameters())])\n \n return n_params", "def n_global_parameters(self):\n return self.global_transform.n_parameters", "def count_parameters(net):\r\n 
return sum(p.numel() for p in net.parameters() if p.requires_grad)", "def n_params(self, t_id):\n all_params = set()\n for i in range(t_id+1):\n model = self.get_model(i)\n all_params.update(model.parameters())\n all_params.update(model.buffers())\n\n return sum(map(torch.numel, all_params))", "def num_params():\n total_num = 0\n for var in tf.trainable_variables():\n shape = var.get_shape()\n total_num += functools.reduce(operator.mul, [dim.value for dim in shape], 1)\n return total_num", "def count_parameters(self) -> Tuple[int, int]:\n c_trained, c_total = 0, 0\n for p in self.parameters():\n increment = reduce(lambda x, y: x * y, p.size())\n if p.requires_grad:\n c_trained += increment\n c_total += increment\n return c_trained, c_total", "def num_trainable_parameters(self) -> int:\n if vocabulary.is_empty(self.vocab, self.config.features.configured_namespaces):\n self._LOGGER.warning(\n \"At least one vocabulary of your features is still empty! \"\n \"The number of trainable parameters usually depends on the size of your vocabulary.\"\n )\n return sum(p.numel() for p in self._model.parameters() if p.requires_grad)", "def num_parameters(self) -> int:\n return len(self.w) + prod(self.v.shape) - len(self.v)", "def get_n_parameters(self, exclude_pop_model=False):\n if (self._population_model is None) or exclude_pop_model:\n n_parameters = self._mechanistic_model.n_parameters()\n for error_model in self._error_models:\n n_parameters += error_model.n_parameters()\n return n_parameters\n\n return self._population_model.n_parameters()", "def __len__(self) -> int:\n return len(self.parameters)", "def count_params(layer):\n params = get_all_params(layer)\n shapes = [p.get_value().shape for p in params]\n counts = [np.prod(shape) for shape in shapes]\n return sum(counts)", "def count_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)", "def count_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)", "def count_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)", "def count_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)", "def len_parameters(self):\n return len(self._Parameters._fields)", "def nvar(self):\n return len(self.__vars)", "def local_param_size(self):\n size = 0\n for s in self.symbols[-1]:\n if self.symbols[-1][s].type == 'procedure': continue\n if not self.symbols[-1][s].isparam: continue\n size += 1\n return size", "def get_params_count(self):\n\t\treturn call_sdk_function('PrlResult_GetParamsCount', self.handle)", "def get_parameter_number(net):\n # print(type(net.parameters()))\n total_num = sum(p.numel() for p in net.parameters())\n trainable_num = sum(p.numel() for p in net.parameters() if p.requires_grad)\n return {'Total': total_num, 'Trainable': trainable_num}", "def count_params(all_params):\n nparams = len(all_params)\n nparam_vals = 0\n for i in range(nparams):\n param = all_params[i]\n param_shape = tuple(param.get_shape().as_list())\n nparam_vals += np.prod(param_shape)\n return nparam_vals", "def num_vars(self):\n return self.nvars", "def dimensions(self):\n return len(self.parameter_names)", "def count_parameters(model: Tuple[tuple, tuple, tuple, tuple, str]) -> int:\n return sum(p.numel() for p in model.parameters() if p.requires_grad)", "def num_parameters(self, train=True) -> torch.Tensor:\n params = torch.tensor(0, dtype=torch.float).to(self.out_projs[0])\n if train:\n for i in range(len(self.cutoffs)):\n 
n_proj = (self.masks[i].data.abs() > self.epsilon).sum()\n params += (self.out_projs[i].size(0) + self.out_layers[i].weight.size(0)) * n_proj\n elif self.compiled_projs is not None and self.compiled_embeddings is not None:\n for i in range(len(self.cutoffs)):\n if len(self.indices[i]) == 0:\n warnings.warn(\"Mask is all zero in AdaptiveSoftmax layer-{}\".format(i), RuntimeWarning)\n else:\n params += self.compiled_projs[i].numel() + \\\n self.compiled_embeddings[i].numel()\n return params", "def _get_param_size(module: torch.nn.Module):\n return sum([p.numel() * torch.tensor([], dtype=p.dtype).element_size() for p in module.parameters()])", "def num_parameters(self, train=True) -> torch.Tensor:\n params = torch.tensor(0, dtype=torch.float).to(self.emb_projs[0])\n if train:\n for i in range(len(self.cutoffs)):\n n_proj = (self.masks[i].data.abs() > self.epsilon).sum()\n params += (self.emb_projs[i].size(0) + self.emb_layers[i].weight.size(0)) * n_proj\n elif self.compiled_projs is not None and self.compiled_embeddings is not None:\n for i in range(len(self.cutoffs)):\n if len(self.indices[i]) == 0:\n warnings.warn(\"Mask is all zero in layer-{} AdaptiveEmbedding\".format(i), RuntimeWarning)\n else:\n params += self.compiled_projs[i].numel() + \\\n self.compiled_embeddings[i].numel()\n return params", "def get_total_trainable_parameter_size():\n total_parameters = 0\n import tensorflow as tf\n for variable in tf.trainable_variables():\n # shape is an array of tf.Dimension\n total_parameters += np.product([x.value for x in variable.get_shape()])\n return total_parameters", "def count_parameters(sess):\n\n variables_names = [v.name for v in tf.trainable_variables()]\n values = sess.run(variables_names)\n n_params = 0\n\n for k, v in zip(variables_names, values):\n print '-'.center(140, '-')\n print '{:60s}\\t\\tShape: {:20s}\\t{:20} parameters'.format(k, v.shape, v.size)\n\n n_params += v.size\n\n print '-'.center(140, '-')\n print 'Total # parameters:\\t\\t{}\\n\\n'.format(n_params)\n\n return n_params", "def count_params(model: torch.nn.Module) -> int:\n return sum(p.numel() for p in model.parameters() if p.requires_grad)", "def count_params(model: torch.nn.Module) -> int:\n return sum(p.numel() for p in model.parameters() if p.requires_grad)", "def getNumParameters(self):\n return _libsbml.Model_getNumParameters(self)", "def num_params(self):\n raise NotImplemented(\"Abstract, please implement in respective classes\")", "def getNumParameters(self):\n return _libsbml.KineticLaw_getNumParameters(self)", "def __len__(self):\n return self.nb_iterations", "def get_num_variables(self):\n return len(self.variables)", "def GetNumberOfParameters(self):\n return _ITKCostFunctionsPython.itkCostFunction_GetNumberOfParameters(self)", "def get_n_params(var_list):\n return int(np.sum([np.product(\n [x.value for x in var.get_shape()]) for var in var_list]))", "def num_vars(self):\n return len(self.bounds.lb)", "def __len__(self) -> int:\n return len(self.variables)", "def num_vars(self):\n return self._nvars", "def numpoints(self):\n return len(self.pars) + 1 # so dof is 1", "def count_total_params(model):\n trainable_count = int(\n numpy.sum([K.count_params(p) for p in set(model.trainable_weights)]))\n non_trainable_count = int(\n numpy.sum([K.count_params(p) for p in set(model.non_trainable_weights)]))\n return trainable_count, non_trainable_count", "def __len__(self):\n # Product function that can handle iterables (np.product can't).\n product = partial(reduce, operator.mul)\n return sum(product(len(v) 
for v in p.values()) if p else 1\n for p in self.param_grid)", "def __len__(self):\n return len(self.params)", "def num_parameters(self, train=True) -> torch.Tensor:\n params = torch.tensor(0, dtype=torch.float).to(self.out_projs[0])\n if train:\n for i in range(len(self.cutoffs)):\n n_proj = self.masks[i].l0_norm()\n params += (self.out_projs[i].size(0) + self.out_layers[i].weight.size(0)) * n_proj\n elif self.compiled_projs is not None and self.compiled_embeddings is not None:\n for i in range(len(self.cutoffs)):\n if len(self.indices[i]) == 0:\n warnings.warn(\"Mask is all zero in AdaptiveSoftmax layer-{}\".format(i), RuntimeWarning)\n else:\n params += self.compiled_projs[i].numel() + \\\n self.compiled_embeddings[i].numel()\n return params", "def num_parameters(self, train=True) -> torch.Tensor:\n params = torch.tensor(0, dtype=torch.float).to(self.emb_projs[0])\n if train:\n for i in range(len(self.cutoffs)):\n n_proj = self.masks[i].l0_norm()\n params += (self.emb_projs[i].size(0) + self.emb_layers[i].weight.size(0)) * n_proj\n elif self.compiled_projs is not None and self.compiled_embeddings is not None:\n for i in range(len(self.cutoffs)):\n if len(self.indices[i]) == 0:\n warnings.warn(\"Mask is all zero in layer-{} AdaptiveEmbedding\".format(i), RuntimeWarning)\n else:\n params += self.compiled_projs[i].numel() + \\\n self.compiled_embeddings[i].numel()\n return params", "def calculate_num_params(self) -> None:\n for name, param in self.module.named_parameters():\n self.num_params += param.nelement()\n self.trainable &= param.requires_grad\n\n if name == \"weight\":\n ksize = list(param.size())\n # to make [in_shape, out_shape, ksize, ksize]\n if len(ksize) > 1:\n ksize[0], ksize[1] = ksize[1], ksize[0]\n self.kernel_size = ksize\n\n # RNN modules have inner weights such as weight_ih_l0\n elif \"weight\" in name:\n self.inner_layers[name] = list(param.size())" ]
[ "0.75405735", "0.75211614", "0.738875", "0.7387541", "0.73810554", "0.73747927", "0.7355472", "0.73516285", "0.73359424", "0.7311632", "0.7311632", "0.7266938", "0.72314495", "0.72314495", "0.72164834", "0.7203205", "0.7203205", "0.7203205", "0.7185691", "0.71791095", "0.7174818", "0.7153565", "0.7147031", "0.71392816", "0.71322024", "0.7110126", "0.7075877", "0.7061964", "0.70428026", "0.7023761", "0.7022947", "0.7002628", "0.69975686", "0.6995272", "0.69509196", "0.69483453", "0.6928968", "0.6928118", "0.69278497", "0.69151187", "0.6909454", "0.6865862", "0.6845796", "0.6845796", "0.68222374", "0.6812105", "0.68063724", "0.6789041", "0.6788805", "0.67466265", "0.6745521", "0.67444295", "0.67302775", "0.6720803", "0.6719991", "0.67177933", "0.6702116", "0.66793615", "0.6676897", "0.6676889", "0.6667523", "0.66626525", "0.6658684", "0.6658684", "0.6658684", "0.6658684", "0.6657855", "0.66550386", "0.66406214", "0.6617321", "0.66127974", "0.6611052", "0.66075385", "0.66049236", "0.65821505", "0.6576277", "0.65751797", "0.65729064", "0.6565648", "0.65637845", "0.6560861", "0.6560861", "0.65540624", "0.65441906", "0.6537034", "0.6536052", "0.65311366", "0.65074724", "0.64652073", "0.64433426", "0.6442025", "0.643742", "0.6431012", "0.6429944", "0.6423827", "0.6415726", "0.6397735", "0.6387696", "0.6358759" ]
0.73142964
9
Get number of parameters.
def num_parameters(self, train=True) -> torch.Tensor: params = torch.tensor(0, dtype=torch.float).to(self.emb_projs[0]) if train: for i in range(len(self.cutoffs)): n_proj = self.masks[i].l0_norm() params += (self.emb_projs[i].size(0) + self.emb_layers[i].weight.size(0)) * n_proj elif self.compiled_projs is not None and self.compiled_embeddings is not None: for i in range(len(self.cutoffs)): if len(self.indices[i]) == 0: warnings.warn("Mask is all zero in layer-{} AdaptiveEmbedding".format(i), RuntimeWarning) else: params += self.compiled_projs[i].numel() + \ self.compiled_embeddings[i].numel() return params
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_num_parameters(self):\n return len(self.parameters)", "def get_num_params(self):\n if self.num_params is None:\n import inspect\n argspec = inspect.getfullargspec(self.get_code())\n if argspec.varargs or argspec.varkw:\n self.num_params = -1\n else:\n self.num_params = len(argspec.args)\n return self.num_params", "def number_of_parameters(self):\n return len(self.parameters)", "def get_num_params(self):\n if self.num_params is None:\n self.num_params = len(self.params)\n return self.num_params", "def num_param(self):\n return len(self._parameters)", "def n_parameters(self):\n return len(self._LIST_PARAMETERS)", "def num_parameters(self) -> int:\n return len(self) * self.convention.value", "def num_params(self) -> int:\n return self._num_params", "def num_params(self):", "def num_params(self):\n return len(self.params)", "def n_parameters(self):\n return self.pdm.n_parameters", "def N(self):\n return len(self.parameters)", "def _n_parameters(self):\n raise NotImplementedError", "def n_parameters(self):\n return sum([p.n_parameters for p in self.parameters])", "def get_parameter_numbers(self) -> int:\n # TODO(jeikeilim): return the number of parameter list of each layers.\n n_param = sum([x.numel() for x in self.model.parameters()])\n return n_param", "def calculate_num_params(self):\n num_params = 0\n for p in self.parameters():\n num_params += p.data.view(-1).size(0)\n return num_params", "def count_params(self):\n self.N = 0\n for name, param in self.model.named_parameters():\n self.N += param.numel()\n self.N_list.append(self.N)", "def get_params_count(self):\n\t\treturn call_sdk_function('PrlResult_GetParamsCount', self.handle)", "def countParam(self):\n return self.decl.args[mpi_array_calls[self.decl.name][self.pos]]", "def n_parameters(self) -> int:\n return nkjax.tree_size(self.parameters)", "def num_parameters(self) -> int:\n if self._model:\n return self._model.num_parameters()\n return 0", "def len_parameters(self):\n return len(self._Parameters._fields)", "def params_count(model):\n return np.sum([p.numel() for p in model.parameters()]).item()", "def params_count(model):\n return np.sum([p.numel() for p in model.parameters()]).item()", "def params_count(model):\n return np.sum([p.numel() for p in model.parameters()]).item()", "def count_parameters(model):\n return sum(p.numel() for p in model.parameters())", "def get_num_parameters(form):\n n_args = len(inspect.signature(form).parameters)\n tree = form(*[SyntaxTreeNode('_' + str(i)) for i in range(n_args)])\n return len(get_unique_parameters(tree))", "def nb_parameters(net):\n return sum(p.numel() for p in net.parameters())", "def numel(self) -> int:\n return sum(p.numel() for p in self.parameters)", "def _get_parameter_count(self):\n parameters_d = 5;\n size_h = self.model.size_h\n return (size_h - 1) + size_h * (\n (size_h - 1) + parameters_d + (self.model.size_aa - 1) + \n (self.model.size_ss - 1) + (self.model.size_cis - 1)\n )", "def GetNumberOfParameters(self):\n return _ITKCostFunctionsPython.itkCostFunction_GetNumberOfParameters(self)", "def num_params(self):\n return np.sum([torch.tensor(param.shape).prod()\n for param in self.parameters()])", "def getNumParameters(self):\n return _libsbml.KineticLaw_getNumParameters(self)", "def __len__(self) -> int:\n return len(self.parameters)", "def num_params(self):\r\n return np.sum([torch.tensor(param.shape).prod()\r\n for param in self.parameters()])", "def getNumParameters(self):\n return _libsbml.Model_getNumParameters(self)", "def dimensions(self):\n return 
len(self.parameter_names)", "def num_parameters(self) -> int:\n return len(self.w) + prod(self.v.shape) - len(self.v)", "def count_parameters(self):\n return sum(p.numel() for p in self.parameters() if p.requires_grad)/1e6", "def count_parameters(self):\n return sum(p.numel() for p in self.parameters() if p.requires_grad)/1e6", "def num_params(self):\n raise NotImplemented(\"Abstract, please implement in respective classes\")", "def count_params(model):\n param_count = np.sum([np.prod(p.size()) for p in model.parameters()])\n return param_count", "def count_parameters(model):\r\n count = 0\r\n for parameter in list(model.parameters()):\r\n subcount = 1\r\n for size in list(parameter.size()):\r\n subcount *= size\r\n count += subcount\r\n return count", "def num_parameters(model):\n return sum([param.nelement() for param in model.parameters()])", "def N(self) -> int:\n return self.params.N", "def n_tracers(self):\n # Extract parameters\n pzs = self.params[0]\n return len(pzs)", "def n_tracers(self):\n # Extract parameters\n pzs = self.params[0]\n return len(pzs)", "def count_params():\n param_count = np.sum([np.prod(x.get_shape().as_list()) for x in tf.global_variables()])\n return param_count", "def length(self):\n return int(np.sum([x.length for x in self.parameters]))", "def countParameters(self):\n return sum(p.numel() for p in self.model.parameters() if p.requires_grad)", "def n_variables(self):\n return sum([p.n_variables for p in self.parameters])", "def __len__(self):\n return len(self.params)", "def num_parameters(self) -> int:\n if vocabulary.is_empty(self.vocab, self.config.features.configured_namespaces):\n self._LOGGER.warning(\n \"At least one vocabulary of your features is still empty! \"\n \"The number of trainable parameters usually depends on the size of your vocabulary.\"\n )\n return sum(p.numel() for p in self._model.parameters())", "def num_hyperparameters(self):\n return self._hyperparameters.size", "def num_hyperparameters(self):\n return self._hyperparameters.size", "def _get_param_size(module: torch.nn.Module):\n return sum([p.numel() * torch.tensor([], dtype=p.dtype).element_size() for p in module.parameters()])", "def param_size(module:nn.Module):\n return np.sum(v.numel() for name, v in module.named_parameters() \\\n if \"auxiliary\" not in name)", "def count_params(all_params):\n nparams = len(all_params)\n nparam_vals = 0\n for i in range(nparams):\n param = all_params[i]\n param_shape = tuple(param.get_shape().as_list())\n nparam_vals += np.prod(param_shape)\n return nparam_vals", "def n_elements_one_param(self, param_name):\n p = self._get_one_param(param_name)\n return len(p)", "def count_parameters(also_print=True):\n total = 0\n if also_print:\n logging.info('Model Parameters:')\n for (_, v) in get_vars_to_save_and_restore().items():\n shape = v.get_shape()\n if also_print:\n logging.info('%s %s: %s', v.op.name, shape,\n format_number(shape.num_elements()))\n total += shape.num_elements()\n if also_print:\n logging.info('Total: %s', format_number(total))\n return total", "def local_param_size(self):\n size = 0\n for s in self.symbols[-1]:\n if self.symbols[-1][s].type == 'procedure': continue\n if not self.symbols[-1][s].isparam: continue\n size += 1\n return size", "def n_params(self, t_id):\n all_params = set()\n for i in range(t_id+1):\n model = self.get_model(i)\n all_params.update(model.parameters())\n all_params.update(model.buffers())\n\n return sum(map(torch.numel, all_params))", "def getNumArguments(self):\n return 
_libsbml.SBMLExternalValidator_getNumArguments(self)", "def get_num_variables(self):\n return len(self.variables)", "def n_params(model):\n \n n_params=sum([\n np.prod([tensor.size()[k] for k in range(len(tensor.size()))])\n for tensor in list(model.parameters())])\n \n return n_params", "def count_parameters(model, tunable_only: bool = True) -> int:\n if tunable_only:\n return sum(p.numel() for p in model.parameters() if p.requires_grad)\n else:\n return sum(p.numel() for p in model.parameters())", "def __len__(self):\n if self.args is None:\n return 0\n return len(vars(self.args))", "def count_params(layer):\n params = get_all_params(layer)\n shapes = [p.get_value().shape for p in params]\n counts = [np.prod(shape) for shape in shapes]\n return sum(counts)", "def num_arguments(self) -> int:\n if 'arguments' in self._event:\n return len(self._event['arguments'])\n return 0", "def numpoints(self):\n return len(self.pars) + 1 # so dof is 1", "def count_parameters(net):\r\n return sum(p.numel() for p in net.parameters() if p.requires_grad)", "def nVariables(self):\n return len(self.variables)", "def num_vars(self):\n return self.nvars", "def count_layer_params(layer):\n num_params = 0\n name, param_names, dims, _, _ = layer.get_layer_info()\n nparams = len(dims)\n for j in range(nparams):\n num_params += np.prod(dims[j])\n return num_params", "def count_parameters(model):\n\treturn sum(p.numel() for p in model.parameters() if p.requires_grad)", "def count_parameters(model: Tuple[tuple, tuple, tuple, tuple, str]) -> int:\n return sum(p.numel() for p in model.parameters() if p.requires_grad)", "def getNumArguments(self):\n return _libsbml.FunctionDefinition_getNumArguments(self)", "def get_num_args(function):\n import inspect\n args = inspect.getfullargspec(function)\n num_args = 0\n if args[0] is not None:\n num_args += len(args[0])\n if 'self' in args[0]:\n num_args -= 1\n if args[1] is not None:\n num_args += len(args[1])\n if args[2] is not None:\n num_args += len(args[2])\n # do not count defaults of keywords conatined in args[3]\n # if args[3] is not None:\n # num_args += len(args[3])\n return num_args", "def get_n_params(var_list):\n return int(np.sum([np.product(\n [x.value for x in var.get_shape()]) for var in var_list]))", "def num_params():\n total_num = 0\n for var in tf.trainable_variables():\n shape = var.get_shape()\n total_num += functools.reduce(operator.mul, [dim.value for dim in shape], 1)\n return total_num", "def count_parameters(sess):\n\n variables_names = [v.name for v in tf.trainable_variables()]\n values = sess.run(variables_names)\n n_params = 0\n\n for k, v in zip(variables_names, values):\n print '-'.center(140, '-')\n print '{:60s}\\t\\tShape: {:20s}\\t{:20} parameters'.format(k, v.shape, v.size)\n\n n_params += v.size\n\n print '-'.center(140, '-')\n print 'Total # parameters:\\t\\t{}\\n\\n'.format(n_params)\n\n return n_params", "def num_vars(self):\n return self._nvars", "def args_count(args: list) -> int:\n\n\treturn len(args)", "def get_arg_count(fun):\n if isclass(fun):\n return len(signature(fun.__call__).parameters)\n return len(signature(fun).parameters)", "def count(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"count\")", "def count_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)", "def count_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)", "def count_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)", "def 
count_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)", "def num_params(architecture): #\n \n total_parameters = 0\n for layer in range(1,len(architecture)+1):\n weight_dims = np.shape(architecture['layer{}'.format(layer)][2])\n try:\n params = weight_dims[0]*weight_dims[1]*weight_dims[2]\n except:\n try:\n params = weight_dims[0]*weight_dims[1]\n except:\n try:\n params = weight_dims[0]\n except:\n params = 0\n total_parameters += params\n return total_parameters", "def count_parameters(self) -> Tuple[int, int]:\n c_trained, c_total = 0, 0\n for p in self.parameters():\n increment = reduce(lambda x, y: x * y, p.size())\n if p.requires_grad:\n c_trained += increment\n c_total += increment\n return c_trained, c_total", "def n_global_parameters(self):\n return self.global_transform.n_parameters", "def getNumberOfKeys(self) -> int:\n ...", "def num_prunable_parameters(self) -> int:\n return sum(l.weight.numel() for l in self.emb_layers) + \\\n sum(weight.numel() for weight in self.emb_projs)", "def num_prunable_parameters(self) -> int:\n return sum(l.weight.numel() for l in self.emb_layers) + \\\n sum(weight.numel() for weight in self.emb_projs)", "def num_prunable_parameters(self) -> int:\n return sum(l.weight.numel() for l in self.out_layers) + \\\n sum(weight.numel() for weight in self.out_projs)", "def num_prunable_parameters(self) -> int:\n return sum(l.weight.numel() for l in self.out_layers) + \\\n sum(weight.numel() for weight in self.out_projs)", "def count_objects_of_size(self, n: int, **parameters: int) -> int:", "def count_parms(self):\n min_freq = self.get_high_pass_index()\n rejection = self.rejection_at(np.arange(min_freq, self.nf))\n if rejection.ndim < 2:\n return np.sum(rejection)\n else:\n return np.sum(rejection, axis=1)", "def count_params(model: torch.nn.Module) -> int:\n return sum(p.numel() for p in model.parameters() if p.requires_grad)", "def count_params(model: torch.nn.Module) -> int:\n return sum(p.numel() for p in model.parameters() if p.requires_grad)" ]
[ "0.8879826", "0.8658821", "0.8643426", "0.8621051", "0.8559576", "0.8443666", "0.8400023", "0.83914536", "0.8350942", "0.82651836", "0.8234711", "0.8157073", "0.8155948", "0.8069919", "0.8065736", "0.8064464", "0.8031127", "0.80070335", "0.79923403", "0.79663205", "0.7945741", "0.7935545", "0.7818101", "0.7818101", "0.7818101", "0.78120565", "0.7725746", "0.7723024", "0.77179414", "0.7628572", "0.76046556", "0.7572248", "0.7570735", "0.75700164", "0.7557896", "0.75521433", "0.7539009", "0.75206184", "0.7508909", "0.7508909", "0.7490975", "0.74879545", "0.7417157", "0.7374459", "0.7350927", "0.73338795", "0.73338795", "0.73335385", "0.7220937", "0.71996504", "0.71902335", "0.718394", "0.71831185", "0.7178781", "0.7178781", "0.7155224", "0.7124053", "0.7119426", "0.71173084", "0.70878106", "0.7076072", "0.70697904", "0.70682377", "0.7064471", "0.7053471", "0.70435053", "0.7036322", "0.70234597", "0.7012496", "0.7012374", "0.7012285", "0.7010517", "0.69953436", "0.6975467", "0.69283384", "0.6919441", "0.69165057", "0.6915893", "0.6900342", "0.68976724", "0.6886287", "0.68840647", "0.6871395", "0.68657637", "0.6849131", "0.6837043", "0.6837043", "0.6837043", "0.6837043", "0.6835951", "0.6819374", "0.681689", "0.6787406", "0.6765595", "0.6765595", "0.67568725", "0.67568725", "0.6751849", "0.6710731", "0.6707385", "0.6707385" ]
0.0
-1
Get number of prunable parameters
def num_prunable_parameters(self) -> int: return sum(l.weight.numel() for l in self.emb_layers) + \ sum(weight.numel() for weight in self.emb_projs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def n_parameters(self):\n return self.pdm.n_parameters", "def num_params(self):", "def get_parameter_numbers(self) -> int:\n # TODO(jeikeilim): return the number of parameter list of each layers.\n n_param = sum([x.numel() for x in self.model.parameters()])\n return n_param", "def n_parameters(self):\n return sum([p.n_parameters for p in self.parameters])", "def n_parameters(self) -> int:\n return nkjax.tree_size(self.parameters)", "def count_parameters(model):\n return sum(p.numel() for p in model.parameters())", "def N(self):\n return len(self.parameters)", "def number_of_parameters(self):\n return len(self.parameters)", "def get_num_parameters(self):\n return len(self.parameters)", "def num_prunable_parameters(self) -> int:\n return sum(l.weight.numel() for l in self.out_layers) + \\\n sum(weight.numel() for weight in self.out_projs)", "def num_prunable_parameters(self) -> int:\n return sum(l.weight.numel() for l in self.out_layers) + \\\n sum(weight.numel() for weight in self.out_projs)", "def count_params(self):\n self.N = 0\n for name, param in self.model.named_parameters():\n self.N += param.numel()\n self.N_list.append(self.N)", "def count_parameters(self):\n return sum(p.numel() for p in self.parameters() if p.requires_grad)/1e6", "def count_parameters(self):\n return sum(p.numel() for p in self.parameters() if p.requires_grad)/1e6", "def get_num_params(self):\n if self.num_params is None:\n self.num_params = len(self.params)\n return self.num_params", "def params_count(model):\n return np.sum([p.numel() for p in model.parameters()]).item()", "def params_count(model):\n return np.sum([p.numel() for p in model.parameters()]).item()", "def params_count(model):\n return np.sum([p.numel() for p in model.parameters()]).item()", "def calculate_num_params(self):\n num_params = 0\n for p in self.parameters():\n num_params += p.data.view(-1).size(0)\n return num_params", "def count_parameters(model, tunable_only: bool = True) -> int:\n if tunable_only:\n return sum(p.numel() for p in model.parameters() if p.requires_grad)\n else:\n return sum(p.numel() for p in model.parameters())", "def n_parameters(self):\n return len(self._LIST_PARAMETERS)", "def num_parameters(self) -> int:\n return len(self) * self.convention.value", "def numel(self) -> int:\n return sum(p.numel() for p in self.parameters)", "def count_params():\n param_count = np.sum([np.prod(x.get_shape().as_list()) for x in tf.global_variables()])\n return param_count", "def _n_parameters(self):\n raise NotImplementedError", "def nb_parameters(net):\n return sum(p.numel() for p in net.parameters())", "def num_parameters(model):\n return sum([param.nelement() for param in model.parameters()])", "def num_params(self) -> int:\n return self._num_params", "def n_variables(self):\n return sum([p.n_variables for p in self.parameters])", "def num_parameters(self) -> int:\n if self._model:\n return self._model.num_parameters()\n return 0", "def num_parameters(self) -> int:\n if vocabulary.is_empty(self.vocab, self.config.features.configured_namespaces):\n self._LOGGER.warning(\n \"At least one vocabulary of your features is still empty! 
\"\n \"The number of trainable parameters usually depends on the size of your vocabulary.\"\n )\n return sum(p.numel() for p in self._model.parameters())", "def count_params(model):\n param_count = np.sum([np.prod(p.size()) for p in model.parameters()])\n return param_count", "def get_num_params(self):\n if self.num_params is None:\n import inspect\n argspec = inspect.getfullargspec(self.get_code())\n if argspec.varargs or argspec.varkw:\n self.num_params = -1\n else:\n self.num_params = len(argspec.args)\n return self.num_params", "def count_parameters(model):\r\n count = 0\r\n for parameter in list(model.parameters()):\r\n subcount = 1\r\n for size in list(parameter.size()):\r\n subcount *= size\r\n count += subcount\r\n return count", "def num_params(self):\n return len(self.params)", "def num_param(self):\n return len(self._parameters)", "def num_params(self):\r\n return np.sum([torch.tensor(param.shape).prod()\r\n for param in self.parameters()])", "def num_params(self):\n return np.sum([torch.tensor(param.shape).prod()\n for param in self.parameters()])", "def count_parms(self):\n min_freq = self.get_high_pass_index()\n rejection = self.rejection_at(np.arange(min_freq, self.nf))\n if rejection.ndim < 2:\n return np.sum(rejection)\n else:\n return np.sum(rejection, axis=1)", "def countParameters(self):\n return sum(p.numel() for p in self.model.parameters() if p.requires_grad)", "def _get_parameter_count(self):\n parameters_d = 5;\n size_h = self.model.size_h\n return (size_h - 1) + size_h * (\n (size_h - 1) + parameters_d + (self.model.size_aa - 1) + \n (self.model.size_ss - 1) + (self.model.size_cis - 1)\n )", "def count_params(model):\n total = 0\n for x in model.trainable_variables:\n total += np.prod(x.shape)\n return total", "def n_tracers(self):\n # Extract parameters\n pzs = self.params[0]\n return len(pzs)", "def n_tracers(self):\n # Extract parameters\n pzs = self.params[0]\n return len(pzs)", "def N(self) -> int:\n return self.params.N", "def countParam(self):\n return self.decl.args[mpi_array_calls[self.decl.name][self.pos]]", "def param_size(module:nn.Module):\n return np.sum(v.numel() for name, v in module.named_parameters() \\\n if \"auxiliary\" not in name)", "def nVariables(self):\n return len(self.variables)", "def count_parameters(also_print=True):\n total = 0\n if also_print:\n logging.info('Model Parameters:')\n for (_, v) in get_vars_to_save_and_restore().items():\n shape = v.get_shape()\n if also_print:\n logging.info('%s %s: %s', v.op.name, shape,\n format_number(shape.num_elements()))\n total += shape.num_elements()\n if also_print:\n logging.info('Total: %s', format_number(total))\n return total", "def count_parameters():\n total_parameters = 0\n for variable in tf.trainable_variables():\n # shape is an array of tf.Dimension\n name = variable.name\n shape = variable.get_shape()\n #print(shape)\n #print(len(shape))\n variable_parameters = 1\n for dim in shape:\n #print(dim)\n variable_parameters *= dim.value\n print(name, [dim for dim in shape], variable_parameters)\n total_parameters += variable_parameters\n print('Number of trainable parameters = {}'.format(total_parameters))", "def count_parameters(model):\n\treturn sum(p.numel() for p in model.parameters() if p.requires_grad)", "def n_params(model):\n \n n_params=sum([\n np.prod([tensor.size()[k] for k in range(len(tensor.size()))])\n for tensor in list(model.parameters())])\n \n return n_params", "def n_global_parameters(self):\n return self.global_transform.n_parameters", "def count_parameters(net):\r\n 
return sum(p.numel() for p in net.parameters() if p.requires_grad)", "def n_params(self, t_id):\n all_params = set()\n for i in range(t_id+1):\n model = self.get_model(i)\n all_params.update(model.parameters())\n all_params.update(model.buffers())\n\n return sum(map(torch.numel, all_params))", "def num_params():\n total_num = 0\n for var in tf.trainable_variables():\n shape = var.get_shape()\n total_num += functools.reduce(operator.mul, [dim.value for dim in shape], 1)\n return total_num", "def count_parameters(self) -> Tuple[int, int]:\n c_trained, c_total = 0, 0\n for p in self.parameters():\n increment = reduce(lambda x, y: x * y, p.size())\n if p.requires_grad:\n c_trained += increment\n c_total += increment\n return c_trained, c_total", "def num_trainable_parameters(self) -> int:\n if vocabulary.is_empty(self.vocab, self.config.features.configured_namespaces):\n self._LOGGER.warning(\n \"At least one vocabulary of your features is still empty! \"\n \"The number of trainable parameters usually depends on the size of your vocabulary.\"\n )\n return sum(p.numel() for p in self._model.parameters() if p.requires_grad)", "def num_parameters(self) -> int:\n return len(self.w) + prod(self.v.shape) - len(self.v)", "def get_n_parameters(self, exclude_pop_model=False):\n if (self._population_model is None) or exclude_pop_model:\n n_parameters = self._mechanistic_model.n_parameters()\n for error_model in self._error_models:\n n_parameters += error_model.n_parameters()\n return n_parameters\n\n return self._population_model.n_parameters()", "def __len__(self) -> int:\n return len(self.parameters)", "def count_params(layer):\n params = get_all_params(layer)\n shapes = [p.get_value().shape for p in params]\n counts = [np.prod(shape) for shape in shapes]\n return sum(counts)", "def count_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)", "def count_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)", "def count_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)", "def count_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)", "def len_parameters(self):\n return len(self._Parameters._fields)", "def nvar(self):\n return len(self.__vars)", "def local_param_size(self):\n size = 0\n for s in self.symbols[-1]:\n if self.symbols[-1][s].type == 'procedure': continue\n if not self.symbols[-1][s].isparam: continue\n size += 1\n return size", "def get_params_count(self):\n\t\treturn call_sdk_function('PrlResult_GetParamsCount', self.handle)", "def get_parameter_number(net):\n # print(type(net.parameters()))\n total_num = sum(p.numel() for p in net.parameters())\n trainable_num = sum(p.numel() for p in net.parameters() if p.requires_grad)\n return {'Total': total_num, 'Trainable': trainable_num}", "def count_params(all_params):\n nparams = len(all_params)\n nparam_vals = 0\n for i in range(nparams):\n param = all_params[i]\n param_shape = tuple(param.get_shape().as_list())\n nparam_vals += np.prod(param_shape)\n return nparam_vals", "def num_vars(self):\n return self.nvars", "def dimensions(self):\n return len(self.parameter_names)", "def count_parameters(model: Tuple[tuple, tuple, tuple, tuple, str]) -> int:\n return sum(p.numel() for p in model.parameters() if p.requires_grad)", "def num_parameters(self, train=True) -> torch.Tensor:\n params = torch.tensor(0, dtype=torch.float).to(self.out_projs[0])\n if train:\n for i in range(len(self.cutoffs)):\n 
n_proj = (self.masks[i].data.abs() > self.epsilon).sum()\n params += (self.out_projs[i].size(0) + self.out_layers[i].weight.size(0)) * n_proj\n elif self.compiled_projs is not None and self.compiled_embeddings is not None:\n for i in range(len(self.cutoffs)):\n if len(self.indices[i]) == 0:\n warnings.warn(\"Mask is all zero in AdaptiveSoftmax layer-{}\".format(i), RuntimeWarning)\n else:\n params += self.compiled_projs[i].numel() + \\\n self.compiled_embeddings[i].numel()\n return params", "def _get_param_size(module: torch.nn.Module):\n return sum([p.numel() * torch.tensor([], dtype=p.dtype).element_size() for p in module.parameters()])", "def num_parameters(self, train=True) -> torch.Tensor:\n params = torch.tensor(0, dtype=torch.float).to(self.emb_projs[0])\n if train:\n for i in range(len(self.cutoffs)):\n n_proj = (self.masks[i].data.abs() > self.epsilon).sum()\n params += (self.emb_projs[i].size(0) + self.emb_layers[i].weight.size(0)) * n_proj\n elif self.compiled_projs is not None and self.compiled_embeddings is not None:\n for i in range(len(self.cutoffs)):\n if len(self.indices[i]) == 0:\n warnings.warn(\"Mask is all zero in layer-{} AdaptiveEmbedding\".format(i), RuntimeWarning)\n else:\n params += self.compiled_projs[i].numel() + \\\n self.compiled_embeddings[i].numel()\n return params", "def get_total_trainable_parameter_size():\n total_parameters = 0\n import tensorflow as tf\n for variable in tf.trainable_variables():\n # shape is an array of tf.Dimension\n total_parameters += np.product([x.value for x in variable.get_shape()])\n return total_parameters", "def count_parameters(sess):\n\n variables_names = [v.name for v in tf.trainable_variables()]\n values = sess.run(variables_names)\n n_params = 0\n\n for k, v in zip(variables_names, values):\n print '-'.center(140, '-')\n print '{:60s}\\t\\tShape: {:20s}\\t{:20} parameters'.format(k, v.shape, v.size)\n\n n_params += v.size\n\n print '-'.center(140, '-')\n print 'Total # parameters:\\t\\t{}\\n\\n'.format(n_params)\n\n return n_params", "def count_params(model: torch.nn.Module) -> int:\n return sum(p.numel() for p in model.parameters() if p.requires_grad)", "def count_params(model: torch.nn.Module) -> int:\n return sum(p.numel() for p in model.parameters() if p.requires_grad)", "def getNumParameters(self):\n return _libsbml.Model_getNumParameters(self)", "def num_params(self):\n raise NotImplemented(\"Abstract, please implement in respective classes\")", "def getNumParameters(self):\n return _libsbml.KineticLaw_getNumParameters(self)", "def __len__(self):\n return self.nb_iterations", "def get_num_variables(self):\n return len(self.variables)", "def GetNumberOfParameters(self):\n return _ITKCostFunctionsPython.itkCostFunction_GetNumberOfParameters(self)", "def get_n_params(var_list):\n return int(np.sum([np.product(\n [x.value for x in var.get_shape()]) for var in var_list]))", "def num_vars(self):\n return len(self.bounds.lb)", "def __len__(self) -> int:\n return len(self.variables)", "def num_vars(self):\n return self._nvars", "def numpoints(self):\n return len(self.pars) + 1 # so dof is 1", "def count_total_params(model):\n trainable_count = int(\n numpy.sum([K.count_params(p) for p in set(model.trainable_weights)]))\n non_trainable_count = int(\n numpy.sum([K.count_params(p) for p in set(model.non_trainable_weights)]))\n return trainable_count, non_trainable_count", "def __len__(self):\n # Product function that can handle iterables (np.product can't).\n product = partial(reduce, operator.mul)\n return sum(product(len(v) 
for v in p.values()) if p else 1\n for p in self.param_grid)", "def __len__(self):\n return len(self.params)", "def num_parameters(self, train=True) -> torch.Tensor:\n params = torch.tensor(0, dtype=torch.float).to(self.out_projs[0])\n if train:\n for i in range(len(self.cutoffs)):\n n_proj = self.masks[i].l0_norm()\n params += (self.out_projs[i].size(0) + self.out_layers[i].weight.size(0)) * n_proj\n elif self.compiled_projs is not None and self.compiled_embeddings is not None:\n for i in range(len(self.cutoffs)):\n if len(self.indices[i]) == 0:\n warnings.warn(\"Mask is all zero in AdaptiveSoftmax layer-{}\".format(i), RuntimeWarning)\n else:\n params += self.compiled_projs[i].numel() + \\\n self.compiled_embeddings[i].numel()\n return params", "def num_parameters(self, train=True) -> torch.Tensor:\n params = torch.tensor(0, dtype=torch.float).to(self.emb_projs[0])\n if train:\n for i in range(len(self.cutoffs)):\n n_proj = self.masks[i].l0_norm()\n params += (self.emb_projs[i].size(0) + self.emb_layers[i].weight.size(0)) * n_proj\n elif self.compiled_projs is not None and self.compiled_embeddings is not None:\n for i in range(len(self.cutoffs)):\n if len(self.indices[i]) == 0:\n warnings.warn(\"Mask is all zero in layer-{} AdaptiveEmbedding\".format(i), RuntimeWarning)\n else:\n params += self.compiled_projs[i].numel() + \\\n self.compiled_embeddings[i].numel()\n return params", "def calculate_num_params(self) -> None:\n for name, param in self.module.named_parameters():\n self.num_params += param.nelement()\n self.trainable &= param.requires_grad\n\n if name == \"weight\":\n ksize = list(param.size())\n # to make [in_shape, out_shape, ksize, ksize]\n if len(ksize) > 1:\n ksize[0], ksize[1] = ksize[1], ksize[0]\n self.kernel_size = ksize\n\n # RNN modules have inner weights such as weight_ih_l0\n elif \"weight\" in name:\n self.inner_layers[name] = list(param.size())" ]
[ "0.75405735", "0.75211614", "0.738875", "0.7387541", "0.73810554", "0.73747927", "0.7355472", "0.73516285", "0.73359424", "0.7311632", "0.7311632", "0.7266938", "0.72314495", "0.72314495", "0.72164834", "0.7203205", "0.7203205", "0.7203205", "0.7185691", "0.71791095", "0.7174818", "0.7153565", "0.7147031", "0.71392816", "0.71322024", "0.7110126", "0.7075877", "0.7061964", "0.70428026", "0.7023761", "0.7022947", "0.7002628", "0.69975686", "0.6995272", "0.69509196", "0.69483453", "0.6928968", "0.6928118", "0.69278497", "0.69151187", "0.6909454", "0.6865862", "0.6845796", "0.6845796", "0.68222374", "0.6812105", "0.68063724", "0.6789041", "0.6788805", "0.67466265", "0.6745521", "0.67444295", "0.67302775", "0.6720803", "0.6719991", "0.67177933", "0.6702116", "0.66793615", "0.6676897", "0.6676889", "0.6667523", "0.66626525", "0.6658684", "0.6658684", "0.6658684", "0.6658684", "0.6657855", "0.66550386", "0.66406214", "0.6617321", "0.66127974", "0.6611052", "0.66075385", "0.66049236", "0.65821505", "0.6576277", "0.65751797", "0.65729064", "0.6565648", "0.65637845", "0.6560861", "0.6560861", "0.65540624", "0.65441906", "0.6537034", "0.6536052", "0.65311366", "0.65074724", "0.64652073", "0.64433426", "0.6442025", "0.643742", "0.6431012", "0.6429944", "0.6423827", "0.6415726", "0.6397735", "0.6387696", "0.6358759" ]
0.73142964
10
Get number of parameters.
def num_parameters(self, train=True) -> torch.Tensor:
    params = torch.tensor(0, dtype=torch.float).to(self.emb_projs[0])
    if train:
        for i in range(len(self.cutoffs)):
            n_proj = (self.masks[i].data.abs() > self.epsilon).sum()
            params += (self.emb_projs[i].size(0) + self.emb_layers[i].weight.size(0)) * n_proj
    elif self.compiled_projs is not None and self.compiled_embeddings is not None:
        for i in range(len(self.cutoffs)):
            if len(self.indices[i]) == 0:
                warnings.warn("Mask is all zero in layer-{} AdaptiveEmbedding".format(i), RuntimeWarning)
            else:
                params += self.compiled_projs[i].numel() + \
                    self.compiled_embeddings[i].numel()
    return params
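For the train-time branch above, a hedged stand-alone illustration (bucket shapes, epsilon and mask values are invented for the example): each cutoff bucket contributes (projection rows + embedding rows) multiplied by the number of mask entries whose magnitude exceeds epsilon.

import torch

# Hypothetical shapes for two cutoff buckets: (embedding rows, projection rows, mask length).
buckets = [(20000, 128, 512), (40000, 128, 512)]
epsilon = 0.01
masks = [torch.randn(m) * 0.02 for (_, _, m) in buckets]

params = torch.tensor(0.0)
for (n_emb, n_proj_rows, _), mask in zip(buckets, masks):
    n_kept = (mask.abs() > epsilon).sum()        # surviving mask entries
    params += (n_proj_rows + n_emb) * n_kept     # same formula as the snippet above
print(float(params))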
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_num_parameters(self):\n return len(self.parameters)", "def get_num_params(self):\n if self.num_params is None:\n import inspect\n argspec = inspect.getfullargspec(self.get_code())\n if argspec.varargs or argspec.varkw:\n self.num_params = -1\n else:\n self.num_params = len(argspec.args)\n return self.num_params", "def number_of_parameters(self):\n return len(self.parameters)", "def get_num_params(self):\n if self.num_params is None:\n self.num_params = len(self.params)\n return self.num_params", "def num_param(self):\n return len(self._parameters)", "def n_parameters(self):\n return len(self._LIST_PARAMETERS)", "def num_parameters(self) -> int:\n return len(self) * self.convention.value", "def num_params(self) -> int:\n return self._num_params", "def num_params(self):", "def num_params(self):\n return len(self.params)", "def n_parameters(self):\n return self.pdm.n_parameters", "def N(self):\n return len(self.parameters)", "def _n_parameters(self):\n raise NotImplementedError", "def n_parameters(self):\n return sum([p.n_parameters for p in self.parameters])", "def get_parameter_numbers(self) -> int:\n # TODO(jeikeilim): return the number of parameter list of each layers.\n n_param = sum([x.numel() for x in self.model.parameters()])\n return n_param", "def calculate_num_params(self):\n num_params = 0\n for p in self.parameters():\n num_params += p.data.view(-1).size(0)\n return num_params", "def count_params(self):\n self.N = 0\n for name, param in self.model.named_parameters():\n self.N += param.numel()\n self.N_list.append(self.N)", "def get_params_count(self):\n\t\treturn call_sdk_function('PrlResult_GetParamsCount', self.handle)", "def countParam(self):\n return self.decl.args[mpi_array_calls[self.decl.name][self.pos]]", "def n_parameters(self) -> int:\n return nkjax.tree_size(self.parameters)", "def num_parameters(self) -> int:\n if self._model:\n return self._model.num_parameters()\n return 0", "def len_parameters(self):\n return len(self._Parameters._fields)", "def params_count(model):\n return np.sum([p.numel() for p in model.parameters()]).item()", "def params_count(model):\n return np.sum([p.numel() for p in model.parameters()]).item()", "def params_count(model):\n return np.sum([p.numel() for p in model.parameters()]).item()", "def count_parameters(model):\n return sum(p.numel() for p in model.parameters())", "def get_num_parameters(form):\n n_args = len(inspect.signature(form).parameters)\n tree = form(*[SyntaxTreeNode('_' + str(i)) for i in range(n_args)])\n return len(get_unique_parameters(tree))", "def nb_parameters(net):\n return sum(p.numel() for p in net.parameters())", "def numel(self) -> int:\n return sum(p.numel() for p in self.parameters)", "def _get_parameter_count(self):\n parameters_d = 5;\n size_h = self.model.size_h\n return (size_h - 1) + size_h * (\n (size_h - 1) + parameters_d + (self.model.size_aa - 1) + \n (self.model.size_ss - 1) + (self.model.size_cis - 1)\n )", "def GetNumberOfParameters(self):\n return _ITKCostFunctionsPython.itkCostFunction_GetNumberOfParameters(self)", "def num_params(self):\n return np.sum([torch.tensor(param.shape).prod()\n for param in self.parameters()])", "def getNumParameters(self):\n return _libsbml.KineticLaw_getNumParameters(self)", "def __len__(self) -> int:\n return len(self.parameters)", "def num_params(self):\r\n return np.sum([torch.tensor(param.shape).prod()\r\n for param in self.parameters()])", "def getNumParameters(self):\n return _libsbml.Model_getNumParameters(self)", "def dimensions(self):\n return 
len(self.parameter_names)", "def num_parameters(self) -> int:\n return len(self.w) + prod(self.v.shape) - len(self.v)", "def count_parameters(self):\n return sum(p.numel() for p in self.parameters() if p.requires_grad)/1e6", "def count_parameters(self):\n return sum(p.numel() for p in self.parameters() if p.requires_grad)/1e6", "def num_params(self):\n raise NotImplemented(\"Abstract, please implement in respective classes\")", "def count_params(model):\n param_count = np.sum([np.prod(p.size()) for p in model.parameters()])\n return param_count", "def count_parameters(model):\r\n count = 0\r\n for parameter in list(model.parameters()):\r\n subcount = 1\r\n for size in list(parameter.size()):\r\n subcount *= size\r\n count += subcount\r\n return count", "def num_parameters(model):\n return sum([param.nelement() for param in model.parameters()])", "def N(self) -> int:\n return self.params.N", "def n_tracers(self):\n # Extract parameters\n pzs = self.params[0]\n return len(pzs)", "def n_tracers(self):\n # Extract parameters\n pzs = self.params[0]\n return len(pzs)", "def count_params():\n param_count = np.sum([np.prod(x.get_shape().as_list()) for x in tf.global_variables()])\n return param_count", "def length(self):\n return int(np.sum([x.length for x in self.parameters]))", "def countParameters(self):\n return sum(p.numel() for p in self.model.parameters() if p.requires_grad)", "def n_variables(self):\n return sum([p.n_variables for p in self.parameters])", "def __len__(self):\n return len(self.params)", "def num_parameters(self) -> int:\n if vocabulary.is_empty(self.vocab, self.config.features.configured_namespaces):\n self._LOGGER.warning(\n \"At least one vocabulary of your features is still empty! \"\n \"The number of trainable parameters usually depends on the size of your vocabulary.\"\n )\n return sum(p.numel() for p in self._model.parameters())", "def num_hyperparameters(self):\n return self._hyperparameters.size", "def num_hyperparameters(self):\n return self._hyperparameters.size", "def _get_param_size(module: torch.nn.Module):\n return sum([p.numel() * torch.tensor([], dtype=p.dtype).element_size() for p in module.parameters()])", "def param_size(module:nn.Module):\n return np.sum(v.numel() for name, v in module.named_parameters() \\\n if \"auxiliary\" not in name)", "def count_params(all_params):\n nparams = len(all_params)\n nparam_vals = 0\n for i in range(nparams):\n param = all_params[i]\n param_shape = tuple(param.get_shape().as_list())\n nparam_vals += np.prod(param_shape)\n return nparam_vals", "def n_elements_one_param(self, param_name):\n p = self._get_one_param(param_name)\n return len(p)", "def count_parameters(also_print=True):\n total = 0\n if also_print:\n logging.info('Model Parameters:')\n for (_, v) in get_vars_to_save_and_restore().items():\n shape = v.get_shape()\n if also_print:\n logging.info('%s %s: %s', v.op.name, shape,\n format_number(shape.num_elements()))\n total += shape.num_elements()\n if also_print:\n logging.info('Total: %s', format_number(total))\n return total", "def local_param_size(self):\n size = 0\n for s in self.symbols[-1]:\n if self.symbols[-1][s].type == 'procedure': continue\n if not self.symbols[-1][s].isparam: continue\n size += 1\n return size", "def n_params(self, t_id):\n all_params = set()\n for i in range(t_id+1):\n model = self.get_model(i)\n all_params.update(model.parameters())\n all_params.update(model.buffers())\n\n return sum(map(torch.numel, all_params))", "def getNumArguments(self):\n return 
_libsbml.SBMLExternalValidator_getNumArguments(self)", "def get_num_variables(self):\n return len(self.variables)", "def n_params(model):\n \n n_params=sum([\n np.prod([tensor.size()[k] for k in range(len(tensor.size()))])\n for tensor in list(model.parameters())])\n \n return n_params", "def count_parameters(model, tunable_only: bool = True) -> int:\n if tunable_only:\n return sum(p.numel() for p in model.parameters() if p.requires_grad)\n else:\n return sum(p.numel() for p in model.parameters())", "def __len__(self):\n if self.args is None:\n return 0\n return len(vars(self.args))", "def count_params(layer):\n params = get_all_params(layer)\n shapes = [p.get_value().shape for p in params]\n counts = [np.prod(shape) for shape in shapes]\n return sum(counts)", "def num_arguments(self) -> int:\n if 'arguments' in self._event:\n return len(self._event['arguments'])\n return 0", "def numpoints(self):\n return len(self.pars) + 1 # so dof is 1", "def count_parameters(net):\r\n return sum(p.numel() for p in net.parameters() if p.requires_grad)", "def nVariables(self):\n return len(self.variables)", "def num_vars(self):\n return self.nvars", "def count_layer_params(layer):\n num_params = 0\n name, param_names, dims, _, _ = layer.get_layer_info()\n nparams = len(dims)\n for j in range(nparams):\n num_params += np.prod(dims[j])\n return num_params", "def count_parameters(model):\n\treturn sum(p.numel() for p in model.parameters() if p.requires_grad)", "def count_parameters(model: Tuple[tuple, tuple, tuple, tuple, str]) -> int:\n return sum(p.numel() for p in model.parameters() if p.requires_grad)", "def getNumArguments(self):\n return _libsbml.FunctionDefinition_getNumArguments(self)", "def get_num_args(function):\n import inspect\n args = inspect.getfullargspec(function)\n num_args = 0\n if args[0] is not None:\n num_args += len(args[0])\n if 'self' in args[0]:\n num_args -= 1\n if args[1] is not None:\n num_args += len(args[1])\n if args[2] is not None:\n num_args += len(args[2])\n # do not count defaults of keywords conatined in args[3]\n # if args[3] is not None:\n # num_args += len(args[3])\n return num_args", "def get_n_params(var_list):\n return int(np.sum([np.product(\n [x.value for x in var.get_shape()]) for var in var_list]))", "def num_params():\n total_num = 0\n for var in tf.trainable_variables():\n shape = var.get_shape()\n total_num += functools.reduce(operator.mul, [dim.value for dim in shape], 1)\n return total_num", "def count_parameters(sess):\n\n variables_names = [v.name for v in tf.trainable_variables()]\n values = sess.run(variables_names)\n n_params = 0\n\n for k, v in zip(variables_names, values):\n print '-'.center(140, '-')\n print '{:60s}\\t\\tShape: {:20s}\\t{:20} parameters'.format(k, v.shape, v.size)\n\n n_params += v.size\n\n print '-'.center(140, '-')\n print 'Total # parameters:\\t\\t{}\\n\\n'.format(n_params)\n\n return n_params", "def num_vars(self):\n return self._nvars", "def args_count(args: list) -> int:\n\n\treturn len(args)", "def get_arg_count(fun):\n if isclass(fun):\n return len(signature(fun.__call__).parameters)\n return len(signature(fun).parameters)", "def count(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"count\")", "def count_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)", "def count_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)", "def count_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)", "def 
count_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)", "def num_params(architecture): #\n \n total_parameters = 0\n for layer in range(1,len(architecture)+1):\n weight_dims = np.shape(architecture['layer{}'.format(layer)][2])\n try:\n params = weight_dims[0]*weight_dims[1]*weight_dims[2]\n except:\n try:\n params = weight_dims[0]*weight_dims[1]\n except:\n try:\n params = weight_dims[0]\n except:\n params = 0\n total_parameters += params\n return total_parameters", "def count_parameters(self) -> Tuple[int, int]:\n c_trained, c_total = 0, 0\n for p in self.parameters():\n increment = reduce(lambda x, y: x * y, p.size())\n if p.requires_grad:\n c_trained += increment\n c_total += increment\n return c_trained, c_total", "def n_global_parameters(self):\n return self.global_transform.n_parameters", "def getNumberOfKeys(self) -> int:\n ...", "def num_prunable_parameters(self) -> int:\n return sum(l.weight.numel() for l in self.emb_layers) + \\\n sum(weight.numel() for weight in self.emb_projs)", "def num_prunable_parameters(self) -> int:\n return sum(l.weight.numel() for l in self.emb_layers) + \\\n sum(weight.numel() for weight in self.emb_projs)", "def num_prunable_parameters(self) -> int:\n return sum(l.weight.numel() for l in self.out_layers) + \\\n sum(weight.numel() for weight in self.out_projs)", "def num_prunable_parameters(self) -> int:\n return sum(l.weight.numel() for l in self.out_layers) + \\\n sum(weight.numel() for weight in self.out_projs)", "def count_objects_of_size(self, n: int, **parameters: int) -> int:", "def count_parms(self):\n min_freq = self.get_high_pass_index()\n rejection = self.rejection_at(np.arange(min_freq, self.nf))\n if rejection.ndim < 2:\n return np.sum(rejection)\n else:\n return np.sum(rejection, axis=1)", "def count_params(model: torch.nn.Module) -> int:\n return sum(p.numel() for p in model.parameters() if p.requires_grad)", "def count_params(model: torch.nn.Module) -> int:\n return sum(p.numel() for p in model.parameters() if p.requires_grad)" ]
[ "0.8879826", "0.8658821", "0.8643426", "0.8621051", "0.8559576", "0.8443666", "0.8400023", "0.83914536", "0.8350942", "0.82651836", "0.8234711", "0.8157073", "0.8155948", "0.8069919", "0.8065736", "0.8064464", "0.8031127", "0.80070335", "0.79923403", "0.79663205", "0.7945741", "0.7935545", "0.7818101", "0.7818101", "0.7818101", "0.78120565", "0.7725746", "0.7723024", "0.77179414", "0.7628572", "0.76046556", "0.7572248", "0.7570735", "0.75700164", "0.7557896", "0.75521433", "0.7539009", "0.75206184", "0.7508909", "0.7508909", "0.7490975", "0.74879545", "0.7417157", "0.7374459", "0.7350927", "0.73338795", "0.73338795", "0.73335385", "0.7220937", "0.71996504", "0.71902335", "0.718394", "0.71831185", "0.7178781", "0.7178781", "0.7155224", "0.7124053", "0.7119426", "0.71173084", "0.70878106", "0.7076072", "0.70697904", "0.70682377", "0.7064471", "0.7053471", "0.70435053", "0.7036322", "0.70234597", "0.7012496", "0.7012374", "0.7012285", "0.7010517", "0.69953436", "0.6975467", "0.69283384", "0.6919441", "0.69165057", "0.6915893", "0.6900342", "0.68976724", "0.6886287", "0.68840647", "0.6871395", "0.68657637", "0.6849131", "0.6837043", "0.6837043", "0.6837043", "0.6837043", "0.6835951", "0.6819374", "0.681689", "0.6787406", "0.6765595", "0.6765595", "0.67568725", "0.67568725", "0.6751849", "0.6710731", "0.6707385", "0.6707385" ]
0.0
-1
Get number of prunable parameters
def num_prunable_parameters(self) -> int:
    return sum(l.weight.numel() for l in self.out_layers) + \
        sum(weight.numel() for weight in self.out_projs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def n_parameters(self):\n return self.pdm.n_parameters", "def num_params(self):", "def get_parameter_numbers(self) -> int:\n # TODO(jeikeilim): return the number of parameter list of each layers.\n n_param = sum([x.numel() for x in self.model.parameters()])\n return n_param", "def n_parameters(self):\n return sum([p.n_parameters for p in self.parameters])", "def n_parameters(self) -> int:\n return nkjax.tree_size(self.parameters)", "def count_parameters(model):\n return sum(p.numel() for p in model.parameters())", "def N(self):\n return len(self.parameters)", "def number_of_parameters(self):\n return len(self.parameters)", "def get_num_parameters(self):\n return len(self.parameters)", "def num_prunable_parameters(self) -> int:\n return sum(l.weight.numel() for l in self.emb_layers) + \\\n sum(weight.numel() for weight in self.emb_projs)", "def num_prunable_parameters(self) -> int:\n return sum(l.weight.numel() for l in self.emb_layers) + \\\n sum(weight.numel() for weight in self.emb_projs)", "def count_params(self):\n self.N = 0\n for name, param in self.model.named_parameters():\n self.N += param.numel()\n self.N_list.append(self.N)", "def count_parameters(self):\n return sum(p.numel() for p in self.parameters() if p.requires_grad)/1e6", "def count_parameters(self):\n return sum(p.numel() for p in self.parameters() if p.requires_grad)/1e6", "def get_num_params(self):\n if self.num_params is None:\n self.num_params = len(self.params)\n return self.num_params", "def params_count(model):\n return np.sum([p.numel() for p in model.parameters()]).item()", "def params_count(model):\n return np.sum([p.numel() for p in model.parameters()]).item()", "def params_count(model):\n return np.sum([p.numel() for p in model.parameters()]).item()", "def calculate_num_params(self):\n num_params = 0\n for p in self.parameters():\n num_params += p.data.view(-1).size(0)\n return num_params", "def count_parameters(model, tunable_only: bool = True) -> int:\n if tunable_only:\n return sum(p.numel() for p in model.parameters() if p.requires_grad)\n else:\n return sum(p.numel() for p in model.parameters())", "def n_parameters(self):\n return len(self._LIST_PARAMETERS)", "def num_parameters(self) -> int:\n return len(self) * self.convention.value", "def numel(self) -> int:\n return sum(p.numel() for p in self.parameters)", "def count_params():\n param_count = np.sum([np.prod(x.get_shape().as_list()) for x in tf.global_variables()])\n return param_count", "def _n_parameters(self):\n raise NotImplementedError", "def nb_parameters(net):\n return sum(p.numel() for p in net.parameters())", "def num_parameters(model):\n return sum([param.nelement() for param in model.parameters()])", "def num_params(self) -> int:\n return self._num_params", "def n_variables(self):\n return sum([p.n_variables for p in self.parameters])", "def num_parameters(self) -> int:\n if self._model:\n return self._model.num_parameters()\n return 0", "def num_parameters(self) -> int:\n if vocabulary.is_empty(self.vocab, self.config.features.configured_namespaces):\n self._LOGGER.warning(\n \"At least one vocabulary of your features is still empty! 
\"\n \"The number of trainable parameters usually depends on the size of your vocabulary.\"\n )\n return sum(p.numel() for p in self._model.parameters())", "def count_params(model):\n param_count = np.sum([np.prod(p.size()) for p in model.parameters()])\n return param_count", "def get_num_params(self):\n if self.num_params is None:\n import inspect\n argspec = inspect.getfullargspec(self.get_code())\n if argspec.varargs or argspec.varkw:\n self.num_params = -1\n else:\n self.num_params = len(argspec.args)\n return self.num_params", "def count_parameters(model):\r\n count = 0\r\n for parameter in list(model.parameters()):\r\n subcount = 1\r\n for size in list(parameter.size()):\r\n subcount *= size\r\n count += subcount\r\n return count", "def num_params(self):\n return len(self.params)", "def num_param(self):\n return len(self._parameters)", "def num_params(self):\r\n return np.sum([torch.tensor(param.shape).prod()\r\n for param in self.parameters()])", "def num_params(self):\n return np.sum([torch.tensor(param.shape).prod()\n for param in self.parameters()])", "def count_parms(self):\n min_freq = self.get_high_pass_index()\n rejection = self.rejection_at(np.arange(min_freq, self.nf))\n if rejection.ndim < 2:\n return np.sum(rejection)\n else:\n return np.sum(rejection, axis=1)", "def countParameters(self):\n return sum(p.numel() for p in self.model.parameters() if p.requires_grad)", "def _get_parameter_count(self):\n parameters_d = 5;\n size_h = self.model.size_h\n return (size_h - 1) + size_h * (\n (size_h - 1) + parameters_d + (self.model.size_aa - 1) + \n (self.model.size_ss - 1) + (self.model.size_cis - 1)\n )", "def count_params(model):\n total = 0\n for x in model.trainable_variables:\n total += np.prod(x.shape)\n return total", "def n_tracers(self):\n # Extract parameters\n pzs = self.params[0]\n return len(pzs)", "def n_tracers(self):\n # Extract parameters\n pzs = self.params[0]\n return len(pzs)", "def N(self) -> int:\n return self.params.N", "def countParam(self):\n return self.decl.args[mpi_array_calls[self.decl.name][self.pos]]", "def param_size(module:nn.Module):\n return np.sum(v.numel() for name, v in module.named_parameters() \\\n if \"auxiliary\" not in name)", "def nVariables(self):\n return len(self.variables)", "def count_parameters(also_print=True):\n total = 0\n if also_print:\n logging.info('Model Parameters:')\n for (_, v) in get_vars_to_save_and_restore().items():\n shape = v.get_shape()\n if also_print:\n logging.info('%s %s: %s', v.op.name, shape,\n format_number(shape.num_elements()))\n total += shape.num_elements()\n if also_print:\n logging.info('Total: %s', format_number(total))\n return total", "def count_parameters():\n total_parameters = 0\n for variable in tf.trainable_variables():\n # shape is an array of tf.Dimension\n name = variable.name\n shape = variable.get_shape()\n #print(shape)\n #print(len(shape))\n variable_parameters = 1\n for dim in shape:\n #print(dim)\n variable_parameters *= dim.value\n print(name, [dim for dim in shape], variable_parameters)\n total_parameters += variable_parameters\n print('Number of trainable parameters = {}'.format(total_parameters))", "def count_parameters(model):\n\treturn sum(p.numel() for p in model.parameters() if p.requires_grad)", "def n_params(model):\n \n n_params=sum([\n np.prod([tensor.size()[k] for k in range(len(tensor.size()))])\n for tensor in list(model.parameters())])\n \n return n_params", "def n_global_parameters(self):\n return self.global_transform.n_parameters", "def count_parameters(net):\r\n 
return sum(p.numel() for p in net.parameters() if p.requires_grad)", "def n_params(self, t_id):\n all_params = set()\n for i in range(t_id+1):\n model = self.get_model(i)\n all_params.update(model.parameters())\n all_params.update(model.buffers())\n\n return sum(map(torch.numel, all_params))", "def num_params():\n total_num = 0\n for var in tf.trainable_variables():\n shape = var.get_shape()\n total_num += functools.reduce(operator.mul, [dim.value for dim in shape], 1)\n return total_num", "def count_parameters(self) -> Tuple[int, int]:\n c_trained, c_total = 0, 0\n for p in self.parameters():\n increment = reduce(lambda x, y: x * y, p.size())\n if p.requires_grad:\n c_trained += increment\n c_total += increment\n return c_trained, c_total", "def num_trainable_parameters(self) -> int:\n if vocabulary.is_empty(self.vocab, self.config.features.configured_namespaces):\n self._LOGGER.warning(\n \"At least one vocabulary of your features is still empty! \"\n \"The number of trainable parameters usually depends on the size of your vocabulary.\"\n )\n return sum(p.numel() for p in self._model.parameters() if p.requires_grad)", "def num_parameters(self) -> int:\n return len(self.w) + prod(self.v.shape) - len(self.v)", "def get_n_parameters(self, exclude_pop_model=False):\n if (self._population_model is None) or exclude_pop_model:\n n_parameters = self._mechanistic_model.n_parameters()\n for error_model in self._error_models:\n n_parameters += error_model.n_parameters()\n return n_parameters\n\n return self._population_model.n_parameters()", "def __len__(self) -> int:\n return len(self.parameters)", "def count_params(layer):\n params = get_all_params(layer)\n shapes = [p.get_value().shape for p in params]\n counts = [np.prod(shape) for shape in shapes]\n return sum(counts)", "def count_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)", "def count_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)", "def count_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)", "def count_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)", "def len_parameters(self):\n return len(self._Parameters._fields)", "def nvar(self):\n return len(self.__vars)", "def local_param_size(self):\n size = 0\n for s in self.symbols[-1]:\n if self.symbols[-1][s].type == 'procedure': continue\n if not self.symbols[-1][s].isparam: continue\n size += 1\n return size", "def get_params_count(self):\n\t\treturn call_sdk_function('PrlResult_GetParamsCount', self.handle)", "def get_parameter_number(net):\n # print(type(net.parameters()))\n total_num = sum(p.numel() for p in net.parameters())\n trainable_num = sum(p.numel() for p in net.parameters() if p.requires_grad)\n return {'Total': total_num, 'Trainable': trainable_num}", "def count_params(all_params):\n nparams = len(all_params)\n nparam_vals = 0\n for i in range(nparams):\n param = all_params[i]\n param_shape = tuple(param.get_shape().as_list())\n nparam_vals += np.prod(param_shape)\n return nparam_vals", "def num_vars(self):\n return self.nvars", "def dimensions(self):\n return len(self.parameter_names)", "def count_parameters(model: Tuple[tuple, tuple, tuple, tuple, str]) -> int:\n return sum(p.numel() for p in model.parameters() if p.requires_grad)", "def num_parameters(self, train=True) -> torch.Tensor:\n params = torch.tensor(0, dtype=torch.float).to(self.out_projs[0])\n if train:\n for i in range(len(self.cutoffs)):\n 
n_proj = (self.masks[i].data.abs() > self.epsilon).sum()\n params += (self.out_projs[i].size(0) + self.out_layers[i].weight.size(0)) * n_proj\n elif self.compiled_projs is not None and self.compiled_embeddings is not None:\n for i in range(len(self.cutoffs)):\n if len(self.indices[i]) == 0:\n warnings.warn(\"Mask is all zero in AdaptiveSoftmax layer-{}\".format(i), RuntimeWarning)\n else:\n params += self.compiled_projs[i].numel() + \\\n self.compiled_embeddings[i].numel()\n return params", "def _get_param_size(module: torch.nn.Module):\n return sum([p.numel() * torch.tensor([], dtype=p.dtype).element_size() for p in module.parameters()])", "def num_parameters(self, train=True) -> torch.Tensor:\n params = torch.tensor(0, dtype=torch.float).to(self.emb_projs[0])\n if train:\n for i in range(len(self.cutoffs)):\n n_proj = (self.masks[i].data.abs() > self.epsilon).sum()\n params += (self.emb_projs[i].size(0) + self.emb_layers[i].weight.size(0)) * n_proj\n elif self.compiled_projs is not None and self.compiled_embeddings is not None:\n for i in range(len(self.cutoffs)):\n if len(self.indices[i]) == 0:\n warnings.warn(\"Mask is all zero in layer-{} AdaptiveEmbedding\".format(i), RuntimeWarning)\n else:\n params += self.compiled_projs[i].numel() + \\\n self.compiled_embeddings[i].numel()\n return params", "def get_total_trainable_parameter_size():\n total_parameters = 0\n import tensorflow as tf\n for variable in tf.trainable_variables():\n # shape is an array of tf.Dimension\n total_parameters += np.product([x.value for x in variable.get_shape()])\n return total_parameters", "def count_parameters(sess):\n\n variables_names = [v.name for v in tf.trainable_variables()]\n values = sess.run(variables_names)\n n_params = 0\n\n for k, v in zip(variables_names, values):\n print '-'.center(140, '-')\n print '{:60s}\\t\\tShape: {:20s}\\t{:20} parameters'.format(k, v.shape, v.size)\n\n n_params += v.size\n\n print '-'.center(140, '-')\n print 'Total # parameters:\\t\\t{}\\n\\n'.format(n_params)\n\n return n_params", "def count_params(model: torch.nn.Module) -> int:\n return sum(p.numel() for p in model.parameters() if p.requires_grad)", "def count_params(model: torch.nn.Module) -> int:\n return sum(p.numel() for p in model.parameters() if p.requires_grad)", "def getNumParameters(self):\n return _libsbml.Model_getNumParameters(self)", "def num_params(self):\n raise NotImplemented(\"Abstract, please implement in respective classes\")", "def getNumParameters(self):\n return _libsbml.KineticLaw_getNumParameters(self)", "def __len__(self):\n return self.nb_iterations", "def get_num_variables(self):\n return len(self.variables)", "def GetNumberOfParameters(self):\n return _ITKCostFunctionsPython.itkCostFunction_GetNumberOfParameters(self)", "def get_n_params(var_list):\n return int(np.sum([np.product(\n [x.value for x in var.get_shape()]) for var in var_list]))", "def num_vars(self):\n return len(self.bounds.lb)", "def __len__(self) -> int:\n return len(self.variables)", "def num_vars(self):\n return self._nvars", "def numpoints(self):\n return len(self.pars) + 1 # so dof is 1", "def count_total_params(model):\n trainable_count = int(\n numpy.sum([K.count_params(p) for p in set(model.trainable_weights)]))\n non_trainable_count = int(\n numpy.sum([K.count_params(p) for p in set(model.non_trainable_weights)]))\n return trainable_count, non_trainable_count", "def __len__(self):\n # Product function that can handle iterables (np.product can't).\n product = partial(reduce, operator.mul)\n return sum(product(len(v) 
for v in p.values()) if p else 1\n for p in self.param_grid)", "def __len__(self):\n return len(self.params)", "def num_parameters(self, train=True) -> torch.Tensor:\n params = torch.tensor(0, dtype=torch.float).to(self.out_projs[0])\n if train:\n for i in range(len(self.cutoffs)):\n n_proj = self.masks[i].l0_norm()\n params += (self.out_projs[i].size(0) + self.out_layers[i].weight.size(0)) * n_proj\n elif self.compiled_projs is not None and self.compiled_embeddings is not None:\n for i in range(len(self.cutoffs)):\n if len(self.indices[i]) == 0:\n warnings.warn(\"Mask is all zero in AdaptiveSoftmax layer-{}\".format(i), RuntimeWarning)\n else:\n params += self.compiled_projs[i].numel() + \\\n self.compiled_embeddings[i].numel()\n return params", "def num_parameters(self, train=True) -> torch.Tensor:\n params = torch.tensor(0, dtype=torch.float).to(self.emb_projs[0])\n if train:\n for i in range(len(self.cutoffs)):\n n_proj = self.masks[i].l0_norm()\n params += (self.emb_projs[i].size(0) + self.emb_layers[i].weight.size(0)) * n_proj\n elif self.compiled_projs is not None and self.compiled_embeddings is not None:\n for i in range(len(self.cutoffs)):\n if len(self.indices[i]) == 0:\n warnings.warn(\"Mask is all zero in layer-{} AdaptiveEmbedding\".format(i), RuntimeWarning)\n else:\n params += self.compiled_projs[i].numel() + \\\n self.compiled_embeddings[i].numel()\n return params", "def calculate_num_params(self) -> None:\n for name, param in self.module.named_parameters():\n self.num_params += param.nelement()\n self.trainable &= param.requires_grad\n\n if name == \"weight\":\n ksize = list(param.size())\n # to make [in_shape, out_shape, ksize, ksize]\n if len(ksize) > 1:\n ksize[0], ksize[1] = ksize[1], ksize[0]\n self.kernel_size = ksize\n\n # RNN modules have inner weights such as weight_ih_l0\n elif \"weight\" in name:\n self.inner_layers[name] = list(param.size())" ]
[ "0.75405735", "0.75211614", "0.738875", "0.7387541", "0.73810554", "0.73747927", "0.7355472", "0.73516285", "0.73359424", "0.73142964", "0.73142964", "0.7266938", "0.72314495", "0.72314495", "0.72164834", "0.7203205", "0.7203205", "0.7203205", "0.7185691", "0.71791095", "0.7174818", "0.7153565", "0.7147031", "0.71392816", "0.71322024", "0.7110126", "0.7075877", "0.7061964", "0.70428026", "0.7023761", "0.7022947", "0.7002628", "0.69975686", "0.6995272", "0.69509196", "0.69483453", "0.6928968", "0.6928118", "0.69278497", "0.69151187", "0.6909454", "0.6865862", "0.6845796", "0.6845796", "0.68222374", "0.6812105", "0.68063724", "0.6789041", "0.6788805", "0.67466265", "0.6745521", "0.67444295", "0.67302775", "0.6720803", "0.6719991", "0.67177933", "0.6702116", "0.66793615", "0.6676897", "0.6676889", "0.6667523", "0.66626525", "0.6658684", "0.6658684", "0.6658684", "0.6658684", "0.6657855", "0.66550386", "0.66406214", "0.6617321", "0.66127974", "0.6611052", "0.66075385", "0.66049236", "0.65821505", "0.6576277", "0.65751797", "0.65729064", "0.6565648", "0.65637845", "0.6560861", "0.6560861", "0.65540624", "0.65441906", "0.6537034", "0.6536052", "0.65311366", "0.65074724", "0.64652073", "0.64433426", "0.6442025", "0.643742", "0.6431012", "0.6429944", "0.6423827", "0.6415726", "0.6397735", "0.6387696", "0.6358759" ]
0.7311632
12
Get number of parameters.
def num_parameters(self, train=True) -> torch.Tensor:
    params = torch.tensor(0, dtype=torch.float).to(self.out_projs[0])
    if train:
        for i in range(len(self.cutoffs)):
            n_proj = self.masks[i].l0_norm()
            params += (self.out_projs[i].size(0) + self.out_layers[i].weight.size(0)) * n_proj
    elif self.compiled_projs is not None and self.compiled_embeddings is not None:
        for i in range(len(self.cutoffs)):
            if len(self.indices[i]) == 0:
                warnings.warn("Mask is all zero in AdaptiveSoftmax layer-{}".format(i), RuntimeWarning)
            else:
                params += self.compiled_projs[i].numel() + \
                    self.compiled_embeddings[i].numel()
    return params
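The L0-regularised variant above differs only in how surviving entries are counted: instead of a magnitude threshold, the mask object reports its own expected non-zero count via l0_norm(). A minimal stand-in sketch (the HardConcreteMask below is an assumption for illustration, not the dataset's class):

import torch

class HardConcreteMask:
    # Hypothetical mask wrapper; a real L0 gate derives an expected count from its gate parameters.
    def __init__(self, n):
        self.log_alpha = torch.randn(n)

    def l0_norm(self) -> torch.Tensor:
        # Expected number of open gates under a sigmoid relaxation (illustrative only).
        return torch.sigmoid(self.log_alpha).sum()

buckets = [(20000, 128), (40000, 128)]   # (output rows, projection rows) per cutoff bucket
masks = [HardConcreteMask(512) for _ in buckets]

params = torch.tensor(0.0)
for (n_out, n_proj_rows), mask in zip(buckets, masks):
    params += (n_proj_rows + n_out) * mask.l0_norm()
print(float(params))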
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_num_parameters(self):\n return len(self.parameters)", "def get_num_params(self):\n if self.num_params is None:\n import inspect\n argspec = inspect.getfullargspec(self.get_code())\n if argspec.varargs or argspec.varkw:\n self.num_params = -1\n else:\n self.num_params = len(argspec.args)\n return self.num_params", "def number_of_parameters(self):\n return len(self.parameters)", "def get_num_params(self):\n if self.num_params is None:\n self.num_params = len(self.params)\n return self.num_params", "def num_param(self):\n return len(self._parameters)", "def n_parameters(self):\n return len(self._LIST_PARAMETERS)", "def num_parameters(self) -> int:\n return len(self) * self.convention.value", "def num_params(self) -> int:\n return self._num_params", "def num_params(self):", "def num_params(self):\n return len(self.params)", "def n_parameters(self):\n return self.pdm.n_parameters", "def N(self):\n return len(self.parameters)", "def _n_parameters(self):\n raise NotImplementedError", "def n_parameters(self):\n return sum([p.n_parameters for p in self.parameters])", "def get_parameter_numbers(self) -> int:\n # TODO(jeikeilim): return the number of parameter list of each layers.\n n_param = sum([x.numel() for x in self.model.parameters()])\n return n_param", "def calculate_num_params(self):\n num_params = 0\n for p in self.parameters():\n num_params += p.data.view(-1).size(0)\n return num_params", "def count_params(self):\n self.N = 0\n for name, param in self.model.named_parameters():\n self.N += param.numel()\n self.N_list.append(self.N)", "def get_params_count(self):\n\t\treturn call_sdk_function('PrlResult_GetParamsCount', self.handle)", "def countParam(self):\n return self.decl.args[mpi_array_calls[self.decl.name][self.pos]]", "def n_parameters(self) -> int:\n return nkjax.tree_size(self.parameters)", "def num_parameters(self) -> int:\n if self._model:\n return self._model.num_parameters()\n return 0", "def len_parameters(self):\n return len(self._Parameters._fields)", "def params_count(model):\n return np.sum([p.numel() for p in model.parameters()]).item()", "def params_count(model):\n return np.sum([p.numel() for p in model.parameters()]).item()", "def params_count(model):\n return np.sum([p.numel() for p in model.parameters()]).item()", "def count_parameters(model):\n return sum(p.numel() for p in model.parameters())", "def get_num_parameters(form):\n n_args = len(inspect.signature(form).parameters)\n tree = form(*[SyntaxTreeNode('_' + str(i)) for i in range(n_args)])\n return len(get_unique_parameters(tree))", "def nb_parameters(net):\n return sum(p.numel() for p in net.parameters())", "def numel(self) -> int:\n return sum(p.numel() for p in self.parameters)", "def _get_parameter_count(self):\n parameters_d = 5;\n size_h = self.model.size_h\n return (size_h - 1) + size_h * (\n (size_h - 1) + parameters_d + (self.model.size_aa - 1) + \n (self.model.size_ss - 1) + (self.model.size_cis - 1)\n )", "def GetNumberOfParameters(self):\n return _ITKCostFunctionsPython.itkCostFunction_GetNumberOfParameters(self)", "def num_params(self):\n return np.sum([torch.tensor(param.shape).prod()\n for param in self.parameters()])", "def getNumParameters(self):\n return _libsbml.KineticLaw_getNumParameters(self)", "def __len__(self) -> int:\n return len(self.parameters)", "def num_params(self):\r\n return np.sum([torch.tensor(param.shape).prod()\r\n for param in self.parameters()])", "def getNumParameters(self):\n return _libsbml.Model_getNumParameters(self)", "def dimensions(self):\n return 
len(self.parameter_names)", "def num_parameters(self) -> int:\n return len(self.w) + prod(self.v.shape) - len(self.v)", "def count_parameters(self):\n return sum(p.numel() for p in self.parameters() if p.requires_grad)/1e6", "def count_parameters(self):\n return sum(p.numel() for p in self.parameters() if p.requires_grad)/1e6", "def num_params(self):\n raise NotImplemented(\"Abstract, please implement in respective classes\")", "def count_params(model):\n param_count = np.sum([np.prod(p.size()) for p in model.parameters()])\n return param_count", "def count_parameters(model):\r\n count = 0\r\n for parameter in list(model.parameters()):\r\n subcount = 1\r\n for size in list(parameter.size()):\r\n subcount *= size\r\n count += subcount\r\n return count", "def num_parameters(model):\n return sum([param.nelement() for param in model.parameters()])", "def N(self) -> int:\n return self.params.N", "def n_tracers(self):\n # Extract parameters\n pzs = self.params[0]\n return len(pzs)", "def n_tracers(self):\n # Extract parameters\n pzs = self.params[0]\n return len(pzs)", "def count_params():\n param_count = np.sum([np.prod(x.get_shape().as_list()) for x in tf.global_variables()])\n return param_count", "def length(self):\n return int(np.sum([x.length for x in self.parameters]))", "def countParameters(self):\n return sum(p.numel() for p in self.model.parameters() if p.requires_grad)", "def n_variables(self):\n return sum([p.n_variables for p in self.parameters])", "def __len__(self):\n return len(self.params)", "def num_parameters(self) -> int:\n if vocabulary.is_empty(self.vocab, self.config.features.configured_namespaces):\n self._LOGGER.warning(\n \"At least one vocabulary of your features is still empty! \"\n \"The number of trainable parameters usually depends on the size of your vocabulary.\"\n )\n return sum(p.numel() for p in self._model.parameters())", "def num_hyperparameters(self):\n return self._hyperparameters.size", "def num_hyperparameters(self):\n return self._hyperparameters.size", "def _get_param_size(module: torch.nn.Module):\n return sum([p.numel() * torch.tensor([], dtype=p.dtype).element_size() for p in module.parameters()])", "def param_size(module:nn.Module):\n return np.sum(v.numel() for name, v in module.named_parameters() \\\n if \"auxiliary\" not in name)", "def count_params(all_params):\n nparams = len(all_params)\n nparam_vals = 0\n for i in range(nparams):\n param = all_params[i]\n param_shape = tuple(param.get_shape().as_list())\n nparam_vals += np.prod(param_shape)\n return nparam_vals", "def n_elements_one_param(self, param_name):\n p = self._get_one_param(param_name)\n return len(p)", "def count_parameters(also_print=True):\n total = 0\n if also_print:\n logging.info('Model Parameters:')\n for (_, v) in get_vars_to_save_and_restore().items():\n shape = v.get_shape()\n if also_print:\n logging.info('%s %s: %s', v.op.name, shape,\n format_number(shape.num_elements()))\n total += shape.num_elements()\n if also_print:\n logging.info('Total: %s', format_number(total))\n return total", "def local_param_size(self):\n size = 0\n for s in self.symbols[-1]:\n if self.symbols[-1][s].type == 'procedure': continue\n if not self.symbols[-1][s].isparam: continue\n size += 1\n return size", "def n_params(self, t_id):\n all_params = set()\n for i in range(t_id+1):\n model = self.get_model(i)\n all_params.update(model.parameters())\n all_params.update(model.buffers())\n\n return sum(map(torch.numel, all_params))", "def getNumArguments(self):\n return 
_libsbml.SBMLExternalValidator_getNumArguments(self)", "def get_num_variables(self):\n return len(self.variables)", "def n_params(model):\n \n n_params=sum([\n np.prod([tensor.size()[k] for k in range(len(tensor.size()))])\n for tensor in list(model.parameters())])\n \n return n_params", "def count_parameters(model, tunable_only: bool = True) -> int:\n if tunable_only:\n return sum(p.numel() for p in model.parameters() if p.requires_grad)\n else:\n return sum(p.numel() for p in model.parameters())", "def __len__(self):\n if self.args is None:\n return 0\n return len(vars(self.args))", "def count_params(layer):\n params = get_all_params(layer)\n shapes = [p.get_value().shape for p in params]\n counts = [np.prod(shape) for shape in shapes]\n return sum(counts)", "def num_arguments(self) -> int:\n if 'arguments' in self._event:\n return len(self._event['arguments'])\n return 0", "def numpoints(self):\n return len(self.pars) + 1 # so dof is 1", "def count_parameters(net):\r\n return sum(p.numel() for p in net.parameters() if p.requires_grad)", "def nVariables(self):\n return len(self.variables)", "def num_vars(self):\n return self.nvars", "def count_layer_params(layer):\n num_params = 0\n name, param_names, dims, _, _ = layer.get_layer_info()\n nparams = len(dims)\n for j in range(nparams):\n num_params += np.prod(dims[j])\n return num_params", "def count_parameters(model):\n\treturn sum(p.numel() for p in model.parameters() if p.requires_grad)", "def count_parameters(model: Tuple[tuple, tuple, tuple, tuple, str]) -> int:\n return sum(p.numel() for p in model.parameters() if p.requires_grad)", "def getNumArguments(self):\n return _libsbml.FunctionDefinition_getNumArguments(self)", "def get_num_args(function):\n import inspect\n args = inspect.getfullargspec(function)\n num_args = 0\n if args[0] is not None:\n num_args += len(args[0])\n if 'self' in args[0]:\n num_args -= 1\n if args[1] is not None:\n num_args += len(args[1])\n if args[2] is not None:\n num_args += len(args[2])\n # do not count defaults of keywords conatined in args[3]\n # if args[3] is not None:\n # num_args += len(args[3])\n return num_args", "def get_n_params(var_list):\n return int(np.sum([np.product(\n [x.value for x in var.get_shape()]) for var in var_list]))", "def num_params():\n total_num = 0\n for var in tf.trainable_variables():\n shape = var.get_shape()\n total_num += functools.reduce(operator.mul, [dim.value for dim in shape], 1)\n return total_num", "def count_parameters(sess):\n\n variables_names = [v.name for v in tf.trainable_variables()]\n values = sess.run(variables_names)\n n_params = 0\n\n for k, v in zip(variables_names, values):\n print '-'.center(140, '-')\n print '{:60s}\\t\\tShape: {:20s}\\t{:20} parameters'.format(k, v.shape, v.size)\n\n n_params += v.size\n\n print '-'.center(140, '-')\n print 'Total # parameters:\\t\\t{}\\n\\n'.format(n_params)\n\n return n_params", "def num_vars(self):\n return self._nvars", "def args_count(args: list) -> int:\n\n\treturn len(args)", "def get_arg_count(fun):\n if isclass(fun):\n return len(signature(fun.__call__).parameters)\n return len(signature(fun).parameters)", "def count(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"count\")", "def count_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)", "def count_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)", "def count_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)", "def 
count_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)", "def num_params(architecture): #\n \n total_parameters = 0\n for layer in range(1,len(architecture)+1):\n weight_dims = np.shape(architecture['layer{}'.format(layer)][2])\n try:\n params = weight_dims[0]*weight_dims[1]*weight_dims[2]\n except:\n try:\n params = weight_dims[0]*weight_dims[1]\n except:\n try:\n params = weight_dims[0]\n except:\n params = 0\n total_parameters += params\n return total_parameters", "def count_parameters(self) -> Tuple[int, int]:\n c_trained, c_total = 0, 0\n for p in self.parameters():\n increment = reduce(lambda x, y: x * y, p.size())\n if p.requires_grad:\n c_trained += increment\n c_total += increment\n return c_trained, c_total", "def n_global_parameters(self):\n return self.global_transform.n_parameters", "def getNumberOfKeys(self) -> int:\n ...", "def num_prunable_parameters(self) -> int:\n return sum(l.weight.numel() for l in self.emb_layers) + \\\n sum(weight.numel() for weight in self.emb_projs)", "def num_prunable_parameters(self) -> int:\n return sum(l.weight.numel() for l in self.emb_layers) + \\\n sum(weight.numel() for weight in self.emb_projs)", "def num_prunable_parameters(self) -> int:\n return sum(l.weight.numel() for l in self.out_layers) + \\\n sum(weight.numel() for weight in self.out_projs)", "def num_prunable_parameters(self) -> int:\n return sum(l.weight.numel() for l in self.out_layers) + \\\n sum(weight.numel() for weight in self.out_projs)", "def count_objects_of_size(self, n: int, **parameters: int) -> int:", "def count_parms(self):\n min_freq = self.get_high_pass_index()\n rejection = self.rejection_at(np.arange(min_freq, self.nf))\n if rejection.ndim < 2:\n return np.sum(rejection)\n else:\n return np.sum(rejection, axis=1)", "def count_params(model: torch.nn.Module) -> int:\n return sum(p.numel() for p in model.parameters() if p.requires_grad)", "def count_params(model: torch.nn.Module) -> int:\n return sum(p.numel() for p in model.parameters() if p.requires_grad)" ]
[ "0.8879826", "0.8658821", "0.8643426", "0.8621051", "0.8559576", "0.8443666", "0.8400023", "0.83914536", "0.8350942", "0.82651836", "0.8234711", "0.8157073", "0.8155948", "0.8069919", "0.8065736", "0.8064464", "0.8031127", "0.80070335", "0.79923403", "0.79663205", "0.7945741", "0.7935545", "0.7818101", "0.7818101", "0.7818101", "0.78120565", "0.7725746", "0.7723024", "0.77179414", "0.7628572", "0.76046556", "0.7572248", "0.7570735", "0.75700164", "0.7557896", "0.75521433", "0.7539009", "0.75206184", "0.7508909", "0.7508909", "0.7490975", "0.74879545", "0.7417157", "0.7374459", "0.7350927", "0.73338795", "0.73338795", "0.73335385", "0.7220937", "0.71996504", "0.71902335", "0.718394", "0.71831185", "0.7178781", "0.7178781", "0.7155224", "0.7124053", "0.7119426", "0.71173084", "0.70878106", "0.7076072", "0.70697904", "0.70682377", "0.7064471", "0.7053471", "0.70435053", "0.7036322", "0.70234597", "0.7012496", "0.7012374", "0.7012285", "0.7010517", "0.69953436", "0.6975467", "0.69283384", "0.6919441", "0.69165057", "0.6915893", "0.6900342", "0.68976724", "0.6886287", "0.68840647", "0.6871395", "0.68657637", "0.6849131", "0.6837043", "0.6837043", "0.6837043", "0.6837043", "0.6835951", "0.6819374", "0.681689", "0.6787406", "0.6765595", "0.6765595", "0.67568725", "0.67568725", "0.6751849", "0.6710731", "0.6707385", "0.6707385" ]
0.0
-1
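The parameter-counting snippets in the negatives listed above all reduce to the same PyTorch idiom: sum the element counts of the model's parameter tensors, optionally restricted to the trainable ones. A minimal, self-contained sketch of that idiom follows; the helper mirrors the snippets above, while the throwaway Sequential model and its sizes are made up purely for illustration and are not part of the dataset.

import torch.nn as nn

def count_parameters(model: nn.Module, trainable_only: bool = True) -> int:
    # Sum element counts over all parameter tensors; with trainable_only set,
    # keep only tensors that still receive gradients (i.e. are not frozen).
    return sum(p.numel() for p in model.parameters()
               if p.requires_grad or not trainable_only)

# Hypothetical usage on a throwaway two-layer model.
model = nn.Sequential(nn.Linear(10, 20), nn.ReLU(), nn.Linear(20, 2))
print(count_parameters(model))         # (10*20 + 20) + (20*2 + 2) = 262
print(count_parameters(model, False))  # identical here, since nothing is frozen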
Get number of prunable parameters
def num_prunable_parameters(self) -> int: return sum(l.weight.numel() for l in self.out_layers) + \ sum(weight.numel() for weight in self.out_projs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def n_parameters(self):\n return self.pdm.n_parameters", "def num_params(self):", "def get_parameter_numbers(self) -> int:\n # TODO(jeikeilim): return the number of parameter list of each layers.\n n_param = sum([x.numel() for x in self.model.parameters()])\n return n_param", "def n_parameters(self):\n return sum([p.n_parameters for p in self.parameters])", "def n_parameters(self) -> int:\n return nkjax.tree_size(self.parameters)", "def count_parameters(model):\n return sum(p.numel() for p in model.parameters())", "def N(self):\n return len(self.parameters)", "def number_of_parameters(self):\n return len(self.parameters)", "def get_num_parameters(self):\n return len(self.parameters)", "def num_prunable_parameters(self) -> int:\n return sum(l.weight.numel() for l in self.emb_layers) + \\\n sum(weight.numel() for weight in self.emb_projs)", "def num_prunable_parameters(self) -> int:\n return sum(l.weight.numel() for l in self.emb_layers) + \\\n sum(weight.numel() for weight in self.emb_projs)", "def count_params(self):\n self.N = 0\n for name, param in self.model.named_parameters():\n self.N += param.numel()\n self.N_list.append(self.N)", "def count_parameters(self):\n return sum(p.numel() for p in self.parameters() if p.requires_grad)/1e6", "def count_parameters(self):\n return sum(p.numel() for p in self.parameters() if p.requires_grad)/1e6", "def get_num_params(self):\n if self.num_params is None:\n self.num_params = len(self.params)\n return self.num_params", "def params_count(model):\n return np.sum([p.numel() for p in model.parameters()]).item()", "def params_count(model):\n return np.sum([p.numel() for p in model.parameters()]).item()", "def params_count(model):\n return np.sum([p.numel() for p in model.parameters()]).item()", "def calculate_num_params(self):\n num_params = 0\n for p in self.parameters():\n num_params += p.data.view(-1).size(0)\n return num_params", "def count_parameters(model, tunable_only: bool = True) -> int:\n if tunable_only:\n return sum(p.numel() for p in model.parameters() if p.requires_grad)\n else:\n return sum(p.numel() for p in model.parameters())", "def n_parameters(self):\n return len(self._LIST_PARAMETERS)", "def num_parameters(self) -> int:\n return len(self) * self.convention.value", "def numel(self) -> int:\n return sum(p.numel() for p in self.parameters)", "def count_params():\n param_count = np.sum([np.prod(x.get_shape().as_list()) for x in tf.global_variables()])\n return param_count", "def _n_parameters(self):\n raise NotImplementedError", "def nb_parameters(net):\n return sum(p.numel() for p in net.parameters())", "def num_parameters(model):\n return sum([param.nelement() for param in model.parameters()])", "def num_params(self) -> int:\n return self._num_params", "def n_variables(self):\n return sum([p.n_variables for p in self.parameters])", "def num_parameters(self) -> int:\n if self._model:\n return self._model.num_parameters()\n return 0", "def num_parameters(self) -> int:\n if vocabulary.is_empty(self.vocab, self.config.features.configured_namespaces):\n self._LOGGER.warning(\n \"At least one vocabulary of your features is still empty! 
\"\n \"The number of trainable parameters usually depends on the size of your vocabulary.\"\n )\n return sum(p.numel() for p in self._model.parameters())", "def count_params(model):\n param_count = np.sum([np.prod(p.size()) for p in model.parameters()])\n return param_count", "def get_num_params(self):\n if self.num_params is None:\n import inspect\n argspec = inspect.getfullargspec(self.get_code())\n if argspec.varargs or argspec.varkw:\n self.num_params = -1\n else:\n self.num_params = len(argspec.args)\n return self.num_params", "def count_parameters(model):\r\n count = 0\r\n for parameter in list(model.parameters()):\r\n subcount = 1\r\n for size in list(parameter.size()):\r\n subcount *= size\r\n count += subcount\r\n return count", "def num_params(self):\n return len(self.params)", "def num_param(self):\n return len(self._parameters)", "def num_params(self):\r\n return np.sum([torch.tensor(param.shape).prod()\r\n for param in self.parameters()])", "def num_params(self):\n return np.sum([torch.tensor(param.shape).prod()\n for param in self.parameters()])", "def count_parms(self):\n min_freq = self.get_high_pass_index()\n rejection = self.rejection_at(np.arange(min_freq, self.nf))\n if rejection.ndim < 2:\n return np.sum(rejection)\n else:\n return np.sum(rejection, axis=1)", "def countParameters(self):\n return sum(p.numel() for p in self.model.parameters() if p.requires_grad)", "def _get_parameter_count(self):\n parameters_d = 5;\n size_h = self.model.size_h\n return (size_h - 1) + size_h * (\n (size_h - 1) + parameters_d + (self.model.size_aa - 1) + \n (self.model.size_ss - 1) + (self.model.size_cis - 1)\n )", "def count_params(model):\n total = 0\n for x in model.trainable_variables:\n total += np.prod(x.shape)\n return total", "def n_tracers(self):\n # Extract parameters\n pzs = self.params[0]\n return len(pzs)", "def n_tracers(self):\n # Extract parameters\n pzs = self.params[0]\n return len(pzs)", "def N(self) -> int:\n return self.params.N", "def countParam(self):\n return self.decl.args[mpi_array_calls[self.decl.name][self.pos]]", "def param_size(module:nn.Module):\n return np.sum(v.numel() for name, v in module.named_parameters() \\\n if \"auxiliary\" not in name)", "def nVariables(self):\n return len(self.variables)", "def count_parameters(also_print=True):\n total = 0\n if also_print:\n logging.info('Model Parameters:')\n for (_, v) in get_vars_to_save_and_restore().items():\n shape = v.get_shape()\n if also_print:\n logging.info('%s %s: %s', v.op.name, shape,\n format_number(shape.num_elements()))\n total += shape.num_elements()\n if also_print:\n logging.info('Total: %s', format_number(total))\n return total", "def count_parameters():\n total_parameters = 0\n for variable in tf.trainable_variables():\n # shape is an array of tf.Dimension\n name = variable.name\n shape = variable.get_shape()\n #print(shape)\n #print(len(shape))\n variable_parameters = 1\n for dim in shape:\n #print(dim)\n variable_parameters *= dim.value\n print(name, [dim for dim in shape], variable_parameters)\n total_parameters += variable_parameters\n print('Number of trainable parameters = {}'.format(total_parameters))", "def count_parameters(model):\n\treturn sum(p.numel() for p in model.parameters() if p.requires_grad)", "def n_params(model):\n \n n_params=sum([\n np.prod([tensor.size()[k] for k in range(len(tensor.size()))])\n for tensor in list(model.parameters())])\n \n return n_params", "def n_global_parameters(self):\n return self.global_transform.n_parameters", "def count_parameters(net):\r\n 
return sum(p.numel() for p in net.parameters() if p.requires_grad)", "def n_params(self, t_id):\n all_params = set()\n for i in range(t_id+1):\n model = self.get_model(i)\n all_params.update(model.parameters())\n all_params.update(model.buffers())\n\n return sum(map(torch.numel, all_params))", "def num_params():\n total_num = 0\n for var in tf.trainable_variables():\n shape = var.get_shape()\n total_num += functools.reduce(operator.mul, [dim.value for dim in shape], 1)\n return total_num", "def count_parameters(self) -> Tuple[int, int]:\n c_trained, c_total = 0, 0\n for p in self.parameters():\n increment = reduce(lambda x, y: x * y, p.size())\n if p.requires_grad:\n c_trained += increment\n c_total += increment\n return c_trained, c_total", "def num_trainable_parameters(self) -> int:\n if vocabulary.is_empty(self.vocab, self.config.features.configured_namespaces):\n self._LOGGER.warning(\n \"At least one vocabulary of your features is still empty! \"\n \"The number of trainable parameters usually depends on the size of your vocabulary.\"\n )\n return sum(p.numel() for p in self._model.parameters() if p.requires_grad)", "def num_parameters(self) -> int:\n return len(self.w) + prod(self.v.shape) - len(self.v)", "def get_n_parameters(self, exclude_pop_model=False):\n if (self._population_model is None) or exclude_pop_model:\n n_parameters = self._mechanistic_model.n_parameters()\n for error_model in self._error_models:\n n_parameters += error_model.n_parameters()\n return n_parameters\n\n return self._population_model.n_parameters()", "def __len__(self) -> int:\n return len(self.parameters)", "def count_params(layer):\n params = get_all_params(layer)\n shapes = [p.get_value().shape for p in params]\n counts = [np.prod(shape) for shape in shapes]\n return sum(counts)", "def count_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)", "def count_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)", "def count_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)", "def count_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)", "def len_parameters(self):\n return len(self._Parameters._fields)", "def nvar(self):\n return len(self.__vars)", "def local_param_size(self):\n size = 0\n for s in self.symbols[-1]:\n if self.symbols[-1][s].type == 'procedure': continue\n if not self.symbols[-1][s].isparam: continue\n size += 1\n return size", "def get_params_count(self):\n\t\treturn call_sdk_function('PrlResult_GetParamsCount', self.handle)", "def get_parameter_number(net):\n # print(type(net.parameters()))\n total_num = sum(p.numel() for p in net.parameters())\n trainable_num = sum(p.numel() for p in net.parameters() if p.requires_grad)\n return {'Total': total_num, 'Trainable': trainable_num}", "def count_params(all_params):\n nparams = len(all_params)\n nparam_vals = 0\n for i in range(nparams):\n param = all_params[i]\n param_shape = tuple(param.get_shape().as_list())\n nparam_vals += np.prod(param_shape)\n return nparam_vals", "def num_vars(self):\n return self.nvars", "def dimensions(self):\n return len(self.parameter_names)", "def count_parameters(model: Tuple[tuple, tuple, tuple, tuple, str]) -> int:\n return sum(p.numel() for p in model.parameters() if p.requires_grad)", "def num_parameters(self, train=True) -> torch.Tensor:\n params = torch.tensor(0, dtype=torch.float).to(self.out_projs[0])\n if train:\n for i in range(len(self.cutoffs)):\n 
n_proj = (self.masks[i].data.abs() > self.epsilon).sum()\n params += (self.out_projs[i].size(0) + self.out_layers[i].weight.size(0)) * n_proj\n elif self.compiled_projs is not None and self.compiled_embeddings is not None:\n for i in range(len(self.cutoffs)):\n if len(self.indices[i]) == 0:\n warnings.warn(\"Mask is all zero in AdaptiveSoftmax layer-{}\".format(i), RuntimeWarning)\n else:\n params += self.compiled_projs[i].numel() + \\\n self.compiled_embeddings[i].numel()\n return params", "def _get_param_size(module: torch.nn.Module):\n return sum([p.numel() * torch.tensor([], dtype=p.dtype).element_size() for p in module.parameters()])", "def num_parameters(self, train=True) -> torch.Tensor:\n params = torch.tensor(0, dtype=torch.float).to(self.emb_projs[0])\n if train:\n for i in range(len(self.cutoffs)):\n n_proj = (self.masks[i].data.abs() > self.epsilon).sum()\n params += (self.emb_projs[i].size(0) + self.emb_layers[i].weight.size(0)) * n_proj\n elif self.compiled_projs is not None and self.compiled_embeddings is not None:\n for i in range(len(self.cutoffs)):\n if len(self.indices[i]) == 0:\n warnings.warn(\"Mask is all zero in layer-{} AdaptiveEmbedding\".format(i), RuntimeWarning)\n else:\n params += self.compiled_projs[i].numel() + \\\n self.compiled_embeddings[i].numel()\n return params", "def get_total_trainable_parameter_size():\n total_parameters = 0\n import tensorflow as tf\n for variable in tf.trainable_variables():\n # shape is an array of tf.Dimension\n total_parameters += np.product([x.value for x in variable.get_shape()])\n return total_parameters", "def count_parameters(sess):\n\n variables_names = [v.name for v in tf.trainable_variables()]\n values = sess.run(variables_names)\n n_params = 0\n\n for k, v in zip(variables_names, values):\n print '-'.center(140, '-')\n print '{:60s}\\t\\tShape: {:20s}\\t{:20} parameters'.format(k, v.shape, v.size)\n\n n_params += v.size\n\n print '-'.center(140, '-')\n print 'Total # parameters:\\t\\t{}\\n\\n'.format(n_params)\n\n return n_params", "def count_params(model: torch.nn.Module) -> int:\n return sum(p.numel() for p in model.parameters() if p.requires_grad)", "def count_params(model: torch.nn.Module) -> int:\n return sum(p.numel() for p in model.parameters() if p.requires_grad)", "def getNumParameters(self):\n return _libsbml.Model_getNumParameters(self)", "def num_params(self):\n raise NotImplemented(\"Abstract, please implement in respective classes\")", "def getNumParameters(self):\n return _libsbml.KineticLaw_getNumParameters(self)", "def __len__(self):\n return self.nb_iterations", "def get_num_variables(self):\n return len(self.variables)", "def GetNumberOfParameters(self):\n return _ITKCostFunctionsPython.itkCostFunction_GetNumberOfParameters(self)", "def get_n_params(var_list):\n return int(np.sum([np.product(\n [x.value for x in var.get_shape()]) for var in var_list]))", "def num_vars(self):\n return len(self.bounds.lb)", "def __len__(self) -> int:\n return len(self.variables)", "def num_vars(self):\n return self._nvars", "def numpoints(self):\n return len(self.pars) + 1 # so dof is 1", "def count_total_params(model):\n trainable_count = int(\n numpy.sum([K.count_params(p) for p in set(model.trainable_weights)]))\n non_trainable_count = int(\n numpy.sum([K.count_params(p) for p in set(model.non_trainable_weights)]))\n return trainable_count, non_trainable_count", "def __len__(self):\n # Product function that can handle iterables (np.product can't).\n product = partial(reduce, operator.mul)\n return sum(product(len(v) 
for v in p.values()) if p else 1\n for p in self.param_grid)", "def __len__(self):\n return len(self.params)", "def num_parameters(self, train=True) -> torch.Tensor:\n params = torch.tensor(0, dtype=torch.float).to(self.out_projs[0])\n if train:\n for i in range(len(self.cutoffs)):\n n_proj = self.masks[i].l0_norm()\n params += (self.out_projs[i].size(0) + self.out_layers[i].weight.size(0)) * n_proj\n elif self.compiled_projs is not None and self.compiled_embeddings is not None:\n for i in range(len(self.cutoffs)):\n if len(self.indices[i]) == 0:\n warnings.warn(\"Mask is all zero in AdaptiveSoftmax layer-{}\".format(i), RuntimeWarning)\n else:\n params += self.compiled_projs[i].numel() + \\\n self.compiled_embeddings[i].numel()\n return params", "def num_parameters(self, train=True) -> torch.Tensor:\n params = torch.tensor(0, dtype=torch.float).to(self.emb_projs[0])\n if train:\n for i in range(len(self.cutoffs)):\n n_proj = self.masks[i].l0_norm()\n params += (self.emb_projs[i].size(0) + self.emb_layers[i].weight.size(0)) * n_proj\n elif self.compiled_projs is not None and self.compiled_embeddings is not None:\n for i in range(len(self.cutoffs)):\n if len(self.indices[i]) == 0:\n warnings.warn(\"Mask is all zero in layer-{} AdaptiveEmbedding\".format(i), RuntimeWarning)\n else:\n params += self.compiled_projs[i].numel() + \\\n self.compiled_embeddings[i].numel()\n return params", "def calculate_num_params(self) -> None:\n for name, param in self.module.named_parameters():\n self.num_params += param.nelement()\n self.trainable &= param.requires_grad\n\n if name == \"weight\":\n ksize = list(param.size())\n # to make [in_shape, out_shape, ksize, ksize]\n if len(ksize) > 1:\n ksize[0], ksize[1] = ksize[1], ksize[0]\n self.kernel_size = ksize\n\n # RNN modules have inner weights such as weight_ih_l0\n elif \"weight\" in name:\n self.inner_layers[name] = list(param.size())" ]
[ "0.75405735", "0.75211614", "0.738875", "0.7387541", "0.73810554", "0.73747927", "0.7355472", "0.73516285", "0.73359424", "0.73142964", "0.73142964", "0.7266938", "0.72314495", "0.72314495", "0.72164834", "0.7203205", "0.7203205", "0.7203205", "0.7185691", "0.71791095", "0.7174818", "0.7153565", "0.7147031", "0.71392816", "0.71322024", "0.7110126", "0.7075877", "0.7061964", "0.70428026", "0.7023761", "0.7022947", "0.7002628", "0.69975686", "0.6995272", "0.69509196", "0.69483453", "0.6928968", "0.6928118", "0.69278497", "0.69151187", "0.6909454", "0.6865862", "0.6845796", "0.6845796", "0.68222374", "0.6812105", "0.68063724", "0.6789041", "0.6788805", "0.67466265", "0.6745521", "0.67444295", "0.67302775", "0.6720803", "0.6719991", "0.67177933", "0.6702116", "0.66793615", "0.6676897", "0.6676889", "0.6667523", "0.66626525", "0.6658684", "0.6658684", "0.6658684", "0.6658684", "0.6657855", "0.66550386", "0.66406214", "0.6617321", "0.66127974", "0.6611052", "0.66075385", "0.66049236", "0.65821505", "0.6576277", "0.65751797", "0.65729064", "0.6565648", "0.65637845", "0.6560861", "0.6560861", "0.65540624", "0.65441906", "0.6537034", "0.6536052", "0.65311366", "0.65074724", "0.64652073", "0.64433426", "0.6442025", "0.643742", "0.6431012", "0.6429944", "0.6423827", "0.6415726", "0.6397735", "0.6387696", "0.6358759" ]
0.7311632
11
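The document in the record above sums weight.numel() over the out_layers and out_projs of an adaptive-softmax-style head, skipping biases. A hedged, runnable sketch of that same bookkeeping on a toy module is shown below; the attribute names follow the record, but the class name and all layer sizes are invented for illustration.

import torch
import torch.nn as nn

class ToyAdaptiveHead(nn.Module):
    # Minimal stand-in for the adaptive-softmax head in the record above;
    # the two vocabulary-slice sizes (100 and 50) are invented.
    def __init__(self):
        super().__init__()
        self.out_layers = nn.ModuleList([nn.Linear(32, 100), nn.Linear(16, 50)])
        self.out_projs = nn.ParameterList([nn.Parameter(torch.zeros(64, 32)),
                                           nn.Parameter(torch.zeros(64, 16))])

    def num_prunable_parameters(self) -> int:
        # Only output-layer weights and projection matrices are counted;
        # biases are deliberately left out, as in the record's document.
        return sum(l.weight.numel() for l in self.out_layers) + \
               sum(weight.numel() for weight in self.out_projs)

head = ToyAdaptiveHead()
print(head.num_prunable_parameters())  # 32*100 + 16*50 + 64*32 + 64*16 = 7072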
Get number of parameters.
def num_parameters(self, train=True) -> torch.Tensor: params = torch.tensor(0, dtype=torch.float).to(self.out_projs[0]) if train: for i in range(len(self.cutoffs)): n_proj = (self.masks[i].data.abs() > self.epsilon).sum() params += (self.out_projs[i].size(0) + self.out_layers[i].weight.size(0)) * n_proj elif self.compiled_projs is not None and self.compiled_embeddings is not None: for i in range(len(self.cutoffs)): if len(self.indices[i]) == 0: warnings.warn("Mask is all zero in AdaptiveSoftmax layer-{}".format(i), RuntimeWarning) else: params += self.compiled_projs[i].numel() + \ self.compiled_embeddings[i].numel() return params
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_num_parameters(self):\n return len(self.parameters)", "def get_num_params(self):\n if self.num_params is None:\n import inspect\n argspec = inspect.getfullargspec(self.get_code())\n if argspec.varargs or argspec.varkw:\n self.num_params = -1\n else:\n self.num_params = len(argspec.args)\n return self.num_params", "def number_of_parameters(self):\n return len(self.parameters)", "def get_num_params(self):\n if self.num_params is None:\n self.num_params = len(self.params)\n return self.num_params", "def num_param(self):\n return len(self._parameters)", "def n_parameters(self):\n return len(self._LIST_PARAMETERS)", "def num_parameters(self) -> int:\n return len(self) * self.convention.value", "def num_params(self) -> int:\n return self._num_params", "def num_params(self):", "def num_params(self):\n return len(self.params)", "def n_parameters(self):\n return self.pdm.n_parameters", "def N(self):\n return len(self.parameters)", "def _n_parameters(self):\n raise NotImplementedError", "def n_parameters(self):\n return sum([p.n_parameters for p in self.parameters])", "def get_parameter_numbers(self) -> int:\n # TODO(jeikeilim): return the number of parameter list of each layers.\n n_param = sum([x.numel() for x in self.model.parameters()])\n return n_param", "def calculate_num_params(self):\n num_params = 0\n for p in self.parameters():\n num_params += p.data.view(-1).size(0)\n return num_params", "def count_params(self):\n self.N = 0\n for name, param in self.model.named_parameters():\n self.N += param.numel()\n self.N_list.append(self.N)", "def get_params_count(self):\n\t\treturn call_sdk_function('PrlResult_GetParamsCount', self.handle)", "def countParam(self):\n return self.decl.args[mpi_array_calls[self.decl.name][self.pos]]", "def n_parameters(self) -> int:\n return nkjax.tree_size(self.parameters)", "def num_parameters(self) -> int:\n if self._model:\n return self._model.num_parameters()\n return 0", "def len_parameters(self):\n return len(self._Parameters._fields)", "def params_count(model):\n return np.sum([p.numel() for p in model.parameters()]).item()", "def params_count(model):\n return np.sum([p.numel() for p in model.parameters()]).item()", "def params_count(model):\n return np.sum([p.numel() for p in model.parameters()]).item()", "def count_parameters(model):\n return sum(p.numel() for p in model.parameters())", "def get_num_parameters(form):\n n_args = len(inspect.signature(form).parameters)\n tree = form(*[SyntaxTreeNode('_' + str(i)) for i in range(n_args)])\n return len(get_unique_parameters(tree))", "def nb_parameters(net):\n return sum(p.numel() for p in net.parameters())", "def numel(self) -> int:\n return sum(p.numel() for p in self.parameters)", "def _get_parameter_count(self):\n parameters_d = 5;\n size_h = self.model.size_h\n return (size_h - 1) + size_h * (\n (size_h - 1) + parameters_d + (self.model.size_aa - 1) + \n (self.model.size_ss - 1) + (self.model.size_cis - 1)\n )", "def GetNumberOfParameters(self):\n return _ITKCostFunctionsPython.itkCostFunction_GetNumberOfParameters(self)", "def num_params(self):\n return np.sum([torch.tensor(param.shape).prod()\n for param in self.parameters()])", "def getNumParameters(self):\n return _libsbml.KineticLaw_getNumParameters(self)", "def __len__(self) -> int:\n return len(self.parameters)", "def num_params(self):\r\n return np.sum([torch.tensor(param.shape).prod()\r\n for param in self.parameters()])", "def getNumParameters(self):\n return _libsbml.Model_getNumParameters(self)", "def dimensions(self):\n return 
len(self.parameter_names)", "def num_parameters(self) -> int:\n return len(self.w) + prod(self.v.shape) - len(self.v)", "def count_parameters(self):\n return sum(p.numel() for p in self.parameters() if p.requires_grad)/1e6", "def count_parameters(self):\n return sum(p.numel() for p in self.parameters() if p.requires_grad)/1e6", "def num_params(self):\n raise NotImplemented(\"Abstract, please implement in respective classes\")", "def count_params(model):\n param_count = np.sum([np.prod(p.size()) for p in model.parameters()])\n return param_count", "def count_parameters(model):\r\n count = 0\r\n for parameter in list(model.parameters()):\r\n subcount = 1\r\n for size in list(parameter.size()):\r\n subcount *= size\r\n count += subcount\r\n return count", "def num_parameters(model):\n return sum([param.nelement() for param in model.parameters()])", "def N(self) -> int:\n return self.params.N", "def n_tracers(self):\n # Extract parameters\n pzs = self.params[0]\n return len(pzs)", "def n_tracers(self):\n # Extract parameters\n pzs = self.params[0]\n return len(pzs)", "def count_params():\n param_count = np.sum([np.prod(x.get_shape().as_list()) for x in tf.global_variables()])\n return param_count", "def length(self):\n return int(np.sum([x.length for x in self.parameters]))", "def countParameters(self):\n return sum(p.numel() for p in self.model.parameters() if p.requires_grad)", "def n_variables(self):\n return sum([p.n_variables for p in self.parameters])", "def __len__(self):\n return len(self.params)", "def num_parameters(self) -> int:\n if vocabulary.is_empty(self.vocab, self.config.features.configured_namespaces):\n self._LOGGER.warning(\n \"At least one vocabulary of your features is still empty! \"\n \"The number of trainable parameters usually depends on the size of your vocabulary.\"\n )\n return sum(p.numel() for p in self._model.parameters())", "def num_hyperparameters(self):\n return self._hyperparameters.size", "def num_hyperparameters(self):\n return self._hyperparameters.size", "def _get_param_size(module: torch.nn.Module):\n return sum([p.numel() * torch.tensor([], dtype=p.dtype).element_size() for p in module.parameters()])", "def param_size(module:nn.Module):\n return np.sum(v.numel() for name, v in module.named_parameters() \\\n if \"auxiliary\" not in name)", "def count_params(all_params):\n nparams = len(all_params)\n nparam_vals = 0\n for i in range(nparams):\n param = all_params[i]\n param_shape = tuple(param.get_shape().as_list())\n nparam_vals += np.prod(param_shape)\n return nparam_vals", "def n_elements_one_param(self, param_name):\n p = self._get_one_param(param_name)\n return len(p)", "def count_parameters(also_print=True):\n total = 0\n if also_print:\n logging.info('Model Parameters:')\n for (_, v) in get_vars_to_save_and_restore().items():\n shape = v.get_shape()\n if also_print:\n logging.info('%s %s: %s', v.op.name, shape,\n format_number(shape.num_elements()))\n total += shape.num_elements()\n if also_print:\n logging.info('Total: %s', format_number(total))\n return total", "def local_param_size(self):\n size = 0\n for s in self.symbols[-1]:\n if self.symbols[-1][s].type == 'procedure': continue\n if not self.symbols[-1][s].isparam: continue\n size += 1\n return size", "def n_params(self, t_id):\n all_params = set()\n for i in range(t_id+1):\n model = self.get_model(i)\n all_params.update(model.parameters())\n all_params.update(model.buffers())\n\n return sum(map(torch.numel, all_params))", "def getNumArguments(self):\n return 
_libsbml.SBMLExternalValidator_getNumArguments(self)", "def get_num_variables(self):\n return len(self.variables)", "def n_params(model):\n \n n_params=sum([\n np.prod([tensor.size()[k] for k in range(len(tensor.size()))])\n for tensor in list(model.parameters())])\n \n return n_params", "def count_parameters(model, tunable_only: bool = True) -> int:\n if tunable_only:\n return sum(p.numel() for p in model.parameters() if p.requires_grad)\n else:\n return sum(p.numel() for p in model.parameters())", "def __len__(self):\n if self.args is None:\n return 0\n return len(vars(self.args))", "def count_params(layer):\n params = get_all_params(layer)\n shapes = [p.get_value().shape for p in params]\n counts = [np.prod(shape) for shape in shapes]\n return sum(counts)", "def num_arguments(self) -> int:\n if 'arguments' in self._event:\n return len(self._event['arguments'])\n return 0", "def numpoints(self):\n return len(self.pars) + 1 # so dof is 1", "def count_parameters(net):\r\n return sum(p.numel() for p in net.parameters() if p.requires_grad)", "def nVariables(self):\n return len(self.variables)", "def num_vars(self):\n return self.nvars", "def count_layer_params(layer):\n num_params = 0\n name, param_names, dims, _, _ = layer.get_layer_info()\n nparams = len(dims)\n for j in range(nparams):\n num_params += np.prod(dims[j])\n return num_params", "def count_parameters(model):\n\treturn sum(p.numel() for p in model.parameters() if p.requires_grad)", "def count_parameters(model: Tuple[tuple, tuple, tuple, tuple, str]) -> int:\n return sum(p.numel() for p in model.parameters() if p.requires_grad)", "def getNumArguments(self):\n return _libsbml.FunctionDefinition_getNumArguments(self)", "def get_num_args(function):\n import inspect\n args = inspect.getfullargspec(function)\n num_args = 0\n if args[0] is not None:\n num_args += len(args[0])\n if 'self' in args[0]:\n num_args -= 1\n if args[1] is not None:\n num_args += len(args[1])\n if args[2] is not None:\n num_args += len(args[2])\n # do not count defaults of keywords conatined in args[3]\n # if args[3] is not None:\n # num_args += len(args[3])\n return num_args", "def get_n_params(var_list):\n return int(np.sum([np.product(\n [x.value for x in var.get_shape()]) for var in var_list]))", "def num_params():\n total_num = 0\n for var in tf.trainable_variables():\n shape = var.get_shape()\n total_num += functools.reduce(operator.mul, [dim.value for dim in shape], 1)\n return total_num", "def count_parameters(sess):\n\n variables_names = [v.name for v in tf.trainable_variables()]\n values = sess.run(variables_names)\n n_params = 0\n\n for k, v in zip(variables_names, values):\n print '-'.center(140, '-')\n print '{:60s}\\t\\tShape: {:20s}\\t{:20} parameters'.format(k, v.shape, v.size)\n\n n_params += v.size\n\n print '-'.center(140, '-')\n print 'Total # parameters:\\t\\t{}\\n\\n'.format(n_params)\n\n return n_params", "def num_vars(self):\n return self._nvars", "def args_count(args: list) -> int:\n\n\treturn len(args)", "def get_arg_count(fun):\n if isclass(fun):\n return len(signature(fun.__call__).parameters)\n return len(signature(fun).parameters)", "def count(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"count\")", "def count_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)", "def count_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)", "def count_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)", "def 
count_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)", "def num_params(architecture): #\n \n total_parameters = 0\n for layer in range(1,len(architecture)+1):\n weight_dims = np.shape(architecture['layer{}'.format(layer)][2])\n try:\n params = weight_dims[0]*weight_dims[1]*weight_dims[2]\n except:\n try:\n params = weight_dims[0]*weight_dims[1]\n except:\n try:\n params = weight_dims[0]\n except:\n params = 0\n total_parameters += params\n return total_parameters", "def count_parameters(self) -> Tuple[int, int]:\n c_trained, c_total = 0, 0\n for p in self.parameters():\n increment = reduce(lambda x, y: x * y, p.size())\n if p.requires_grad:\n c_trained += increment\n c_total += increment\n return c_trained, c_total", "def n_global_parameters(self):\n return self.global_transform.n_parameters", "def getNumberOfKeys(self) -> int:\n ...", "def num_prunable_parameters(self) -> int:\n return sum(l.weight.numel() for l in self.emb_layers) + \\\n sum(weight.numel() for weight in self.emb_projs)", "def num_prunable_parameters(self) -> int:\n return sum(l.weight.numel() for l in self.emb_layers) + \\\n sum(weight.numel() for weight in self.emb_projs)", "def num_prunable_parameters(self) -> int:\n return sum(l.weight.numel() for l in self.out_layers) + \\\n sum(weight.numel() for weight in self.out_projs)", "def num_prunable_parameters(self) -> int:\n return sum(l.weight.numel() for l in self.out_layers) + \\\n sum(weight.numel() for weight in self.out_projs)", "def count_objects_of_size(self, n: int, **parameters: int) -> int:", "def count_parms(self):\n min_freq = self.get_high_pass_index()\n rejection = self.rejection_at(np.arange(min_freq, self.nf))\n if rejection.ndim < 2:\n return np.sum(rejection)\n else:\n return np.sum(rejection, axis=1)", "def count_params(model: torch.nn.Module) -> int:\n return sum(p.numel() for p in model.parameters() if p.requires_grad)", "def count_params(model: torch.nn.Module) -> int:\n return sum(p.numel() for p in model.parameters() if p.requires_grad)" ]
[ "0.8879826", "0.8658821", "0.8643426", "0.8621051", "0.8559576", "0.8443666", "0.8400023", "0.83914536", "0.8350942", "0.82651836", "0.8234711", "0.8157073", "0.8155948", "0.8069919", "0.8065736", "0.8064464", "0.8031127", "0.80070335", "0.79923403", "0.79663205", "0.7945741", "0.7935545", "0.7818101", "0.7818101", "0.7818101", "0.78120565", "0.7725746", "0.7723024", "0.77179414", "0.7628572", "0.76046556", "0.7572248", "0.7570735", "0.75700164", "0.7557896", "0.75521433", "0.7539009", "0.75206184", "0.7508909", "0.7508909", "0.7490975", "0.74879545", "0.7417157", "0.7374459", "0.7350927", "0.73338795", "0.73338795", "0.73335385", "0.7220937", "0.71996504", "0.71902335", "0.718394", "0.71831185", "0.7178781", "0.7178781", "0.7155224", "0.7124053", "0.7119426", "0.71173084", "0.70878106", "0.7076072", "0.70697904", "0.70682377", "0.7064471", "0.7053471", "0.70435053", "0.7036322", "0.70234597", "0.7012496", "0.7012374", "0.7012285", "0.7010517", "0.69953436", "0.6975467", "0.69283384", "0.6919441", "0.69165057", "0.6915893", "0.6900342", "0.68976724", "0.6886287", "0.68840647", "0.6871395", "0.68657637", "0.6849131", "0.6837043", "0.6837043", "0.6837043", "0.6837043", "0.6835951", "0.6819374", "0.681689", "0.6787406", "0.6765595", "0.6765595", "0.67568725", "0.67568725", "0.6751849", "0.6710731", "0.6707385", "0.6707385" ]
0.0
-1
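The record above counts the parameters that survive an L0-style mask: a mask entry contributes only if its magnitude exceeds epsilon, and each surviving entry is charged with proj.size(0) + layer_weight.size(0) scalars. A standalone sketch of that rule for a single projection/layer pair follows; the function name, tensor shapes, and epsilon value are assumptions chosen for illustration, not taken from the dataset.

import torch

def effective_proj_params(proj: torch.Tensor, layer_weight: torch.Tensor,
                          mask: torch.Tensor, epsilon: float = 1e-3) -> torch.Tensor:
    # Keep only mask entries whose magnitude exceeds epsilon, then charge each
    # surviving entry with proj.size(0) + layer_weight.size(0) scalars
    # (one retained column in each matrix, under the assumed shapes),
    # mirroring the bookkeeping in the record's document.
    n_alive = (mask.data.abs() > epsilon).sum()
    return (proj.size(0) + layer_weight.size(0)) * n_alive

proj = torch.zeros(64, 32)           # assumed shape: d_model x d_proj
layer_weight = torch.zeros(100, 32)  # assumed shape: vocab_slice x d_proj
mask = torch.tensor([0.5, 0.0, -0.2, 1e-6] * 8)  # 32 entries, 16 effectively pruned
print(effective_proj_params(proj, layer_weight, mask))  # (64 + 100) * 16 = 2624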
Create a dataset for locating mark area positions in an OMR image
def make_voc_dataset(): # dataset from test omrimage123 # create from test omrimage2 import form_test as ftt # former = ftt.form_21() # omrimage2-1 omr01.jpg, omr2018a # former = ftt.form_22() # omrimage2-2 OMR01.jpg, omr2018b former = ftt.form_6() # omr2018f6 dname = 'omr2018f6' omrmodel = opo.OmrModel() omrxml = OmrVocDataset() omrxml.set_model(omrmodel=omrmodel, omrformer=former) omrxml.save_image_file = 'd:/study/dataset/'+ dname + '/JPEGImages/?' omrxml.save_xml_file = 'd:/study/dataset/' + dname + '/Annotations/?' if not os.path.isdir(omrxml.save_xml_file.replace('?', '')): os.makedirs(omrxml.save_xml_file.replace('?', '')) if not os.path.isdir(omrxml.save_image_file.replace('?', '')): os.makedirs(omrxml.save_image_file.replace('?', '')) omrxml.create_dataset()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_annotation_data(self):\n for i, hp in enumerate(self.hanging_point_in_camera_coords_list):\n px, py = self.camera_model.project3d_to_pixel(hp.worldpos())\n if self.save_debug_image:\n self.bgr_axis = self.bgr.copy()\n if 0 <= px < self.target_width and 0 <= py < self.target_height:\n if self.save_debug_image:\n draw_axis(self.bgr_axis,\n hp.worldrot(),\n hp.worldpos(),\n self.camera_model.K)\n create_gradient_circle(\n self.annotation_img,\n int(py), int(px))\n if self.visible_labels == []:\n self.annotation_data.append(\n {'xy': [int(px), int(py)],\n 'depth': hp.worldpos()[2] * 1000,\n 'quaternion': hp.quaternion.tolist()}\n )\n else:\n self.annotation_data.append(\n {'xy': [int(px), int(py)],\n 'depth': hp.worldpos()[2] * 1000,\n 'quaternion': hp.quaternion.tolist(),\n 'label': self.visible_labels[i]}\n )\n self.rotation_map.add_quaternion(\n int(px), int(py), hp.quaternion)\n\n # self.depth_map.add_depth(\n # int(px), int(py),\n # hp.worldpos()[2] * 1000)\n\n if np.all(self.annotation_img == 0):\n print('out of camera')\n return False\n\n self.annotation_img \\\n = self.annotation_img / self.annotation_img.max() * 255\n self.annotation_img = self.annotation_img.astype(np.uint8)\n\n self.rotations = self.rotation_map.rotations\n\n # self.hanging_points_depth = self.depth_map.on_depth_image(self.depth)\n\n return True", "def load_test_dataset():\n\n def gen_image(resolution, x1, y1, x2, y2):\n width, height = resolution\n image = np.full([height, width, 3], fill_value=255, dtype=np.uint8)\n image[int(y1 * height) : int(y2 * height), int(x1 * width) : int(x2 * width), :] = np.array(\n [0, 128, 128], dtype=np.uint8\n )[None, None, :]\n return image, Rectangle(x1=x1, y1=y1, x2=x2, y2=y2)\n\n images = [\n gen_image((640, 480), 0.0, 0.0, 0.5, 0.5),\n gen_image((640, 480), 0.5, 0.0, 1.0, 0.5),\n gen_image((640, 480), 0.0, 0.5, 0.5, 1.0),\n gen_image((640, 480), 0.5, 0.5, 1.0, 1.0),\n ]\n labels = [LabelEntity(name=\"rect\", domain=Domain.DETECTION, id=ID(\"0\"))]\n\n def get_image(i, subset):\n image, bbox = images[i]\n return DatasetItemEntity(\n media=Image(data=image),\n annotation_scene=AnnotationSceneEntity(\n annotations=[Annotation(bbox, labels=[ScoredLabel(label=labels[0])])],\n kind=AnnotationSceneKind.ANNOTATION,\n ),\n subset=subset,\n )\n\n items = [\n get_image(0, Subset.TRAINING),\n get_image(1, Subset.TRAINING),\n get_image(2, Subset.TRAINING),\n get_image(3, Subset.TRAINING),\n get_image(0, Subset.TRAINING),\n get_image(1, Subset.TRAINING),\n get_image(2, Subset.TRAINING),\n get_image(3, Subset.TRAINING),\n get_image(0, Subset.TRAINING),\n get_image(1, Subset.TRAINING),\n get_image(0, Subset.VALIDATION),\n get_image(1, Subset.VALIDATION),\n get_image(2, Subset.VALIDATION),\n get_image(3, Subset.VALIDATION),\n get_image(0, Subset.TESTING),\n get_image(1, Subset.TESTING),\n get_image(2, Subset.TESTING),\n get_image(3, Subset.TESTING),\n ]\n return DatasetEntity(items), labels", "def detect_marks(self, image_np):\r\n\r\n # # Actual detection.\r\n predictions = self.model.signatures[\"predict\"](\r\n tf.constant(image_np, dtype=tf.uint8))\r\n\r\n # Convert predictions to landmarks.\r\n marks = np.array(predictions['output']).flatten()[:136]\r\n marks = np.reshape(marks, (-1, 2))\r\n\r\n return marks", "def read_and_select(fles, var, area):\n \n ds = xr.open_mfdataset(fles)\n \n # For 20CRv2c geopotential height \n if(var=='hgt'): \n ds = ds.sel(level=150.0)\n \n try:\n ds = ds.rename({'longitude': 'lon', 'latitude': 'lat'}) \n except: \n pass\n \n \n 
if(ds.lon.values.max() > 350):\n ds = ds.assign_coords(lon=(((ds.lon + 180) % 360) - 180))\n rolls = np.sum(ds.lon.values < 0); ds = ds.roll(lon=rolls*(-1))\n\n if(ds.lat.values[0] > ds.lat.values[-1]):\n ds['lat'] = np.flipud(ds['lat'])\n ds[var].values = np.flip(ds[var], axis=1)\n\n # For 20CRv2c snow cover\n if(var=='snowc'): \n ds[var] = ds[var]/100.\n ds[var] = ds[var].where(ds[var]>=0.5, other=0.0)\n ds[var] = ds[var].where(ds[var] <0.5, other=1.0)\n \n # For HadISST1\n if((var=='sst')|(var=='sic')): \n mask = ds[var].values == -1000.\n ds[var].values[mask] = np.nan\n \n if( area=='europe'): ds = ds.squeeze().sel(lat=slice( 33,73), lon=slice(-12,40)) \n elif(area=='westeu'): ds = ds.squeeze().sel(lat=slice(42,59), lon=slice(-10,17))\n elif(area=='easeur'): ds = ds.squeeze().sel(lat=slice(38,56), lon=slice(17,43))\n elif(area=='meditr'): ds = ds.squeeze().sel(lat=slice(30,45), lon=slice(0,25))\n elif(area=='scandi'): ds = ds.squeeze().sel(lat=slice( 55,71), lon=slice( 4,34)) \n elif(area=='norhem'): ds = ds.squeeze().sel(lat=slice(-10,87)) \n elif(area=='norpol'): ds = ds.squeeze().sel(lat=slice( 50,87))\n else: ds = ds.squeeze()\n \n return ds", "def add_roi_to_dataset(dataset):\n return [(data[0], data[1], data[2], (data[1] * data[2]) / 100) for data in dataset]", "def prepare_cityscapes_data(seed=1, percent=30.0, version=2017):\n def _save_anno(name, images, annotations):\n \"\"\"Save annotation\n \"\"\"\n print('>> Processing data {}.json saved ({} images {} annotations)'.format(\n name, len(images), len(annotations)))\n new_anno = {}\n new_anno['images'] = images\n new_anno['annotations'] = annotations\n new_anno['categories'] = anno['categories']\n\n with open(\n '{root}/{save_name}.json'.format(\n save_name=name, root=DATA_DIR),\n 'w') as f:\n json.dump(new_anno, f)\n print('>> Data {}.json saved ({} images {} annotations)'.format(\n name, len(images), len(annotations)))\n\n np.random.seed(seed)\n \n anno = json.load(open(os.path.join(DATA_DIR, 'instancesonly_filtered_gtFine_train.json')))\n\n image_list = anno['images']\n labeled_tot = int(percent / 100. 
* len(image_list))\n #labeled_ind = np.random.choice(range(len(image_list)), size=labeled_tot)\n labeled_ind = np.arange(len(image_list))\n np.random.shuffle(labeled_ind)\n labeled_ind = labeled_ind[0:labeled_tot]\n\n labeled_id = []\n labeled_images = []\n unlabeled_images = []\n labeled_ind = set(labeled_ind)\n for i in range(len(image_list)):\n if i in labeled_ind:\n labeled_images.append(image_list[i])\n labeled_id.append(image_list[i]['id'])\n else:\n unlabeled_images.append(image_list[i])\n\n # get all annotations of labeled images\n labeled_id = set(labeled_id)\n labeled_annotations = []\n unlabeled_annotations = []\n for an in anno['annotations']:\n if an['image_id'] in labeled_id:\n labeled_annotations.append(an)\n else:\n unlabeled_annotations.append(an)\n\n # save labeled and unlabeled\n save_name = 'instancesonly_filtered_gtFine_train.{seed}@{tot}'.format(\n version=version, seed=seed, tot=int(percent))\n _save_anno(save_name, labeled_images, labeled_annotations)\n save_name = 'instancesonly_filtered_gtFine_train.{seed}@{tot}-unlabeled'.format(\n version=version, seed=seed, tot=int(percent))\n _save_anno(save_name, unlabeled_images, unlabeled_annotations)", "def __init__(self):\n DetectLandmarks.__init__(self)\n self.red_l = 0\n self.green_l = 0\n self.blue_l = 0\n self.red_e = 0\n self.green_e = 0\n self.blue_e = 0\n self.debug = 0\n self.image = 0\n self.width = 0\n self.height = 0\n self.im_copy = 0\n self.lip_x = []\n self.lip_y = []", "def get_annotation_dataframe_compact(self): \n temp_df = pd.DataFrame(self.annotation_line_list)\n # make a list with the annotations for each bbox (each row of the fata frame)\n temp_df['annon'] = list(zip(list(zip(temp_df['xmin'], temp_df['ymin'], temp_df['xmax'], temp_df['ymax'])), temp_df['class_name']))\n # group the df based on im_full_path\n grouped = temp_df.groupby(['img_full_path'])\n # create tuples of the grouped rows columns\n df_serie = grouped['annon'].aggregate(lambda x: tuple(x))\n return df_serie.to_frame()", "def data_assemble(self, x,y, r_cut, add_mask=5, pick_choice=False):\n #segmentation components\n obj_masks,center_mask_info, segments_deblend_list = self._seg_image(x, y, r_cut=r_cut)\n data_masks_center, _, xcenter, ycenter, c_index = center_mask_info\n image = self.cut_image(x,y,r_cut)\n self.raw_image = image\n src_mask = np.zeros_like(image)\n lens_mask = np.zeros_like(image)\n plu_mask = np.zeros_like(image)\n lenslight_mask_index = []\n if self.segmap is not None and self.interaction:\n segmap=self.segmap[0].data\n segdata = segmap[x - r_cut:x + r_cut + 1, y - r_cut:y + r_cut + 1]\n plt.imshow(segdata, origin='lower')\n nlabel = np.unique(segdata)\n for i in range(nlabel.shape[0] - 1):\n ax = (int((np.where(segdata == nlabel[i + 1])[0].max() - np.where(segdata == nlabel[i + 1])[0].min()) / 2 +\n np.where(segdata == nlabel[i + 1])[0].min()))\n ay = (int((np.where(segdata == nlabel[i + 1])[1].max() - np.where(segdata == nlabel[i + 1])[1].min()) / 3 +\n np.where(segdata == nlabel[i + 1])[1].min()))\n plt.text(ay, ax, repr(nlabel[i + 1]), color='r', fontsize=15)\n plt.title('Input segmentation map')\n plt.show()\n source_mask_index = [int(sidex) for sidex in input('Selection of data via (inputed) segmentation index separated by space, e.g., 0 1 :').split()]\n for i in source_mask_index:\n src_mask = src_mask + segdata*(segdata==i*1)\n # lens light\n lenslightyn = input('Hint: is there lens light? 
(y/n): ')\n if lenslightyn == 'y':\n lenslight_mask_index = [int(lidex) for lidex in input('Selection of lens-plane light via (inputed) segmentation index separated by space, e.g., 0 1 :').split()]\n for i in lenslight_mask_index:\n lens_mask = (lens_mask + segdata*(segdata==i*1))\n elif lenslightyn == 'n':\n lenslight_mask_index = []\n else:\n raise ValueError(\"Please input 'y' or 'n' !\")\n # contamination\n pluyn = input('Hint: is there contamination? (y/n): ')\n if pluyn == 'y':\n plution_mask_index = [int(pidex) for pidex in input('Selection of contamination via (inputed) segmentation index separated by space, e.g., 0 1 :').split()]\n for i in plution_mask_index:\n plu_mask = (plu_mask + segdata*(segdata==i*1))\n elif pluyn == 'n':\n plu_mask = np.zeros_like(image)\n else:\n raise ValueError(\"Please input 'y' or 'n' !\")\n\n\n\n if self.segmap is None and self.interaction:\n self.plot_segmentation(image, segments_deblend_list, xcenter, ycenter, c_index)\n #source light\n if pick_choice:\n source_mask_index = [int(sidex) for sidex in input('Selection of data via segmentation index separated by space, e.g., 0 1 :').split()]\n for i in source_mask_index:\n src_mask = src_mask + obj_masks[i]\n #lens light\n lenslightyn = input('Hint: is there lens light? (y/n): ')\n if lenslightyn == 'y':\n lenslight_mask_index = [int(lidex) for lidex in input('Selection of lens-plane light via segmentation index separated by space, e.g., 0 1 :').split()]\n for i in lenslight_mask_index:\n lens_mask = (lens_mask + obj_masks[i])\n elif lenslightyn == 'n':\n lenslight_mask_index = []\n else:\n raise ValueError(\"Please input 'y' or 'n' !\")\n # contamination\n pluyn = input('Hint: is there contamination? (y/n): ')\n if pluyn == 'y':\n plution_mask_index = [int(pidex) for pidex in input('Selection of contamination via segmentation index separated by space, e.g., 0 1 :').split()]\n for i in plution_mask_index:\n plu_mask = (plu_mask + obj_masks[i])\n elif pluyn == 'n':\n plu_mask = np.zeros_like(image)\n else:\n raise ValueError(\"Please input 'y' or 'n' !\")\n else:\n src_mask = data_masks_center\n\n\n #adding pixels around the selected masks\n selem = np.ones((add_mask, add_mask))\n src_mask = ndimage.binary_dilation(src_mask.astype(np.bool), selem)\n plu_mask_out = ndimage.binary_dilation(plu_mask.astype(np.bool), selem)\n plu_mask_out = (plu_mask_out - 1)*-1\n\n #select source region to fit, or to use whole observation to fit\n ##1.select source region to fit\n snr = self.snr\n source_mask = image * src_mask\n #create background image for picked\n if self.background_rms is None:\n _, _, std = sigma_clipped_stats(image, sigma=snr, mask=source_mask)\n tshape = image.shape\n img_bkg = make_noise_image(tshape, distribution='gaussian', mean=0., stddev=std, seed=12)\n else:\n tshape = image.shape\n std=np.mean(self.background_rms)\n img_bkg = make_noise_image(tshape, distribution='gaussian', mean=0., stddev=std, seed=12)\n\n no_source_mask = (src_mask * -1 + 1) * img_bkg\n picked_data = source_mask + no_source_mask\n\n ##2.use whole observation to fit while mask out the contamination\n maskedimg = image * plu_mask_out\n\n ##orginize the output 'kwargs_data'\n kwargs_data = {}\n if pick_choice:\n kwargs_data['image_data'] = picked_data#select source region to fit\n else:\n kwargs_data['image_data'] = maskedimg#use whole observation to fit while mask out the contamination\n\n if self.background_rms is None:\n kwargs_data['background_rms'] = std\n self.background_rms = std\n else:\n kwargs_data['background_rms'] 
= np.mean(self.background_rms)\n kwargs_data['exposure_time'] = self.exp_time\n kwargs_data['transform_pix2angle'] = np.array([[1, 0], [0, 1]]) * self.deltaPix\n ra_at_xy_0 = (y - r_cut) * self.deltaPix # (ra,dec) is (y_img,x_img)\n dec_at_xy_0 = (x - r_cut) * self.deltaPix\n kwargs_data['ra_at_xy_0'] = ra_at_xy_0\n kwargs_data['dec_at_xy_0'] = dec_at_xy_0\n\n #coordinate of the lens light\n xlenlight, ylenlight = [], []\n if lenslight_mask_index !=[]:\n for i in lenslight_mask_index:\n xlenlight.append(ra_at_xy_0 + int(xcenter[i]) * self.deltaPix )\n ylenlight.append(dec_at_xy_0 + int(ycenter[i])* self.deltaPix )\n\n #for output\n self.data = kwargs_data['image_data']\n self.kwargs_data = kwargs_data\n self.data_mask = src_mask\n self.lens_mask = lens_mask\n self.plu_mask = plu_mask_out\n self.obj_masks = obj_masks\n imageData = ImageData(**kwargs_data)\n self.imageData = imageData\n kwargs_seg = [segments_deblend_list, xcenter, ycenter, c_index]\n\n return kwargs_data, kwargs_seg, [xlenlight, ylenlight]", "def __call__(self, *args, **kwargs):\n\n dataset = TextOnlyCocoAnnotation()\n\n for image_name in tqdm(sorted(os.listdir(self.folder))):\n if image_name.endswith('JPG'):\n image_path = os.path.join(self.folder, image_name)\n annotation_path = os.path.join(self.folder, image_name.replace('.JPG', '.gt'))\n\n with open(annotation_path, encoding='utf-8-sig') as read_file:\n content = [line.strip() for line in read_file.readlines()]\n for line in content:\n dataset.add_bbox(image_path, imagesize.get(image_path),\n self.parse_line(line))\n\n return dataset", "def identify_landmarks(image: numpy.ndarray) -> Dict[int, Tuple[int, int]]:\n copy_img = image.copy()\n\n detector = dlib.get_frontal_face_detector()\n predictor = dlib.shape_predictor(os.path.join(\n project_dir, \"shufflealgos\", \"image\", \"landmarks\",\n \"shape_predictor_68_face_landmarks.dat\"))\n\n gray_img_array = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n found_face = detector(gray_img_array)\n\n face_lmarks: Dict[int, Tuple[int, int]] = dict()\n\n # Assume only one face is found for our purposes\n for idx, face in enumerate(found_face, start=1):\n landmarks = predictor(gray_img_array, face)\n\n for n in range(68):\n lmx = landmarks.part(n).x\n lmy = landmarks.part(n).y\n\n face_lmarks[n + 1] = (lmx, lmy)\n\n cv2.circle(copy_img, (lmx, lmy), 1, (50, 50, 255), -1)\n\n cv2.putText(\n copy_img, str(n), (lmx, lmy - 5),\n cv2.FONT_HERSHEY_COMPLEX_SMALL, 0.6, (0, 0, 255), 1)\n\n return face_lmarks", "def read_annotation_yolov5(bbox_path):\n\n # image_paths = get_lists_in_dir(rawImage_dir)\n\n dw = 1./(camera_resolution[0]) # 1 / image width\n dh = 1./(camera_resolution[1]) # 1 / image height\n\n # Read in bbox coordinate information from bbox_information.txt\n dimension_list = []\n with open(bbox_path, 'r') as annotation_file:\n content = annotation_file.read().splitlines()\n\n for n in content:\n # x = int(n.split()[0])+int(n.split()[2])/2\n # y = int(n.split()[1])+int(n.split()[3])/2\n # w = int(n.split()[2])\n # h = int(n.split()[3])\n #\n # x = x*dw\n # w = w*dw\n # y = y*dh\n # h = h*dh\n\n bb = n.split()\n w = int(bb[2])\n h = int(bb[3])\n\n start_x = int(bb[0])\n start_y = int(bb[1])\n\n center_x = start_x + w / 2\n center_y = start_y + h / 2\n\n x = center_x * dw\n y = center_y * dh\n w = w * dw\n h = h * dh\n \n dimension_list.append((x, y, w, h))\n\n return dimension_list", "def getAnnotation(self, ind):\n if not np.issubdtype(ind, np.integer):\n logger.warning(f'myMplCanvas.getAnnotation() got bad ind: {ind} 
{type(ind)}')\n return\n\n xStat = self.stateDict['xStat']\n yStat = self.stateDict['yStat']\n groupByColumnName = self.stateDict['groupByColumnName']\n\n analysisName = self.plotDf.at[ind, groupByColumnName]\n \n # into the master df, only if viewing raw\n index = self.plotDf.at[ind, 'index']\n \n try:\n region = self.plotDf.at[ind, 'Region'] # not all will have this\n except (KeyError) as e:\n region = 'n/a'\n try:\n _sex = self.plotDf.at[ind, 'Sex'] # not all will have this\n except (KeyError) as e:\n _sex = 'n/a'\n\n xVal = self.plotDf.at[ind, xStat]\n yVal = self.plotDf.at[ind, yStat]\n\n # oligo specific\n _masterDf = self.stateDict['masterDf']\n \n # corerect\n try:\n grandParentFolder = self.plotDf.at[ind, 'grandParentFolder']\n grandParentFolder = str(grandParentFolder)\n except (KeyError) as e:\n grandParentFolder = None\n\n parentFolder = self.plotDf.at[ind, 'parentFolder']\n file = self.plotDf.at[ind, 'file']\n\n # grandParentFolder = _masterDf.at[ind, 'grandParentFolder']\n # grandParentFolder = str(grandParentFolder)\n # parentFolder = _masterDf.at[ind, 'parentFolder']\n # file = _masterDf.at[ind, 'file']\n\n # print('xxx grandParentFolder:', grandParentFolder, type(grandParentFolder))\n # print('xxx parentFolder:', parentFolder)\n # print('xxx file:', file)\n \n if grandParentFolder is not None:\n path = os.path.join(grandParentFolder, parentFolder, file)\n else:\n parentFolder = str(parentFolder)\n path = os.path.join(parentFolder, file)\n\n returnDict = {\n 'ind': ind,\n 'index': index,\n 'analysisName': analysisName,\n 'region': region,\n 'Sex': _sex,\n 'xVal': xVal,\n 'yVal': yVal,\n #'plotDf': self.plotDf, # potentially very big\n 'path': path,\n }\n return returnDict", "def outline_geoids(sf, df, geoids, include_labels=True):\n# df = read_shapefile(sf)\n# df['tract_geoid'] = df.GEOID.str[:11]\n bg_id = []\n for i in geoids:\n bg_id.append(df[df.GEOID==i].index[0])\n\n itr = 0\n for shape in sf.shapeRecords():\n if itr in bg_id:\n x = [i[0] for i in shape.shape.points[:]]\n y = [i[1] for i in shape.shape.points[:]]\n plt.plot(x, y, 'k')\n \n \n if include_labels:\n x0 = np.mean(x)\n y0 = np.mean(y)\n label = df.iloc[itr].density_label\n\n plt.text(x0, y0, label, fontsize=8)\n \n itr = itr+1", "def __insert_data_in_img(self):\n data_df = pd.read_csv(\n os.path.join(\n self.shap_logs_path,\n \"SHAP_summary_{}_{}_{}.csv\".format(\n self.classifier_name, \"PRESENT\", self.datetime\n ),\n ),\n index_col=0,\n )\n for feature_category in self.unique_feature_category_names:\n self.category_img_dict[feature_category][\"value\"] = int(\n data_df.loc[feature_category, :].sum()\n )\n\n for row_cnt, (feature_category_name, feature_data) in enumerate(\n self.category_img_dict.items()\n ):\n arrow_width = int(\n (self.baseline_scale_img.shape[1] / 100) * abs(feature_data[\"value\"])\n )\n if feature_data[\"value\"] > 0:\n arrow_end = (self.arrow_start[0] + arrow_width, self.arrow_start[1])\n arrow_middle = int(\n ((arrow_end[1] - self.arrow_start[1]) / 2) + self.arrow_start[1] - 7\n )\n for bracket_no, bracket in enumerate(self.ranges_lst):\n if abs(feature_data[\"value\"]) in bracket:\n color = (\n self.positive_arrow_colors[bracket_no][2],\n self.positive_arrow_colors[bracket_no][1],\n self.positive_arrow_colors[bracket_no][0],\n )\n cv2.arrowedLine(\n self.img, self.arrow_start, arrow_end, color, 5, tipLength=0.1\n )\n cv2.putText(\n self.img,\n \"+\" + str(abs(feature_data[\"value\"])) + \"%\",\n (arrow_end[0] - 7, arrow_middle - 15),\n cv2.FONT_HERSHEY_COMPLEX,\n 1,\n color,\n 
2,\n )\n\n else:\n arrow_end = (self.arrow_start[0] - arrow_width, self.arrow_start[1])\n arrow_middle = int(\n ((self.arrow_start[1] - arrow_end[1]) / 2) + arrow_end[1] - 7\n )\n for bracket_no, bracket in enumerate(self.ranges_lst):\n if abs(feature_data[\"value\"]) in bracket:\n color = (\n self.negative_arrow_colors[bracket_no][2],\n self.negative_arrow_colors[bracket_no][1],\n self.negative_arrow_colors[bracket_no][0],\n )\n cv2.arrowedLine(\n self.img, self.arrow_start, arrow_end, color, 5, tipLength=0.1\n )\n cv2.putText(\n self.img,\n \"-\" + str(abs(feature_data[\"value\"])) + \"%\",\n (arrow_end[0] - 7, arrow_middle - 15),\n cv2.FONT_HERSHEY_COMPLEX,\n 1,\n color,\n 2,\n )\n\n if row_cnt != (len(list(self.category_img_dict.keys())) - 1):\n self.arrow_start = (\n arrow_end[0],\n self.side_scale_y_tick_cords[row_cnt + 1][0],\n )\n\n small_arrow_top_left = (\n int(arrow_end[1]) + 20,\n int(arrow_end[0] - self.small_arrow_img.shape[1] / 2),\n )\n small_arrow_bottom_right = (\n small_arrow_top_left[0] + self.small_arrow_img.shape[0],\n small_arrow_top_left[1] + self.small_arrow_img.shape[1],\n )\n self.img[\n small_arrow_top_left[0] : small_arrow_bottom_right[0],\n small_arrow_top_left[1] : small_arrow_bottom_right[1],\n ] = self.small_arrow_img\n color_bar_top_left = (\n arrow_end[1] + self.small_arrow_img.shape[0] + 25,\n self.baseline_scale_top_left[1],\n )\n color_bar_bottom_right = (\n color_bar_top_left[0] + self.color_bar_img.shape[0],\n color_bar_top_left[1] + self.color_bar_img.shape[1],\n )\n self.img[\n color_bar_top_left[0] : color_bar_bottom_right[0],\n color_bar_top_left[1] : color_bar_bottom_right[1],\n ] = self.color_bar_img\n\n color_bar_middle = (\n (int(580 + self.baseline_scale_img.shape[1] / 2)),\n color_bar_bottom_right[0] + 50,\n )\n cv2.putText(\n self.img,\n \"CLASSIFICATION PROBABILITY\",\n color_bar_middle,\n cv2.FONT_HERSHEY_COMPLEX,\n 1,\n (0, 0, 0),\n 2,\n )\n cv2.imwrite(self.img_save_path, self.img)\n self.visualization_timer.stop_timer()\n stdout_success(\n msg=f\"SHAP summary graph saved at {self.img_save_path}\",\n elapsed_time=self.visualization_timer.elapsed_time_str,\n )", "def get_landmarks(self, image): # from https://www.paulvangent.com/2016/08/05/emotion-recognition-using-facial-landmarks/\n # Ask the detector to find the bounding boxes of each face. The 1 in the\n # second argument indicates that we should upsample the image 1 time. 
This\n # will make everything bigger and allow us to detect more faces.\n detections = self.detector(image, 1)\n if len(detections) < 1: # Number of faces detected = 0\n # print(\"Number of faces detected: {}\".format(len(detections)))\n return None\n # Draw Facial Landmarks with the predictor class\n shape = self.predictor(image, detections[0])\n xlist = []\n ylist = []\n for i in range(68): # Store X and Y coordinates in two lists\n xlist.append(float(shape.part(i).x))\n ylist.append(float(shape.part(i).y))\n\n landmarks_vectorised = []\n landmarks_vectorised = self.our_ft_landmark(xlist, ylist) # Extract the features\n\n xmean = np.mean(xlist)\n ymean = np.mean(ylist)\n xcentral = [(x-xmean) for x in xlist]\n ycentral = [(y-ymean) for y in ylist]\n \n for x, y, w, z in zip(xcentral, ycentral, xlist, ylist):\n landmarks_vectorised.append(w)\n landmarks_vectorised.append(z)\n # landmarks_vectorised.append(x)\n # landmarks_vectorised.append(y)\n meannp = np.asarray((ymean, xmean))\n coornp = np.asarray((z, w))\n dist = np.linalg.norm(coornp-meannp) # Euclidean distance\n landmarks_vectorised.append(dist)\n landmarks_vectorised.append((math.atan2(y, x)*360)/(2*math.pi)) # Compute the angle between the mean point and each landmark\n\n return landmarks_vectorised", "def test_nominal_case(self):\n\n image_filename, boxes = list(annotation.read(self.filename))\n self.assertEqual(image_filename, 'image.jpg')\n self.assertEqual(len(boxes), 2)\n width = 400\n height = 300\n b = boxes[0]\n self.assertEqual(b.xmin, 10 / width)\n self.assertEqual(b.ymin, 20 / height)\n self.assertEqual(b.xmax, 30 / width)\n self.assertEqual(b.ymax, 40 / height)", "def data_shapes(self):", "def datasetratiocopy_extend(l,ratio,x_offset,y_offset): # extend the points on all four edges\r\n dataset=[]\r\n for polyline in l:\r\n newpolyline=[]\r\n for pos in polyline:\r\n pos_x=pos[0]\r\n pos_y=pos[1]\r\n if abs((abs(pos_x)-globalconfig.X_LENGTH/2))<0.01: # judge if the pos is on the original outline; if so, move it to the new enlarged outline plus an extended length\r\n pos_x=pos[0]/globalconfig.CENTER_RATIO+(abs(pos_x)/pos_x*globalconfig.X_EXTENDED_LENGTH)+x_offset \r\n else:\r\n pos_x=pos[0]/ratio+x_offset\r\n if abs((abs(pos_y)-globalconfig.Y_LENGTH/2))<0.01:\r\n pos_y=pos[1]/globalconfig.CENTER_RATIO+(abs(pos_y)/pos_y*globalconfig.Y_EXTENDED_LENGTH)+y_offset\r\n else:\r\n pos_y=pos[1]/ratio+y_offset \r\n newpolyline.append([pos_x,pos_y])\r\n dataset.append(newpolyline)\r\n return dataset", "def datasetratiocopy_xr_extend(l,ratio,x_offset,y_offset): # only extend the points on the top, bottom and right edges\r\n dataset=[]\r\n for polyline in l:\r\n newpolyline=[]\r\n for pos in polyline:\r\n pos_x=pos[0]\r\n pos_y=pos[1]\r\n if abs((abs(pos_x)-globalconfig.X_LENGTH/2))<0.01: \r\n if pos_x>0: # judge if the pos is on the original outline; if so, move it to the new enlarged outline plus an extended length\r\n pos_x=pos[0]/globalconfig.CENTER_RATIO+(abs(pos_x)/pos_x*globalconfig.X_EXTENDED_LENGTH)+x_offset\r\n else:\r\n pos_x=pos[0]/globalconfig.CENTER_RATIO+x_offset \r\n else:\r\n pos_x=pos[0]/ratio+x_offset\r\n if abs((abs(pos_y)-globalconfig.Y_LENGTH/2))<0.01:\r\n pos_y=pos[1]/globalconfig.CENTER_RATIO+(abs(pos_y)/pos_y*globalconfig.Y_EXTENDED_LENGTH)+y_offset\r\n else:\r\n pos_y=pos[1]/ratio+y_offset \r\n newpolyline.append([pos_x,pos_y])\r\n dataset.append(newpolyline)\r\n return dataset", "def training_data_generation(DATA_DIR, img_height_size, img_width_size, label_list):\r\n \r\n img_ms_files = glob.glob(DATA_DIR + '\\\\Train_MS' + '\\\\Train_*.tif')\r\n img_pan_files = 
glob.glob(DATA_DIR + '\\\\Train_Pan' + '\\\\Train_*.tif')\r\n polygon_files = glob.glob(DATA_DIR + '\\\\Train_Polygons' + '\\\\Train_*.geojson')\r\n \r\n img_ms_array_list = []\r\n img_pan_array_list = []\r\n mask_array_list = []\r\n \r\n for file in range(len(img_ms_files)):\r\n with rasterio.open(img_ms_files[file]) as f:\r\n metadata = f.profile\r\n img_ms = np.transpose(f.read(tuple(np.arange(metadata['count']) + 1)), [1, 2, 0])\r\n \r\n with rasterio.open(img_pan_files[file]) as g:\r\n metadata_pan = g.profile\r\n img_pan = np.expand_dims(g.read(1), axis = 2)\r\n \r\n ms_to_pan_ratio = metadata['transform'][0] / metadata_pan['transform'][0]\r\n \r\n if (img_height_size % ms_to_pan_ratio) != 0 or (img_width_size % ms_to_pan_ratio) != 0:\r\n raise ValueError('Please make sure that both img_height_size and img_width_size can be divided by {}'.format(int(ms_to_pan_ratio)))\r\n \r\n mask = training_mask_generation(img_pan_files[file], polygon_files[file], labels = label_list)\r\n \r\n img_ms_array, img_pan_array, mask_array = image_clip_to_segment_and_convert(img_ms, img_pan, mask, ms_to_pan_ratio, \r\n img_height_size, img_width_size)\r\n \r\n img_ms_array_list.append(img_ms_array)\r\n img_pan_array_list.append(img_pan_array)\r\n mask_array_list.append(mask_array)\r\n \r\n img_ms_full_array = np.concatenate(img_ms_array_list, axis = 0)\r\n img_pan_full_array = np.concatenate(img_pan_array_list, axis = 0)\r\n mask_full_array = to_categorical(np.concatenate(mask_array_list, axis = 0), num_classes = len(label_list))\r\n \r\n return img_ms_full_array, img_pan_full_array, mask_full_array", "def __init__(self, df, num_classes, image_size, device):\n self.maps = df['map_path'].tolist() \n self.contours = df['contourLevel'].tolist()\n self.points = df['tagged_points_path'].tolist()\n self.masks = df['tagged_path'].tolist()\n self.num_classes = num_classes\n self.image_size = image_size\n self.device = device", "def get_tomo_data(self,threshold=20.):\n\t\tdset = raytomo.RayTomoDataSet(self.attrs['tomo_f'])\n\t\tfor prd in self.attrs['prd_arr']:\n\t\t\tgroup = self['%g_sec'%( prd )]\n\t\t\tdset.get_data4plot(dataid=self.attrs['dataid'].decode('utf-8'), period=prd)\n\t\t\tpdens = dset.pdens\n\t\t\tmask_pdens = dset.pdens < threshold\n\t\t\ttomo_data = np.ma.masked_array(dset.vel_iso, mask=mask_pdens)\n\t\t\tgroup.create_dataset(name='tomo_data', data=dset.vel_iso) # phase velocity map\n\t\t\tgroup.create_dataset(name='tomo_data_msk', data=mask_pdens) # save the mask array seperately. 
h5 file doesn't support masked array\n\t\t\tgroup.create_dataset(name='latArr', data=dset.latArr)\n\t\t\tgroup.create_dataset(name='lonArr', data=dset.lonArr)\n\t\treturn", "def build_dataset(self):\n print(\"reading data of images currently , please wait......\")\n x_train, y_train, _ = get_images(self.train_directory)\n x_test, y_test, _ = get_images(self.test_directory)\n x_train, y_train = image_subset(self.num_classes, x_train, y_train)\n x_test, y_test = image_subset(self.num_classes, x_test, y_test)\n x_train = x_train.astype('float32')\n x_test = x_test.astype('float32')\n self.x_train = x_train / 255\n self.x_test = x_test / 255\n self.y_train = utils.to_categorical(y_train, self.num_classes)\n self.y_test = utils.to_categorical(y_test, self.num_classes)", "def get_landmarks(self,image):\n landmarks=[]\n #Convert image to gray\n gray = cvtColor(image,COLOR_BGR2GRAY)\n #deNoise image\n gray = medianBlur(gray,self.kernalSize)\n #Convert into a binary image\n th2 = adaptiveThreshold(gray,255,ADAPTIVE_THRESH_MEAN_C,THRESH_BINARY,\n self.thresholdingBlockSize,self.thresholdingConstant)\n #Find edges in image\n edges = Canny(th2,self.edgeThreshold1,self.edgeThreshold2,\n apertureSize = self.edgeApertureSize)\n #Find lines in image\n lines = HoughLines (edges,self.distanceRange,\n self.angleRange,self.lineThreshold)\n if lines is not None:\n for line in lines: \n for rho,theta in line:\n if theta < 0.5 or theta > math.pi-0.5: #~20 degrees\n angle = (rho*self.fieldOfView/image.shape[1])-(self.fieldOfView/2)\n #landmarks.append((radian(angles),self.angularError))\n landmarks.append((rho, theta))\n pass\n return landmarks", "def __call__(self):\n\n dataset = TextOnlyCocoAnnotation()\n\n with open(self.path) as read_file:\n\n json_loaded = json.load(read_file)\n\n for i, value in tqdm(json_loaded['imgs'].items()):\n image_path = os.path.join(os.path.dirname(self.path), 'train2014',\n value['file_name'])\n dataset_type = value['set']\n\n if dataset_type not in self.sets:\n print(dataset_type)\n continue\n\n for annotation_id in json_loaded['imgToAnns'][i]:\n annotation_value = json_loaded['anns'][str(annotation_id)]\n word_annotation = self.parse_annotation_instance(annotation_value)\n dataset.add_bbox(image_path, imagesize.get(image_path), word_annotation)\n\n return dataset", "def gen_landmark_data(src_txt_path, net, augmet=False):\r\n print(\">>>>>> Start landmark data create...Stage: %s\" % net)\r\n save_folder = os.path.join(root_path, '../DATA/12/')\r\n save_image_folder = os.path.join(save_folder, 'train_%s_landmark_aug' % net)\r\n size_of_net = {'PNet': 12, 'RNet': 24, 'ONet': 48}\r\n if net not in size_of_net:\r\n raise Exception(\"The net type error!\")\r\n if not os.path.isdir(save_image_folder):\r\n os.makedirs(save_image_folder)\r\n print('create folder: ', save_image_folder)\r\n save_f = open(os.path.join(save_folder, 'landmark_%s_aug.txt' % size_of_net[net]), 'w')\r\n image_count = 0\r\n # image_path bbox landmark(5*2)\r\n bbox_landmark_info = get_bbox_landmark_from_txt(src_txt_path, data_path='../DATA/landmarks_traindata', with_landmark=True)\r\n for img_path, bbox, landmark_gt in bbox_landmark_info:\r\n f_imgs = list()\r\n f_landmarks = list()\r\n img = cv2.imread(img_path)\r\n assert(img is not None)\r\n img_h, img_w, img_c = img.shape\r\n gt_box = np.array([bbox.left, bbox.top, bbox.right, bbox.bottom])\r\n f_face = img[bbox.top: bbox.bottom+1, bbox.left: bbox.right+1]\r\n f_face = cv2.resize(f_face, (size_of_net[net], size_of_net[net]))\r\n landmark = np.zeros((5, 2))\r\n 
# normalize\r\n for index, one in enumerate(landmark_gt):\r\n rv = ((one[0]-gt_box[0])/(gt_box[2]-gt_box[0]), (one[1]-gt_box[1])/(gt_box[3]-gt_box[1]))\r\n landmark[index] = rv\r\n f_imgs.append(f_face)\r\n f_landmarks.append(landmark.reshape(10))\r\n landmark = np.zeros((5, 2))\r\n if augmet:\r\n x1, y1, x2, y2 = gt_box\r\n gt_width = x2 - x1 + 1\r\n gt_height = y2 - y1 + 1\r\n if max(gt_width, gt_height) < 40 or x1 < 0 or y1 < 0:\r\n continue\r\n # random shift\r\n for i in range(10):\r\n bbox_size = np.random.randint(int(min(gt_width, gt_height) * 0.8), np.ceil(1.25 * max(gt_width, gt_height)))\r\n # delta_x and delta_y are offsets of (x1, y1)\r\n # max can make sure if the delta is a negative number , x1+delta_x >0\r\n # parameter high of randint make sure there will be intersection between bbox and cropped_box\r\n delta_x = np.random.randint(-gt_width*0.2, gt_width*0.2)\r\n delta_y = np.random.randint(-gt_height*0.2, gt_height*0.2)\r\n nx1 = int(max(x1+gt_width/2 - bbox_size/2 + delta_x, 0))\r\n ny1 = int(max(y1+gt_height/2 - bbox_size/2 + delta_y, 0))\r\n nx2 = nx1 + bbox_size\r\n ny2 = ny1 + bbox_size\r\n if nx2 > img_w or ny2 > img_h:\r\n continue\r\n # print(nx1, ny1, nx2, ny2)\r\n crop_box = np.array([nx1, ny1, nx2, ny2])\r\n cropped_img = img[ny1: ny2+1, nx1: nx2+1, :]\r\n resized_img = cv2.resize(cropped_img, (size_of_net[net], size_of_net[net]))\r\n iou = calc_iou(crop_box, np.expand_dims(gt_box, 0))\r\n if iou <= 0.65:\r\n continue\r\n f_imgs.append(resized_img)\r\n # normalize\r\n for index, one in enumerate(landmark_gt):\r\n rv = ((one[0]-nx1)/bbox_size, (one[1]-ny1)/bbox_size)\r\n landmark[index] = rv\r\n f_landmarks.append(landmark.reshape(10))\r\n landmark = np.zeros((5, 2))\r\n # get last landmark from list\r\n landmark_ = f_landmarks[-1].reshape((-1, 2))\r\n bbox = BBox([nx1, ny1, nx2, ny2])\r\n\r\n # mirror\r\n if random.choice([0, 1]) > 0:\r\n face_flipped, landmark_flipped = flip(resized_img, landmark_)\r\n face_flipped = cv2.resize(face_flipped, (size_of_net[net], size_of_net[net]))\r\n # c*h*w\r\n f_imgs.append(face_flipped)\r\n f_landmarks.append(landmark_flipped.reshape(10))\r\n # rotate\r\n if random.choice([0, 1]) > 0:\r\n face_rotated_by_alpha, landmark_rotated = rotate(img, bbox,\r\n bbox.reproject_landmark(landmark_), 5)\r\n # landmark offset\r\n landmark_rotated = bbox.project_landmark(landmark_rotated)\r\n face_rotated_by_alpha = cv2.resize(face_rotated_by_alpha, (size_of_net[net], size_of_net[net]))\r\n f_imgs.append(face_rotated_by_alpha)\r\n f_landmarks.append(landmark_rotated.reshape(10))\r\n\r\n # flip\r\n face_flipped, landmark_flipped = flip(face_rotated_by_alpha, landmark_rotated)\r\n face_flipped = cv2.resize(face_flipped, (size_of_net[net], size_of_net[net]))\r\n f_imgs.append(face_flipped)\r\n f_landmarks.append(landmark_flipped.reshape(10))\r\n # anti-clockwise rotation\r\n if random.choice([0, 1]) > 0:\r\n face_rotated_by_alpha, landmark_rotated = rotate(img, bbox, bbox.reproject_landmark(landmark_), -5)\r\n landmark_rotated = bbox.project_landmark(landmark_rotated)\r\n face_rotated_by_alpha = cv2.resize(face_rotated_by_alpha, (size_of_net[net], size_of_net[net]))\r\n f_imgs.append(face_rotated_by_alpha)\r\n f_landmarks.append(landmark_rotated.reshape(10))\r\n\r\n face_flipped, landmark_flipped = flip(face_rotated_by_alpha, landmark_rotated)\r\n face_flipped = cv2.resize(face_flipped, (size_of_net[net], size_of_net[net]))\r\n f_imgs.append(face_flipped)\r\n f_landmarks.append(landmark_flipped.reshape(10))\r\n f_imgs, f_landmarks = 
np.asarray(f_imgs), np.asarray(f_landmarks)\r\n for i in range(len(f_imgs)):\r\n # if np.sum(np.where(f_landmarks[i] <= 0, 1, 0)) > 0:\r\n # print('skip image: %d' % i)\r\n # print(f_landmarks[i])\r\n # continue\r\n # if np.sum(np.where(f_landmarks[i] >= 1, 1, 0)) > 0:\r\n # print('skip image: %d', i)\r\n # print(f_landmarks[i])\r\n # continue\r\n path = os.path.join(save_image_folder, '%d.jpg' % image_count)\r\n cv2.imwrite(path, f_imgs[i])\r\n landmarks = map(str, list(f_landmarks[i]))\r\n save_f.write(path + ' -2 ' + ' '.join(landmarks) + '\\n')\r\n image_count += 1\r\n print_str = \"\\rCount: {}\".format(image_count)\r\n sys.stdout.write(print_str)\r\n sys.stdout.flush()\r\n save_f.close()\r\n print('\\n Landmark create done!')", "def _get_annotation(self, image_id):\n annotation_file = self.image_sets_dir / f'{image_id}.xml'\n objects = ET.parse(annotation_file).findall('object')\n boxes = []\n labels = []\n is_difficult = []\n for obj in objects:\n class_name = obj.find('name').text.lower().strip()\n if class_name in self.class_dict:\n bbox = obj.find('bndbox')\n\n x0 = float(bbox.find('xmin').text) - 1\n y0 = float(bbox.find('ymin').text) - 1\n x1 = float(bbox.find('xmax').text) - 1\n y1 = float(bbox.find('ymax').text) - 1\n boxes.append([x0, y0, x1, y1])\n\n labels.append(self.class_dict[class_name])\n\n is_difficult_str = obj.find('difficult').text\n is_difficult.append(int(is_difficult_str) if is_difficult_str else 0)\n\n return (np.array(boxes, dtype=np.float32),\n np.array(labels, dtype=np.int64),\n np.array(is_difficult, dtype=np.uint8))", "def segment_and_find_positions(self):\n initial_image = self.data\n xdim = self.data.shape[0]\n\n ydim = self.data.shape[1]\n downsized_image = transform.resize(\n initial_image,\n (xdim / DOWNSCALING_FACTOR, ydim / DOWNSCALING_FACTOR),\n mode=\"constant\",\n )\n rescaled_image = exposure.rescale_intensity(downsized_image)\n print(\"Starting Canny filtering\")\n g_edges = skimage.feature.canny(\n rescaled_image,\n sigma=self.canny_sigma,\n low_threshold=self.canny_low_threshold,\n )\n print(\"Starting dilation\")\n dilation = morphology.dilation(g_edges, morphology.disk(3))\n print(\"Starting erosion\")\n eroded = morphology.erosion(dilation, morphology.disk(4))\n dilation = morphology.dilation(\n eroded, morphology.diamond(4)\n ) # Dont change to disk\n print(\"Starting to remove small holes\")\n filled = morphology.remove_small_holes(\n dilation, area_threshold=self.remove_small_holes_area_threshold\n )\n print(\"Starting erosion\")\n eroded = morphology.erosion(filled, morphology.diamond(3))\n print(\"Applying filters\")\n filtered_image = eroded\n if self.colony_filters_dict is not None:\n for filter_name in self.colony_filters_dict.keys():\n filtered_image = segmentation_filters.apply_filter(\n filter_name, filtered_image, self.colony_filters_dict[filter_name]\n )\n\n colony_edges = morphology.dilation(feature.canny(filtered_image, 0.01))\n print(\"Starting outlining\")\n outline = downsized_image.copy()\n outline[colony_edges] = 65535\n distance = ndimage.distance_transform_edt(filtered_image)\n smoothed_well = ndimage.gaussian_filter(downsized_image, 0.35)\n outline.copy()\n objs, num_objs = ndimage.label(filtered_image)\n print(\"Applying filters for points\")\n if self.mode == \"A\":\n # point selection: Smoothest point in the center region\n for obj in range(1, num_objs + 1):\n print(\"On object {} of {}\".format(obj, num_objs))\n mask = objs == obj\n dist_mask = distance * mask\n # for each colony,\n # find the maximum distance 
from the two fold distance map.\n # The edge is at 0% and the center of the colony is at 100%\n d_max = dist_mask.max()\n # Getting the points which is at least 40% away from the edge\n top_percent = dist_mask > (d_max * 0.40)\n colony_mask = smoothed_well * top_percent\n colony_edges = feature.canny(colony_mask, 0.1)\n # applying the second distance transform\n # to find the smoothest point in the correct region\n inner_edges = ndimage.distance_transform_edt(\n ~colony_edges * top_percent\n )\n smooth_point = numpy.where(inner_edges == inner_edges.max())\n smooth_point = (smooth_point[0][0], smooth_point[1][0])\n smooth_point_corrected = (\n smooth_point[0] * DOWNSCALING_FACTOR,\n smooth_point[1] * DOWNSCALING_FACTOR,\n )\n self._point_locations.append(smooth_point_corrected)\n elif self.mode == \"C\":\n for obj in range(1, num_objs + 1):\n print(\"On object {} of {}\".format(obj, num_objs))\n mask = objs == obj\n dist_mask = distance * mask\n # point selection: edge, ridge & center respectively\n self.get_mode_c_points(dist_mask, 0, 0.03)\n self.get_mode_c_points(dist_mask, 0.15, 0.20)\n self.get_mode_c_points(dist_mask, 0.90, 0.99)", "def __call__(self, *args, **kwargs):\n\n dataset = TextOnlyCocoAnnotation()\n\n n_images = 1000 if self.is_train else 500\n for i in tqdm(range(1, n_images + 1)):\n image_path = os.path.join(self.images_folder, 'img_{}.jpg'.format(i))\n annotation_path = os.path.join(self.annotations_folder, 'gt_img_{}.txt'.format(i))\n\n with open(annotation_path, encoding='utf-8-sig') as read_file:\n content = [line.strip() for line in read_file.readlines()]\n for line in content:\n dataset.add_bbox(image_path, imagesize.get(image_path), self.parse_line(line))\n\n return dataset", "def datasetratiocopy_xl_extend(l,ratio,x_offset,y_offset):#只延伸上下两边以及左边的点\r\n dataset=[]\r\n for polyline in l:\r\n newpolyline=[]\r\n for pos in polyline:\r\n pos_x=pos[0]\r\n pos_y=pos[1]\r\n if abs((abs(pos_x)-globalconfig.X_LENGTH/2))<0.01:\r\n if pos_x<0: #judge if the pos is on the origin outline,if on outline,will be moved to the new enlarged outline and plus an extene length\r\n pos_x=pos[0]/globalconfig.CENTER_RATIO+(abs(pos_x)/pos_x*globalconfig.X_EXTENDED_LENGTH)+x_offset\r\n else:\r\n pos_x=pos[0]/globalconfig.CENTER_RATIO+x_offset \r\n else:\r\n pos_x=pos[0]/ratio+x_offset\r\n if abs((abs(pos_y)-globalconfig.Y_LENGTH/2))<0.01:\r\n pos_y=pos[1]/globalconfig.CENTER_RATIO+(abs(pos_y)/pos_y*globalconfig.Y_EXTENDED_LENGTH)+y_offset\r\n else:\r\n pos_y=pos[1]/ratio+y_offset \r\n newpolyline.append([pos_x,pos_y])\r\n dataset.append(newpolyline)\r\n return dataset", "def _load_pascal_annotation(self, index):\n image = index\n im_path = self.image_path_from_index(image)\n im = cv2.imread(im_path)\n width = im.shape[1]\n height = im.shape[0]\n num_objs = 0\n for ix, obj in enumerate(image.objects):\n if image.objects[ix].x > width - 2 or image.objects[ix].y > height - 2:\n continue \n assert(image.objects[ix].width > 0)\n assert(image.objects[ix].height > 0)\n\n num_objs += 1\n\n boxes = np.zeros((num_objs, 4), dtype=np.float32)\n\n partial_entity_class = np.zeros((num_objs, 96), dtype=np.int32)\n partial_relation_class = np.zeros((num_objs, num_objs, 43), dtype=np.int32)\n gt_classes = np.zeros((0, num_objs, 1), dtype=np.int32)\n overlaps = np.zeros((0, num_objs, self.num_classes), dtype=np.int64)\n # \"Seg\" area for pascal is just the box area\n seg_areas = np.zeros((num_objs), dtype=np.float32)\n queries = np.zeros((0, 235), dtype=np.float32)\n # Load object bounding boxes into a data 
frame.\n index = 0\n \n for ix, obj in enumerate(image.objects):\n if image.objects[ix].x > width - 2 or image.objects[ix].y > height - 2:\n continue\n # Make pixel indexes 0-based\n x1_offset = 0.0#image.objects[ix].width * (-0.1)\n x2_offset = 0.0#image.objects[ix].width * 0.1\n y1_offset = 0.0#image.objects[ix].height * (-0.1)\n y2_offset = 0.0#image.objects[ix].height * 0.1\n boxes[index][0] = max((image.objects[ix].x + x1_offset), 0.0)\n boxes[index][1] = max((image.objects[ix].y + y1_offset), 0.0)\n boxes[index][2] = min((image.objects[ix].x + x2_offset + image.objects[ix].width), width - 1)\n boxes[index][3] = min((image.objects[ix].y + y2_offset + image.objects[ix].height), height - 1)\n seg_areas[index] = (boxes[index][2] - boxes[index][0] + 1.0) * (boxes[index][3] - boxes[index][1] + 1.0)\n index += 1\n assert (boxes[:, 2] > boxes[:, 0]).all()\n assert (boxes[:, 3]\t > boxes[:, 1]).all() \n #load gt classes\n \n i_index = 0\n for i in range(image.objects_labels.shape[0]):\n if image.objects[i].x > width - 2 or image.objects[i].y > height - 2:\n continue\n partial_entity_class[i_index] = image.objects_labels[i]\n \n j_index = 0\n for j in range(image.objects_labels.shape[0]):\n if image.objects[j].x > width - 2 or image.objects[j].y > height - 2:\n continue\n partial_relation_class[i_index, j_index] = image.predicates_labels[i, j]\n j_index += 1\n i_index += 1\n seen = []\n for query_index in range(image.queries_gt.shape[0]):\n query_gt_classes = np.zeros((1, num_objs, 1), dtype=np.int32)\n query_overlaps = np.zeros((1, num_objs, self.num_classes), dtype=np.int64)\n query_overlaps[0, :, 3] = 1\n query_gt_classes[0, :, 0] = 3\n if image.one_hot_relations_gt[query_index][-1] == 1:\n # print \"negative triplet\"\n continue\n\n sub = image.one_hot_relations_gt[query_index][:96]\n obj = image.one_hot_relations_gt[query_index][96:96 * 2]\n rel = image.one_hot_relations_gt[query_index][96 * 2:]\n key = str(np.argmax(sub)) + \"_\" + str(np.argmax(rel)) + \"_\" + str(np.argmax(obj))\n if key in seen:\n continue\n seen.append(key)\n\n found = False\n i_index = 0\n for i in range(image.objects_labels.shape[0]):\n if image.objects[i].x > width - 2 or image.objects[i].y > height - 2:\n continue\n if not np.array_equal(image.objects_labels[i], sub):\n i_index += 1\n continue\n j_index = 0\n for j in range(image.objects_labels.shape[0]):\n if image.objects[j].x > width - 2 or image.objects[j].y > height - 2:\n continue \n\n if not np.array_equal(image.objects_labels[j], obj):\n j_index += 1\n continue\n if np.array_equal(rel, image.predicates_labels[i, j]):\n query_gt_classes[0, i_index, 0] = 1\n query_overlaps[0, i_index, 1] = 1\n query_overlaps[0, i_index, 3] = 0\n query_gt_classes[0, j_index, 0] = 2\n query_overlaps[0, j_index, 2] = 1\n query_overlaps[0, j_index, 3] = 0\n \n #partial_entity_class[i_index] = sub\n #partial_entity_class[j_index] = obj\n #partial_relation_class[i_index, j_index] = rel\n \n found = True\n j_index += 1\n i_index += 1\n if not found:\n continue\n gt_classes = np.concatenate((gt_classes, query_gt_classes), axis=0)\n overlaps = np.concatenate((overlaps, query_overlaps), axis=0)\n queries = np.concatenate((queries, image.one_hot_relations_gt[query_index].reshape([1,-1])), axis=0)\n\n return {'boxes': boxes,\n 'gt_classes': gt_classes,\n 'gt_overlaps': overlaps,\n 'flipped': False,\n 'seg_areas': seg_areas,\n 'query' : queries,\n 'partial_entity_class' : partial_entity_class,\n 'partial_relation_class' : partial_relation_class,\n 'orig_image': None}", "def 
create_XY(self, data, min_dimension, label_names):\n # Create empty array, X, for the images, and an empty list, y, for the image labels\n X = np.empty((0, min_dimension, min_dimension, 3))\n Y = []\n \n # For each artist name listed in label_names\n for name in label_names:\n \n # Get all images for each artist\n images = glob.glob(os.path.join(data, name, \"*.jpg\"))\n \n # For each image in images \n for image in tqdm(images): # I use tqdm() to allow the user to follow along\n \n # Load image\n loaded_img = cv2.imread(image)\n \n # Resize image to the specified dimensions\n resized_img = cv2.resize(loaded_img, (min_dimension, min_dimension), interpolation = cv2.INTER_AREA) # INTER_AREA means that it is resizing using pixel-area relation which was a suggested method by Ross\n \n # Create array of image\n image_array = np.array([np.array(resized_img)])\n \n # Append to trainX array and trainY list\n X = np.vstack((X, image_array))\n Y.append(name)\n \n return X, Y", "def extract_raster_dataset(dataset, mode=\"center\", nodata=None):\n\n values = read_gdal_values(dataset, nodata=nodata)\n\n coords = read_gdal_coordinates(dataset, mode=mode)\n\n projection = read_gdal_projection(dataset)\n\n return values, coords, projection", "def get_analyser_data(self):\n counts = self.get_roi_counts()\n thresholds = self.get_roi_thresholds()\n roi_coords = self.get_roi_coords()\n return [counts,thresholds,roi_coords]", "def data_splits(im_dir='/media/ignacio/Datos/plant_net/images_ori', tag=False):\n homedir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\n splits_dir = os.path.join(homedir, 'data', 'data_splits')\n print(\"Loading data...\")\n file_list = os.listdir(splits_dir)\n\n # Metadata labels\n metadata = np.genfromtxt(os.path.join(splits_dir, 'synsets.txt'), dtype='str', delimiter='/n')\n\n # Training splits\n train = np.genfromtxt(os.path.join(splits_dir, 'train.txt'), dtype='str', delimiter=' ')\n y_train = train[:, -1].astype(np.int32)\n if tag:\n X_train = train[:, 0:2].astype(object)\n X_train[:, 0] = np.array([os.path.join(im_dir, i) for i in X_train[:, 0]])\n else:\n X_train = np.array([os.path.join(im_dir, i) for i in train[:, 0]])\n\n # Validation splits\n if 'val.txt' in file_list:\n val = np.genfromtxt(os.path.join(splits_dir, 'val.txt'), dtype='str', delimiter=' ')\n y_val = val[:, -1].astype(np.int32)\n if tag:\n X_val = val[:, 0:2].astype(object)\n X_val[:, 0] = np.array([os.path.join(im_dir, i) for i in X_val[:, 0]])\n else:\n X_val = np.array([os.path.join(im_dir, i) for i in val[:, 0]])\n else:\n print 'Training with no validation data.'\n X_val, y_val = None, None\n\n return X_train, y_train, X_val, y_val, metadata", "def get_label_areas(pat_slice_rois):\n # Note labels_target_roi is a dictionary with key=slice_id and hence length = #slices\n\n num_of_slices = len(pat_slice_rois)\n # 8 indices: as always for all eight tissue classes 0-3=ES and 4-7=ED\n pat_slice_areas = np.zeros((8, num_of_slices))\n for slice_id, slice_rois in pat_slice_rois.items():\n # slice_rois is again a dictionary with key=tissue class index AND value=tuple(slice_x, slice_y)\n for cls_idx in slice_rois.keys():\n roi_box_cls = BoundingBox(slice_rois[cls_idx][0], slice_rois[cls_idx][1])\n area_cls = roi_box_cls.width * roi_box_cls.height\n pat_slice_areas[cls_idx, slice_id] = area_cls\n return pat_slice_areas", "def find_landmarks(self, image, detection):\n\n try:\n shape = self.shape_predictor(image, detection)\n coords = np.zeros((68, 2))\n\n for i in range(0, 68):\n coords[i] = 
(shape.part(i).x, shape.part(i).y)\n\n return coords\n\n except RuntimeError:\n return None", "def __get_landmarks(self, image):\n try:\n rects = self.detector(image, 1)\n size = len(rects)\n if size == 0:\n return None, None\n return numpy.matrix([[p.x, p.y] for p in self.predictor(image, rects[0]).parts()])\n except Exception:\n return None", "def read_roi_data(self) -> None:\n\n if not os.path.isfile(self.roi_coordinates_path):\n raise NoROIDataError(\n msg=\"SIMBA ERROR: No ROI definitions were found in your SimBA project. Please draw some ROIs before analyzing your ROI data\"\n )\n else:\n self.rectangles_df = pd.read_hdf(\n self.roi_coordinates_path, key=Keys.ROI_RECTANGLES.value\n ).dropna(how=\"any\")\n self.circles_df = pd.read_hdf(\n self.roi_coordinates_path, key=Keys.ROI_CIRCLES.value\n ).dropna(how=\"any\")\n self.polygon_df = pd.read_hdf(\n self.roi_coordinates_path, key=Keys.ROI_POLYGONS.value\n )\n if \"Center_XCenter_Y\" in self.polygon_df.columns:\n self.polygon_df = self.polygon_df.drop([\"Center_XCenter_Y\"], axis=1)\n self.polygon_df = self.polygon_df.dropna(how=\"any\")\n self.shape_names = list(\n itertools.chain(\n self.rectangles_df[\"Name\"].unique(),\n self.circles_df[\"Name\"].unique(),\n self.polygon_df[\"Name\"].unique(),\n )\n )\n self.roi_dict = {\n Keys.ROI_RECTANGLES.value: self.rectangles_df,\n Keys.ROI_CIRCLES.value: self.circles_df,\n Keys.ROI_POLYGONS.value: self.polygon_df,\n }\n self.roi_types_names_lst = set()\n for idx, r in self.roi_dict[Keys.ROI_RECTANGLES.value].iterrows():\n self.roi_types_names_lst.add(f'Rectangle: {r[\"Name\"]}')\n for idx, r in self.roi_dict[Keys.ROI_CIRCLES.value].iterrows():\n self.roi_types_names_lst.add(f'Circle: {r[\"Name\"]}')\n for idx, r in self.roi_dict[Keys.ROI_POLYGONS.value].iterrows():\n self.roi_types_names_lst.add(f'Polygon: {r[\"Name\"]}')\n self.roi_types_names_lst = list(self.roi_types_names_lst)\n for shape_type, shape_data in self.roi_dict.items():\n if shape_type == Keys.ROI_CIRCLES.value:\n self.roi_dict[Keys.ROI_CIRCLES.value][\"Center_X\"] = self.roi_dict[\n Keys.ROI_CIRCLES.value\n ][\"centerX\"]\n self.roi_dict[Keys.ROI_CIRCLES.value][\"Center_Y\"] = self.roi_dict[\n Keys.ROI_CIRCLES.value\n ][\"centerY\"]\n elif shape_type == Keys.ROI_RECTANGLES.value:\n self.roi_dict[Keys.ROI_RECTANGLES.value][\n \"Center_X\"\n ] = self.roi_dict[Keys.ROI_RECTANGLES.value][\"Bottom_right_X\"] - (\n (\n self.roi_dict[Keys.ROI_RECTANGLES.value][\"Bottom_right_X\"]\n - self.roi_dict[Keys.ROI_RECTANGLES.value][\"width\"]\n )\n / 2\n )\n self.roi_dict[Keys.ROI_RECTANGLES.value][\n \"Center_Y\"\n ] = self.roi_dict[Keys.ROI_RECTANGLES.value][\"Bottom_right_Y\"] - (\n (\n self.roi_dict[Keys.ROI_RECTANGLES.value][\"Bottom_right_Y\"]\n - self.roi_dict[Keys.ROI_RECTANGLES.value][\"height\"]\n )\n / 2\n )\n elif shape_type == Keys.ROI_POLYGONS.value:\n self.roi_dict[Keys.ROI_POLYGONS.value][\"Center_X\"] = self.roi_dict[\n Keys.ROI_POLYGONS.value\n ][\"Center_X\"]\n self.roi_dict[Keys.ROI_POLYGONS.value][\"Center_Y\"] = self.roi_dict[\n Keys.ROI_POLYGONS.value\n ][\"Center_Y\"]", "def get_x_y(self, indices: List[int], raw=False):\n\n annotations = []\n batch_of_input_images, batch_of_mask_sets, batch_of_bbox_sets, batch_of_label_sets, num_labels = super(RetinaDataset, self)._get_x_y(\n indices=indices,\n autoscale=True,\n use_masks=False,\n do_preprocessing=True,\n downscale=True\n )\n\n # Extract boxes\n for batch, sets in enumerate(zip(batch_of_input_images, batch_of_bbox_sets, batch_of_label_sets)):\n image, box_set, 
label_set = sets\n annotations.append({\n 'bboxes': box_set,\n 'labels': label_set\n })\n\n # Uncomment for DEBUG\n # ==========================\n # # ==========================\n # if self.is_training_dataset:\n # draw = image.copy()\n #\n # draw[..., 0] += 123.68 # R\n # draw[..., 1] += 116.779 # G\n # draw[..., 2] += 103.939 # B\n #\n # for label, box in zip(label_set, box_set):\n # draw_box(draw, [int(box[1]), int(box[0]), int(box[3]), int(box[2])], color=(255, 200, 0))\n # caption = \"{} {:.3f}\".format(label, 0)\n #\n # # print(self.labels.index(obj['name']) )\n #\n # cv2.putText(\n # img=draw,\n # text=caption,\n # org=(int(box[0]), int(box[1]) - 10),\n # fontFace=cv2.FONT_HERSHEY_PLAIN,\n # fontScale=1,\n # color=(255, 200, 0),\n # thickness=1)\n #\n # from matplotlib import pyplot as plt\n # fig = plt.figure(figsize=(10,15))\n # plt.axis('off')\n # try:\n # plt.imshow(draw.astype(np.uint8))\n # except:\n # pass\n # # plt.show()\n # Image.fromarray(draw.astype('uint8')).save('train_images/{}.png'.format(randint(0, 1000)))\n # # with open('train_images/{}.png'.format(randint(0, 1000)), 'wb') as f:\n # # fig.savefig(f, format='png')\n\n # # exit(0)\n # ==========================\n # ==========================\n\n # Compute regression targets\n targets = (batch_of_input_images, annotations) if raw else self.compute_targets(batch_of_input_images, annotations)\n # batch_of_input_images = self.compute_inputs(batch_of_input_images)\n return batch_of_input_images, list(targets)", "def calcdata(self):\n bot = max(0, self.r - self.radius)\n top = min(self.img.shape[0], self.r + self.radius)\n left = max(0, self.c - self.radius)\n right = min(self.img.shape[1], self.c + self.radius)\n self.data = self.img[bot:top, left:right]", "def process_set_metadata(self, data, set_name):\n hdf5_handler = self.hdf5_manager.get_group(set_name)\n image_dir = os.path.join(self.data_path, self.image_dir_path[set_name])\n if 'test' in set_name:\n is_test = True\n data_ = data[0]\n filename_ids = data[1]\n annotations = data[2]\n category = data[3]\n supercategory = data[4]\n category_id = data[5]\n else:\n is_test = False\n data_ = data[0]\n annotations = data[1]\n annotation_id_dict = data[2]\n category = data[3]\n supercategory = data[4]\n category_id = data[5]\n filename_ids = data[6]\n images_fname_by_id = data[7]\n skeleton = data[8]\n keypoints = data[9]\n\n keypoints_ = str2ascii(keypoints)\n skeleton_ = np.array(pad_list(skeleton, -1), dtype=np.uint8)\n\n category_ = str2ascii(category)\n supercategory_ = str2ascii(supercategory)\n\n image_filenames = []\n coco_urls = []\n width = []\n height = []\n image_id = []\n\n annotation_id = []\n area = []\n iscrowd = [0, 1]\n segmentation = []\n num_keypoints = list(range(0, 17 + 1))\n keypoints_list = []\n bbox = []\n object_id = []\n\n # coco id lists\n # These are order by entry like in the annotation files.\n # I.e., coco_images_ids[0] has the object_id with the file_name, id, height, etc.\n # as coco_annotation_file[set_name][\"images\"][0]\n coco_images_ids = []\n coco_categories_ids = []\n coco_annotations_ids = []\n\n if is_test:\n object_fields = [\"image_filenames\", \"coco_urls\", \"width\", \"height\"]\n else:\n object_fields = [\"image_filenames\", \"coco_urls\", \"width\", \"height\",\n \"category\", \"supercategory\", \"boxes\", \"area\",\n \"iscrowd\", \"segmentation\",\n \"image_id\", \"category_id\", \"annotation_id\",\n \"num_keypoints\", \"keypoints\"]\n\n list_boxes_per_image = []\n list_keypoints_per_image = []\n list_object_ids_per_image 
= []\n list_image_filenames_per_num_keypoints = []\n list_object_ids_per_keypoint = [] # body part\n\n if self.verbose:\n print('> Adding data to default group:')\n prgbar = progressbar.ProgressBar(max_value=len(data_))\n\n counter = 0\n tmp_coco_annotations_ids = {}\n\n for i, key in enumerate(data_):\n annotation = data_[key]\n image_filenames.append(annotation[\"file_name\"])\n width.append(annotation[\"width\"])\n height.append(annotation[\"height\"])\n coco_urls.append(annotation[\"coco_url\"])\n image_id.append(annotation[\"id\"])\n\n if is_test:\n # *** object_id ***\n # [filename, coco_url, width, height]\n object_id.append([i, i, i, i])\n list_object_ids_per_image.append([i])\n else:\n boxes_per_image = []\n\n if \"object\" in annotation:\n for j, obj_idx in enumerate(annotation[\"object\"]):\n obj = annotation[\"object\"][obj_idx]\n area.append(obj[\"area\"])\n bbox.append(obj[\"bbox\"])\n annotation_id.append(obj[\"id\"])\n segmentation.append(obj[\"segmentation\"])\n keypoints_list.append(obj[\"keypoints\"])\n\n # *** object_id ***\n # [filename, coco_url, width, height,\n # category, supercategory,\n # bbox, area, iscrowd, segmentation,\n # \"image_id\", \"category_id\", \"annotation_id\"\n # \"num_keypoints\", \"keypoints\"]\n object_id.append([i, i, i, i,\n category.index(obj[\"category\"]), supercategory.index(\n obj[\"supercategory\"]),\n counter, counter, obj[\"iscrowd\"], counter,\n i, category.index(obj[\"category\"]), counter,\n obj[\"num_keypoints\"], counter])\n\n boxes_per_image.append(counter)\n\n # temporary var\n tmp_coco_annotations_ids[obj[\"id\"]] = counter\n\n # update counter\n counter += 1\n\n list_boxes_per_image.append(boxes_per_image)\n list_keypoints_per_image.append(boxes_per_image)\n list_object_ids_per_image.append(boxes_per_image)\n\n # update progressbar\n if self.verbose:\n prgbar.update(i)\n\n # update progressbar\n if self.verbose:\n prgbar.finish()\n\n if self.verbose:\n print('> Processing coco lists:')\n prgbar = progressbar.ProgressBar(max_value=len(annotations['images']))\n\n # set coco id lists\n for i, annot in enumerate(annotations['images']):\n fname_id = image_filenames.index(os.path.join(image_dir, annot['file_name']))\n coco_images_ids.append(fname_id)\n\n # update progressbar\n if self.verbose:\n prgbar.update(i)\n\n # update progressbar\n if self.verbose:\n prgbar.finish()\n\n coco_categories_ids = list(range(len(category)))\n\n if not is_test:\n if self.verbose:\n prgbar = progressbar.ProgressBar(max_value=len(annotations['annotations']))\n for i, annot in enumerate(annotations['annotations']):\n annot_id = tmp_coco_annotations_ids[annot['id']]\n coco_annotations_ids.append(annot_id)\n\n # update progressbar\n if self.verbose:\n prgbar.update(i)\n\n # update progressbar\n if self.verbose:\n prgbar.finish()\n\n # process lists\n if not is_test:\n if self.verbose:\n print('> Processing lists...')\n\n for i in range(len(keypoints)):\n imgs_per_num = [val[0] for _, val in enumerate(object_id) if val[8] == i]\n imgs_per_num = list(set(imgs_per_num)) # get unique values\n imgs_per_num.sort()\n list_image_filenames_per_num_keypoints.append(imgs_per_num)\n\n for i in range(len(keypoints)):\n objs_per_keypoint = [j for j, val in enumerate(\n keypoints_list) if val[i * 3] > 0 or val[i * 3 + 1] > 0]\n objs_per_keypoint = list(set(objs_per_keypoint)) # get unique values\n objs_per_keypoint.sort()\n list_object_ids_per_keypoint.append(objs_per_keypoint)\n\n hdf5_write_data(hdf5_handler, 'image_filenames',\n str2ascii(image_filenames), 
dtype=np.uint8,\n fillvalue=0)\n hdf5_write_data(hdf5_handler, 'coco_urls',\n str2ascii(coco_urls), dtype=np.uint8,\n fillvalue=0)\n hdf5_write_data(hdf5_handler, 'width',\n np.array(width, dtype=np.int32),\n fillvalue=-1)\n hdf5_write_data(hdf5_handler, 'height',\n np.array(height, dtype=np.int32),\n fillvalue=-1)\n hdf5_write_data(hdf5_handler, 'category',\n category_, dtype=np.uint8,\n fillvalue=0)\n hdf5_write_data(hdf5_handler, 'supercategory',\n supercategory_, dtype=np.uint8,\n fillvalue=0)\n hdf5_write_data(hdf5_handler, 'image_id',\n np.array(image_id, dtype=np.int32),\n fillvalue=-1)\n hdf5_write_data(hdf5_handler, 'category_id',\n np.array(category_id, dtype=np.int32),\n fillvalue=-1)\n hdf5_write_data(hdf5_handler, 'object_ids',\n np.array(object_id, dtype=np.int32),\n fillvalue=-1)\n hdf5_write_data(hdf5_handler, 'object_fields',\n str2ascii(object_fields), dtype=np.uint8,\n fillvalue=0)\n hdf5_write_data(hdf5_handler, 'coco_images_ids',\n np.array(coco_images_ids, dtype=np.int32),\n fillvalue=-1)\n hdf5_write_data(hdf5_handler, 'coco_categories_ids',\n np.array(coco_categories_ids, dtype=np.int32),\n fillvalue=-1)\n hdf5_write_data(hdf5_handler, 'list_object_ids_per_image',\n np.array(pad_list(list_object_ids_per_image, -1), dtype=np.int32),\n fillvalue=-1)\n\n if not is_test:\n hdf5_write_data(hdf5_handler, 'annotation_id',\n np.array(annotation_id, dtype=np.int32),\n fillvalue=-1)\n hdf5_write_data(hdf5_handler, 'keypoint_names',\n keypoints_, dtype=np.uint8,\n fillvalue=0)\n hdf5_write_data(hdf5_handler, 'skeleton',\n skeleton_, dtype=np.uint8,\n fillvalue=0)\n hdf5_write_data(hdf5_handler, 'boxes',\n np.array(bbox, dtype=np.float),\n fillvalue=-1)\n hdf5_write_data(hdf5_handler, 'iscrowd',\n np.array(iscrowd, dtype=np.uint8),\n fillvalue=-1)\n\n nrows = len(segmentation)\n ncols = max([len(elem) for elem in segmentation])\n dset = hdf5_handler.create_dataset('segmentation',\n (nrows, ncols),\n dtype=np.float,\n chunks=True,\n compression=\"gzip\",\n compression_opts=4,\n fillvalue=-1)\n\n if self.verbose:\n print(' -- Saving segmentation masks to disk (this will take some time)')\n prgbar = progressbar.ProgressBar(max_value=nrows)\n for i in range(nrows):\n dset[i, :len(segmentation[i])] = np.array(segmentation[i], dtype=np.float)\n if self.verbose:\n prgbar.update(i)\n\n if self.verbose:\n prgbar.finish()\n\n hdf5_write_data(hdf5_handler, 'area',\n np.array(area, dtype=np.int32),\n fillvalue=-1)\n hdf5_write_data(hdf5_handler, 'num_keypoints',\n np.array(num_keypoints, dtype=np.uint8),\n fillvalue=0)\n hdf5_write_data(hdf5_handler, 'keypoints',\n np.array(keypoints_list, dtype=np.int32),\n fillvalue=0)\n hdf5_write_data(hdf5_handler, 'coco_annotations_ids',\n np.array(coco_annotations_ids, dtype=np.int32),\n fillvalue=-1)\n\n pad_value = -1\n hdf5_write_data(hdf5_handler, 'list_boxes_per_image',\n np.array(pad_list(list_boxes_per_image, pad_value), dtype=np.int32),\n fillvalue=pad_value)\n hdf5_write_data(hdf5_handler, 'list_keypoints_per_image',\n np.array(pad_list(list_keypoints_per_image, pad_value), dtype=np.int32),\n fillvalue=pad_value)\n hdf5_write_data(hdf5_handler, 'list_image_filenames_per_num_keypoints',\n np.array(pad_list(list_image_filenames_per_num_keypoints,\n pad_value), dtype=np.int32),\n fillvalue=pad_value)\n hdf5_write_data(hdf5_handler, 'list_object_ids_per_keypoint',\n np.array(pad_list(list_object_ids_per_keypoint,\n pad_value), dtype=np.int32),\n fillvalue=pad_value)", "def get_data():\r\n spatial_expmat = 
np.load('/home/anniegao/spatial_magan/data/spatial_pca_with_coords.npz')['arr_0']\r\n spatial_expmat[:,100:] *= 5\r\n rna_expmat = np.load('/home/anniegao/spatial_magan/data/rna_pca_sampled.npz')['arr_0']\r\n spatial_pca_components = np.load('/home/anniegao/spatial_magan/data/spatial_pca_100components.npz')['arr_0']\r\n rna_pca_components = np.load('/home/anniegao/spatial_magan/data/rna_pca_100components.npz')['arr_0']\r\n spatial_cluster_labels = np.load('/home/anniegao/spatial_magan/data/spatial_cluster_3_labels_phate.npz')['arr_0']\r\n rna_cluster_labels = np.load('/home/anniegao/spatial_magan/data/rna_cluster_5_labels_sampled.npz')['arr_0']\r\n return spatial_expmat, rna_expmat, spatial_pca_components, rna_pca_components, spatial_cluster_labels, rna_cluster_labels", "def create_maps(self,data,tod,mjd,coords):\n features = np.log10(self.getFeatures(data))/np.log10(2)\n special_idx = np.where((features==16))[0]\n # This is for getting the stare data on more recent\n # calibration observations.\n point_data = self.get_point_data(data,special_idx)\n \n cel_maps = self.create_single_map(tod,\n coords['ra'],\n coords['dec'],\n self.source_positions['ra'][coords['sky_data_flag']],\n self.source_positions['dec'][coords['sky_data_flag']])\n az_maps = self.create_single_map(tod,\n coords['az'],\n coords['el'],\n self.source_positions['az'][coords['sky_data_flag']],\n self.source_positions['el'][coords['sky_data_flag']])\n cel_maps= self.average_maps(cel_maps)\n az_maps = self.average_maps(az_maps)\n xygrid = np.meshgrid((np.arange(self.Nx)+0.5)*self.dx - self.Nx*self.dx/2.,\n (np.arange(self.Ny)+0.5)*self.dy - self.Ny*self.dy/2.)\n \n \n cel_maps['xygrid']=xygrid\n cel_maps['StareCoords']= {**point_data,'pa':np.nanmean(self.source_positions['pa'])}\n az_maps['xygrid']=xygrid\n az_maps['StareCoords'] = {**point_data,'pa':np.nanmean(self.source_positions['pa'])}\n return cel_maps,az_maps", "def spatial(self):", "def identify_dbs(image):\n locations = {\"red\": Point(), \"green\": Point(), \"blue\": Point()}\n masks = {\"red\": [], \"green\": [], \"blue\": []}\n\n bridge = cv_bridge.CvBridge()\n image = bridge.imgmsg_to_cv2(image, \"bgr8\")\n hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n\n # upper and lower bounds for red\n # using python 3 bgr [0,0,188] = hsv [0, 255, 188]\n lower_red = numpy.array([0, 100, 100]) \n upper_red = numpy.array([10, 255, 255])\n masks[\"red\"] = cv2.inRange(hsv, lower_red, upper_red)\n\n # upper and lower bounds for green\n # using python 3 bgr [0,175,0] = hsv [60, 255, 175]\n lower_green = numpy.array([50, 100, 100]) \n upper_green = numpy.array([70, 255, 255])\n masks[\"green\"] = cv2.inRange(hsv, lower_green, upper_green)\n\n # upper and lower bounds for blue\n # using python 3 bgr [176, 0, 17] = hsv [123, 255, 176]\n lower_blue = numpy.array([113, 100, 100])\n upper_blue = numpy.array([133, 255, 255])\n masks[\"blue\"] = cv2.inRange(hsv, lower_blue, upper_blue)\n\n x, y, w, h = 0, 0, image.shape[1]//3, image.shape[0]\n\n for color, mask in masks.items():\n pixels = {\"left\": 0, \"middle\": 0, \"right\": 0}\n \n # define section of image to use for left, middle and right\n left = mask[y:y+h, x:x+w]\n middle = mask[y:y+h, x+w:x+w+w]\n right = mask[y:y+h, x+w+w:x+3*w]\n\n # count the number of pixels in each section\n pixels[\"left\"] = cv2.countNonZero(left)\n pixels[\"middle\"] = cv2.countNonZero(middle)\n pixels[\"right\"] = cv2.countNonZero(right)\n location = max(pixels, key=pixels.get)\n\n # map the relative position of the db (left, middle, right) to the 
correct Point()\n locations[color] = db_locations[location]\n \n return locations", "def img_to_df(img_name, max_lat, min_lat, max_long, min_long):\n\n # read in the image\n dataset = rasterio.open(img_name)\n\n # band1 contains the biomass data we are interested in\n band1 = dataset.read(1)\n data = band1\n\n height = dataset.height\n width = dataset.width\n\n # longitude_delta is the length of each pixel in the x direction\n diff_long = max_long - min_long\n longitude_delta = diff_long / width\n\n # latitude_delta is the length of each pixel in the y direction\n diff_lat = max_lat - min_lat\n latitude_delta = diff_lat / height\n\n\n # loop over all the pixels in the map\n lat = max_lat\n long = min_long\n lat_long_data = []\n for x in range(0, width):\n lat = max_lat # Set longitude to far North (Top)\n for y in range(0, height):\n bm = data[y, x] # get the biomass at this lat, long\n if bm > 0:\n print(str(lat) + \" \" + str(long) + \" \" + str(bm))\n lat_long_data.append([lat, long, bm])\n lat = lat - latitude_delta\n long = long + longitude_delta\n\n # convert to a dataframe, and return\n return pd.DataFrame(data=lat_long_data, columns=['latitude', 'longitude', 'biomass'])", "def get_xyz_coord(path):\r\n\tlabels = loadmat(path)\r\n\tanno_xyz = []\r\n\tfor index in range(0, 1500):\r\n\t\tanno_xyz.append([])\r\n\t\tfor i in range(0, 21):\r\n\t\t\tx = labels['handPara'][0][i][index]\r\n\t\t\ty = labels['handPara'][1][i][index]\r\n\t\t\tz = labels['handPara'][2][i][index]\r\n\t\t\tanno_xyz[-1].append([x, y, z])\r\n\tanno_xyz = np.array(anno_xyz)\r\n\t# anno_xyz = np.reshape(labels['handPara'], (1500, 21, 3))\r\n\treturn anno_xyz", "def la(x) :\r\n return Feature(x, \"leaf_area\")", "def test_select_roi():\n _c = io.create_sample_Dataset(n_frames=5, rows=10, cols=10)\n _c = _c.sel(x=slice(35, 70), y=slice(30, 90))\n assert _c.u.shape == (7, 2, 5) # note the last dimension is preserved", "def get_dataset(self, line_index):\n sample = self._samples[line_index]\n\n with self.env.begin() as txn:\n basename = Path(sample['file_path']).basename()\n data = txn.get(basename.encode(\"ascii\"))\n img = pickle.loads(data)\n\n x = Augmentor.preprocess(img, (128, 32), self._augment)\n y = [self._char_table.get_label(ch) for ch in sample['text']]\n\n return x, y", "def extract_data(filename: str, directory: str) -> Dict:\n with open(filename) as f:\n lines = f.readlines()\n\n # Split data by :\n annotations = [line.replace(\" \", \"\").split(\":\") for line in lines]\n\n # Split data by ;\n for annotation in annotations:\n annotation[1] = annotation[1].split(\";\")\n\n # Loop for saving metadata into dictionary\n annot_dict = dict()\n for annotation in annotations:\n img = annotation[0]\n bbox_metadata = annotation[1]\n bbox = list()\n \n # Path to images\n img_path = os.path.join(directory, img)\n im = Image.open(img_path)\n width, height = im.size\n\n # Iterate over each bounding box\n for annot in bbox_metadata:\n \n if \"MISC_SIGNS\" == annot:\n signStatus = 'N/A'\n signTypes = \"MISC_SIGNS\"\n signPurpose = 'N/A'\n\n signBB = (-1, -1, -1, -1)\n signC = (-1, -1)\n signSize = 0\n aspectRatio = 0\n\n bbox.append({\"signStatus\": signStatus, \n \"signTypes\": signTypes, \n \"signPurpose\": signPurpose, \n \"signBB\": signBB, \n \"signC\": signC, \n \"signSize\": signSize, \n \"aspectRatio\": aspectRatio})\n elif \"\\n\" in annot:\n pass\n else:\n data = annot.split(\",\")\n \n signStatus = data[0] # signStatus\n signTypes = data[6] # signTypes\n signPurpose = data[5] # PROHIBITORY, WARNING, OTHER, 
INFORMATION\n tl_x, tl_y, br_x, br_y = data[3], data[4], data[1], data[2]\n \n if is_valid_decimal(tl_x):\n tl_x = float(tl_x)\n else:\n tl_x = float(cutoff_letter(tl_x))\n\n if is_valid_decimal(tl_y):\n tl_y = float(tl_y)\n else:\n tl_y = float(cutoff_letter(tl_y))\n\n if is_valid_decimal(br_x):\n br_x = float(br_x)\n else:\n br_x = float(cutoff_letter(br_x))\n\n if is_valid_decimal(br_y):\n br_y = float(br_y)\n else:\n br_y = float(cutoff_letter(br_y))\n\n if tl_x < 0:\n tl_x = 0\n elif tl_x > width:\n tl_x = width\n \n if tl_y < 0:\n tl_y = 0\n elif tl_y > height:\n tl_y = height\n \n if br_x < 0:\n br_x = 0\n elif br_x > width:\n br_x = width\n \n if br_y < 0:\n br_y = 0\n elif br_y > height:\n br_y = height\n\n signBB = (tl_x, tl_y, br_x, br_y)\n signC = (br_x + tl_x)/2, (br_y + tl_y)/2\n signSize = (br_x - tl_x) * (br_y - tl_y)\n aspectRatio = (br_x - tl_x) / (br_y - tl_y)\n\n bbox.append({\"signStatus\": signStatus, \n \"signTypes\": signTypes, \n \"signPurpose\": signPurpose, \n \"signBB\": signBB, \n \"signC\": signC, \n \"signSize\": signSize, \n \"aspectRatio\": aspectRatio})\n \n \n annot_dict[img_path] = bbox\n return annot_dict", "def InitDataset(self):\n train_txt = 'ImageSets/Main/train.txt'\n val_txt = 'ImageSets/Main/val.txt'\n annotations = \"Annotations\"\n jpegimages = \"JPEGImages\"\n images_path = train_txt if (self.is_train) else val_txt \n images_path = readTxt(os.path.join(self.path, images_path))\n images_path.pop(-1)\n # rawdata format: [path_2_image, path_2_xml]\n rawData = list()\n for each in images_path:\n xml = os.path.join(self.path, annotations, each + '.xml')\n jpeg = os.path.join(self.path, jpegimages, each + '.jpg')\n rawData.append([jpeg, xml])\n return rawData", "def get_dataset(reader: DataReader):\n\n xs = []\n ys = []\n\n for annotation_sentences in reader.annotations:\n for annotation in annotation_sentences:\n xs.append([annotation.fee_raw] + annotation.sentence)\n ys.append(annotation.frame)\n\n return xs, ys", "def image_to_spots(self, data_image: Union[np.ndarray, xr.DataArray]) -> SpotAttributes:\n raise NotImplementedError()", "def _get_data(filename: str, image_path: str, annotation_path: str) -> Data:\n data = Data(os.path.join(image_path, f\"{filename}.jpg\"))\n box2d = []\n with open(os.path.join(annotation_path, f\"{filename}.xml\"), \"r\", encoding=\"utf-8\") as fp:\n objects = xmltodict.parse(fp.read())[\"annotation\"][\"object\"]\n if not isinstance(objects, list):\n objects = [objects]\n for obj in objects:\n attributes = {attribute: bool(int(obj[attribute])) for attribute in _BOOLEAN_ATTRIBUTES}\n attributes[\"pose\"] = obj[\"pose\"]\n bndbox = obj[\"bndbox\"]\n box2d.append(\n LabeledBox2D(\n float(bndbox[\"xmin\"]),\n float(bndbox[\"ymin\"]),\n float(bndbox[\"xmax\"]),\n float(bndbox[\"ymax\"]),\n category=obj[\"name\"],\n attributes=attributes,\n )\n )\n data.label.box2d = box2d\n return data", "def map_segmentation_to_dataframe( segmentation_type, segmentation_image ):\n mydf_fn = get_data( segmentation_type )\n mydf = pd.read_csv( mydf_fn )\n mylgo = ants.label_geometry_measures( segmentation_image )\n return pd.merge( mydf, mylgo, how='left', on=[\"Label\"] )", "def nine_regions(self):\n\n coordinateList = []\n\n # Top left.\n x = (int)( self.oriImgSize[IDX_WIDTH] * self.ratioTopLeft[IDX_X] )\n y = (int)( self.oriImgSize[IDX_HEIGHT] * self.ratioTopLeft[IDX_Y] )\n coordinateList.append( [x, y] )\n\n # Top center.\n x = (int)( self.oriImgSize[IDX_WIDTH] * 0.5 - self.regionSize[IDX_WIDTH] / 2 )\n y = (int)( 
self.oriImgSize[IDX_HEIGHT] * self.ratioTopLeft[IDX_Y] ) \n coordinateList.append( [x, y] )\n\n # Top right.\n x = (int)( self.oriImgSize[IDX_WIDTH] * ( 1.0 - self.ratioTopLeft[IDX_X] ) - self.regionSize[IDX_WIDTH] )\n y = (int)( self.oriImgSize[IDX_HEIGHT] * self.ratioTopLeft[IDX_Y] )\n coordinateList.append( [x, y] )\n\n # Center left.\n x = (int)( self.oriImgSize[IDX_WIDTH] * self.ratioTopLeft[IDX_X] )\n y = (int)( self.oriImgSize[IDX_HEIGHT] * 0.5 - self.regionSize[IDX_HEIGHT] / 2 )\n coordinateList.append( [x, y] )\n\n # Center.\n x = (int)( self.oriImgSize[IDX_WIDTH] * 0.5 - self.regionSize[IDX_WIDTH] / 2 )\n y = (int)( self.oriImgSize[IDX_HEIGHT] * 0.5 - self.regionSize[IDX_HEIGHT] / 2 )\n coordinateList.append( [x, y] )\n\n # Center right.\n x = (int)( self.oriImgSize[IDX_WIDTH] * (1.0 - self.ratioTopLeft[IDX_X]) - self.regionSize[IDX_WIDTH] )\n y = (int)( self.oriImgSize[IDX_HEIGHT] * 0.5 - self.regionSize[IDX_HEIGHT] / 2 )\n coordinateList.append( [x, y] )\n\n # Bottom left.\n x = (int)( self.oriImgSize[IDX_WIDTH] * self.ratioTopLeft[IDX_X] )\n y = (int)( self.oriImgSize[IDX_HEIGHT] * (1.0 - self.ratioTopLeft[IDX_Y]) - self.regionSize[IDX_HEIGHT] )\n coordinateList.append( [x, y] )\n\n # Bottom center.\n x = (int)( self.oriImgSize[IDX_WIDTH] * 0.5 - self.regionSize[IDX_WIDTH] / 2 )\n y = (int)( self.oriImgSize[IDX_HEIGHT] * (1.0 - self.ratioTopLeft[IDX_Y]) - self.regionSize[IDX_HEIGHT] )\n coordinateList.append( [x, y] )\n\n # Bottom right.\n x = (int)( self.oriImgSize[IDX_WIDTH] * (1.0 - self.ratioTopLeft[IDX_X]) - self.regionSize[IDX_WIDTH] )\n y = (int)( self.oriImgSize[IDX_HEIGHT] * (1.0 - self.ratioTopLeft[IDX_Y]) - self.regionSize[IDX_HEIGHT] )\n coordinateList.append( [x, y] )\n\n return coordinateList", "def mark_regions_image(self, image, stats):\n detector = cv2.SimpleBlobDetector_create()\n keypoints = detector.detect(image)\n output = cv2.drawKeypoints(image, keypoints, np.array([]), (0,0,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\n cv2.namedWindow(\"marks\", cv2.WINDOW_AUTOSIZE)\n cv2.imshow(\"marks\", output)\n cv2.waitKey(0)\n cv2.destroyWindow(\"marks\")\n return output", "def load_dataset(image_home, mask_home, patient_list, \n size = 512, \n downsample = 0.5, \n overlap = 1.5, \n verbose=False):\n\n image_list = np.concatenate([sorted(glob.glob(f'{image_home}/{p}/*')) for p in patient_list])\n mask_list = np.concatenate([sorted(glob.glob(f'{mask_home}/{p}/*')) for p in patient_list])\n\n if verbose:\n for i, (im, m) in enumerate(zip(image_list, mask_list)):\n print(i, im, m)\n\n x = []\n y = [] \n\n for im, m in zip(image_list, mask_list):\n image = cv2.imread(im)[:,:,::-1]\n mask = cv2.imread(m, -1)\n mask = squash_labels(mask)\n \n image = cv2.resize(image, dsize=(0,0), fx=downsample, fy=downsample)\n mask = cv2.resize(mask, dsize=(0,0), fx=downsample, fy=downsample,\n interpolation=cv2.INTER_NEAREST)\n\n # assert (image.shape == mask.shape).all()\n split_x , split_y = split(image, mask, int(size * downsample), overlap)\n\n x.append(split_x)\n y.append(split_y)\n\n\n x = np.concatenate(x, axis=0)\n y = np.concatenate(y, axis=0)\n y = np.eye(N=y.shape[0], M=4)[y]\n\n shuffle = np.arange(x.shape[0]).astype(np.int)\n np.random.shuffle(shuffle)\n x = x[shuffle, :]\n y = y[shuffle, :]\n\n x = (x / 255.).astype(np.float32)\n\n print('split_datasets returning x:', x.shape, x.dtype, x.min(), x.max())\n print('split_datasets returning y:', y.shape, y.dtype)\n return x, y", "def social_infrastructure_point(osm_path): \n df_all = 
retrieve(osm_path,'points',['other_tags']).rename(columns={'other_tags': 'asset'}) \n \n #get requested healthcare assets categorized under the key 'healthcare' with correct formatting \n df_h = healthcare_filter(df_all)\n \n #get requested healthcare assets categorized under the key 'amenity' \n df_a = pandas.DataFrame(columns=['osm_id','asset','geometry']) #create df for saving data\n for row in range(len(df_all.index)): \n if 'amenity' in df_all[\"asset\"][row]: \n if not 'healthcare' in df_all[\"asset\"][row]: #check if healthcare key is present\n df_a = df_a.append(df_all.loc[row]) #if so, save in df\n \n if '\"amenity\"=>\"doctors\"' in df_a[\"asset\"][row]:\n df_a[\"asset\"][row] = 'doctors' #to be consistent with asset list \n elif '\"amenity\"=>\"pharmacy\"' in df_a[\"asset\"][row]:\n df_a[\"asset\"][row] = 'pharmacy'\n elif '\"amenity\"=>\"hospital\"' in df_a[\"asset\"][row]:\n df_a[\"asset\"][row] = 'hospital'\n elif '\"amenity\"=>\"clinic\"' in df_a[\"asset\"][row]:\n df_a[\"asset\"][row] = 'clinic'\n elif '\"amenity\"=>\"dentist\"' in df_a[\"asset\"][row]:\n df_a[\"asset\"][row] = 'dentist'\n else:\n df_a = df_a.drop(index=row)\n \n df_social_points = df_a.append(df_h)\n \n return df_social_points.reset_index(drop=True)", "def _load_kitti_annotation(self, index):\n\n if self._image_set == 'test':\n lines = []\n else:\n filename = os.path.join(self._data_path, 'training', 'label_2', index + '.txt')\n lines = []\n with open(filename) as f:\n for line in f:\n words = line.split()\n cls = words[0]\n truncation = float(words[1])\n occlusion = int(words[2])\n height = float(words[7]) - float(words[5])\n if cls in self._class_to_ind and truncation < 0.5 and occlusion < 3 and height > 25:\n #if cls in self._class_to_ind:\n lines.append(line)\n\n num_objs = len(lines)\n \n boxes = np.zeros((num_objs, 4), dtype=np.float32)\n gt_classes = np.zeros((num_objs), dtype=np.int32)\n overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)\n\n for idx, line in enumerate(lines):\n words = line.split()\n cls = self._class_to_ind[words[0]]\n boxes[idx, :] = [float(num) for num in words[4:8]]\n gt_classes[idx] = cls\n overlaps[idx, cls] = 1.0\n\n overlaps = scipy.sparse.csr_matrix(overlaps)\n\n return {'boxes' : boxes,\n 'gt_classes' : gt_classes,\n 'gt_overlaps' : overlaps,\n 'flipped' : False}", "def createDataset_inpainting(outputPath, imagePathList, labelList):\n assert (len(imagePathList) == len(box_x_list) == len(box_y_list))\n nSamples = len(imagePathList)\n if not os.path.exists(outputPath):\n os.mkdir(outputPath)\n env = lmdb.open(outputPath, map_size=1099511627776)\n cache = {}\n cnt = 1\n for i in range(nSamples):\n imagePath = imagePathList[i]\n box_x = box_x_list[i]\n box_y = box_y_list[i]\n if len(box_x) == 0:\n continue\n if not os.path.exists(imagePath):\n print('%s does not exist' % imagePath)\n continue\n with open(imagePath, 'rb') as f:\n imageBin = f.read()\n\n imageKey = 'image-%09d' % cnt\n cache[imageKey] = imageBin\n box_x_Key = 'boxes_x-%09d' % cnt\n box_y_Key = 'boxes_y-%09d' % cnt\n cache[box_x_Key] = box_x.encode()\n cache[box_y_Key] = box_y.encode()\n\n if labelList:\n labelKey = 'label-%09d' % cnt\n cache[labelKey] = labelList[i].encode()\n if region_mask_list:\n region_mask_Key = 'region_mask-%09d' % cnt\n cache[region_mask_Key] = open(region_mask_list[i], 'rb').read()\n if pixel_mask_list:\n pixel_mask_Key = 'pixel_mask-%09d' % cnt\n cache[pixel_mask_Key] = open(pixel_mask_list[i], 'rb').read()\n # embed()\n if cnt % 1000 == 0:\n writeCache(env, cache)\n 
cache = {}\n print('Written %d / %d' % (cnt, nSamples))\n cnt += 1\n nSamples = cnt - 1\n cache['num-samples'] = str(nSamples).encode()\n writeCache(env, cache)\n print('Created dataset with %d samples' % nSamples)", "def load_data(data_path=DATA_PATH):\n with open (os.path.join(DATA_PATH, \"imdb_extrait.pkl\"),\"rb\") as file:\n \n [data , id2titles , fields ]= pk.load(file)\n \n \n datax = data [: ,:33]\n datay = np.array([1 if x [33] >6.5 else -1 for x in data ])\n \n return datax, datay, id2titles, fields", "def area(self):", "def prepare_data(camera, image_nums):\n from gen_d_params import gen_d_params\n\n # Fetch the data of interest set by \"camera\" and \"image_nums\"\n imgs = [camera + x for x in image_nums]\n d_params = gen_d_params(imgs)\n \n # Add azimuth feature for pixel location\n d_params[\"err_ang\"] = np.rad2deg(np.arctan2(d_params[\"ydiff\"], d_params[\"xdiff\"]))\n d_params[\"r_img\"] = np.sqrt(np.power(d_params[\"x_img\"], 2) + np.power(d_params[\"y_img\"], 2)) \n d_params[\"err_mag\"] = d_params.pop(\"mag\")\n d_params[\"azm_img\"] = np.rad2deg(np.arctan2(d_params[\"y_img\"], d_params[\"x_img\"]))\n \n x1 = np.reshape(d_params[\"r_img\"], (len(d_params[\"r_img\"]), 1))\n x2 = np.reshape(d_params[\"azm_img\"], (len(d_params[\"azm_img\"]), 1))\n x = np.hstack((x1, x2))\n y1 = np.reshape(d_params[\"err_mag\"], (len(d_params[\"err_mag\"]), 1))\n y2 = np.reshape(d_params[\"err_ang\"], (len(d_params[\"err_ang\"]), 1))\n y = np.hstack((y1, y2))\n\n return x, y", "def gen_dataset_ssdd(xml_path, source_img_path, save_img_path):\r\n if not os.path.exists(xml_path):\r\n raise FileExistsError('path not found! : %s' % xml_path)\r\n if not os.path.exists(source_img_path):\r\n raise FileExistsError('path not found! : %s' % source_img_path)\r\n os.makedirs(save_img_path, exist_ok=True)\r\n pbar = tqdm(os.scandir(xml_path))\r\n for xml_file in pbar:\r\n if xml_file.is_file():\r\n extension = os.path.splitext(xml_file.path)[1][1:]\r\n if 'xml' == extension:\r\n pbar.set_description(\"Processing %s\" % xml_file.path)\r\n dom = xml.dom.minidom.parse(xml_file.path)\r\n root = dom.documentElement\r\n img_name = root.getElementsByTagName('filename')[0].firstChild.data\r\n my_object_list = root.getElementsByTagName('object')\r\n for my_object in my_object_list:\r\n object_type = my_object.getElementsByTagName('name')[0].firstChild.data\r\n if object_type == 'ship':\r\n bndbox = my_object.getElementsByTagName('bndbox')[0]\r\n xmin = int(bndbox.getElementsByTagName('xmin')[0].firstChild.data)\r\n ymin = int(bndbox.getElementsByTagName('ymin')[0].firstChild.data)\r\n xmax = int(bndbox.getElementsByTagName('xmax')[0].firstChild.data)\r\n ymax = int(bndbox.getElementsByTagName('ymax')[0].firstChild.data)\r\n a = os.path.join(source_img_path, img_name+'.jpg')\r\n ori_image = cv2.imread(os.path.join(source_img_path, img_name+'.jpg'), -1)\r\n box = [(xmin, ymin), (xmax, ymin), (xmin, ymax), (xmax, ymax)]\r\n if len(ori_image.shape) == 3:\r\n _, _, image_channels = ori_image.shape\r\n sub_image = np.zeros([ymax - ymin + 1, xmax - xmin + 1, image_channels], dtype=np.int)\r\n else:\r\n sub_image = np.zeros([ymax - ymin + 1, xmax - xmin + 1], dtype=np.int)\r\n for y in range(sub_image.shape[0]): #row\r\n for x in range(sub_image.shape[1]): #col\r\n sub_image[y,x] = ori_image[ymin+y-1, xmin+x-1]\r\n sub_imagename = img_name+'_'+str(xmin)+'_'+str(ymin)+'_'+str(xmax)+'_'+str(ymax)+'.png'\r\n cv2.imwrite(os.path.join(save_img_path, sub_imagename), sub_image[:, :, 0])", "def define_areas(\n 
pixel_filtered_map: np.ndarray, district_heating_zone_threshold: float\n):\n structure = np.ones((3, 3)).astype(int)\n expanded_map = binary_dilation(input=pixel_filtered_map, structure=structure)\n eroded_map = binary_erosion(input=expanded_map, structure=structure)\n labels_array, n_label = measurements.label(\n input=eroded_map,\n structure=structure,\n )\n\n # labels start from 1, therefore the array size is 'num_labels_array + 1'\n areas_potential = np.zeros((n_label + 1)).astype(float)\n if n_label > 0:\n end, start, sorted_array = get_browsing_indexes(\n labels_array=labels_array,\n pixel_filtered_map=pixel_filtered_map,\n n_label=n_label,\n )\n\n for i, (start_index, end_index) in enumerate(zip(start, end)):\n area = sorted_array[start_index:end_index, 3]\n area_potential = np.sum(area)\n if area_potential >= district_heating_zone_threshold:\n # i+1 because labeling starts from 1 and not from 0\n # factor 0.001 for conversion from MWh/ha to GWh/ha\n areas_potential[i + 1] = np.around(np.sum(area_potential) / 1000, 2)\n\n areas = areas_potential[labels_array]\n filtered_map = pixel_filtered_map * (areas > 0).astype(int)\n total_potential = np.sum(areas_potential)\n return areas, filtered_map, total_potential, areas_potential[1:]", "def data_info(data):\n filename = data[\"filename\"]\n X_var = data[\"X_var\"]\n Y_var = data[\"Y_var\"]\n X,Y = read_file(filename,X_var,Y_var)\n input_dim = len(X_var)\n output_dim = len(Y_var)\n return X,Y,input_dim,output_dim", "def get_atom_pos(self, data):\n\n\n if 'neighborhood_size' in self.args:\n neighborhood_size = self.args['neighborhood_size']\n else:\n neighborhood_size = 30\n if 'threshold' in self.args:\n threshold = self.args['threshold']\n else:\n threshold = 30\n\n #Use filters to calculate peaks\n data_max = filters.maximum_filter(data, neighborhood_size)\n maxima = (data == data_max)\n data_min = filters.minimum_filter(data, neighborhood_size)\n diff = ((data_max - data_min) > threshold)\n maxima[diff == 0] = 0\n\n labeled, num_objects = ndimage.label(maxima)\n slices = ndimage.find_objects(labeled)\n x, y = [], []\n for dy,dx in slices:\n x_center = (dx.start + dx.stop - 1)/2\n x.append(x_center)\n y_center = (dy.start + dy.stop - 1)/2\n y.append(y_center)\n\n\n posiitons=[x,y]\n\n return positions", "def extract_profile(tif, line_file, ds):\r\n\r\n import numpy as np\r\n import gdal\r\n import fiona\r\n from scipy.interpolate import interp1d\r\n# from scipy.interpolate import interp2d\r\n from scipy.ndimage import map_coordinates\r\n \r\n #%% Create evenly spaced points\r\n # Read coordinates of the profile line from shapefile\r\n fiona_obj = fiona.open(line_file)\r\n# line = fiona_obj.next()\r\n line = iter(fiona_obj).next() # this line is proper syntax for fiona v2. 
Corrected on Mar 12, 2021 by TCB\r\n coords = np.array( line['geometry']['coordinates'] ) # m the easting and northing coordinates of the vertices along the shapefile\r\n \r\n sqrd_deltas = np.diff(coords, axis=0)**2 # squared differences between x and y coordinates\r\n deltas = np.sum(sqrd_deltas, axis=1)**0.5 # m straight-line path length between adjacent points in the shapefile\r\n dist = np.cumsum( np.append(0, deltas) ) # m running distance along the shapefile from one end.\r\n \r\n disti = np.arange(dist[0], dist[-1], ds) # m vector of evenly spaced distances along the shapefile,\r\n # equivalent to an evenly spaced version of dist\r\n xi = interp1d(dist, coords[:,0])(disti) # m the easting coordinates of disti points, at which profile will be extracted\r\n yi = interp1d(dist, coords[:,1])(disti) # m the northing coordinates of disti points, at which profile will be extracted\r\n\r\n #%% Manipulate the raster and extract its data\r\n # ---- dimensions of geotiff\r\n gtif = gdal.Open(tif)\r\n xmin,xres,xskew,ymax,yskew,yres = gtif.GetGeoTransform()\r\n\r\n\r\n # convert the profile coordinates into pixel coordinates\r\n px = (xi - xmin) / xres\r\n py = (yi - ymax) / yres\r\n# px = np.round(col).astype(int)\r\n# py = np.round(row).astype(int)\r\n \r\n \r\n # pull out the array of raster data. Data are assumed to be in band 1.\r\n gtif_data = gtif.GetRasterBand(1).ReadAsArray()\r\n# gtif_data = band.ReadAsArray()px,py, 1, 1)\r\n \r\n # Two early versions of extacting the data:\r\n # profile = map_coordinates(gtif_data,[px,py],order=0,cval=np.nan)\r\n # profile = interp2d(np.arange(gtif_data.shape[1]), np.arange(gtif_data.shape[0]), \r\n # gtif_data)(px, py)\r\n\r\n # Interpolate within gtif_data at given pixel coordinates to identify values from the geotiff \r\n # Uses a 1st order spline interpolant to extract estimated values of\r\n # gtif_data at the (non-integer) pixel values px and py.\r\n # Function returns `cval' at undefined values of gtif_data.\r\n profile = map_coordinates(gtif_data, np.vstack((py, px)),\r\n order=1, cval=np.nan)\r\n \r\n# profile = np.array(profile,dtype=float)\r\n if type(profile[0]) == float:\r\n profile[np.abs(profile) == 9999] = np.nan\r\n \r\n return disti, profile", "def __init__(self, opt, data_dir, data_list):\r\n BaseDataset.__init__(self, opt)\r\n self.max_length = 60\r\n self.opt = opt\r\n self.input_nc = self.opt.output_nc\r\n self.data_dir = data_dir\r\n \r\n self.labels = []\r\n self.paths = []\r\n self.label_lens = []\r\n with open(data_list, 'r') as f:\r\n lines = f.readlines()\r\n for line in lines:\r\n line = line.strip()\r\n # label index\r\n if len(line.split(' ')) != 2: \r\n continue\r\n codes = line.split(' ')[1].split(',')\r\n if len(codes) > self.max_length:\r\n continue\r\n if codes[-1] == '':\r\n codes.remove('')\r\n img_code = [int(code)+1 for code in codes if int(code) < 6097]\r\n self.label_lens.append(len(img_code))\r\n # 把标签索引改为等长,后面填充0\r\n length = len(img_code)\r\n if length < self.max_length:\r\n img_code += [0] * (self.max_length - length)\r\n self.labels.append(img_code)\r\n self.paths.append(line.split(' ')[0])\r\n\r\n print ('loading from {:s} : {:d}'.format(data_list, len(self.paths)))\r\n self.size = len(self.paths)", "def load_datasets(filepath, sample_list, label_list, mark, a4c_or_a2c, m):\n # here can adjust n to apply your datasets\n if mark:\n n = 4000\n else:\n n = 4000\n dst_pair1 = np.zeros(shape=(n, m, m, 1), dtype=np.float32)\n dst_pair2 = np.zeros(shape=(n, m, m, 1), dtype=np.float32)\n dst_label = 
np.zeros(shape=(n,), dtype=np.int32)\n k = 0\n label_list_copy = copy.deepcopy(label_list)\n for number in range(len(sample_list)):\n label = label_list_copy[sample_list[number]-1] # o--->up 1--->down\n start_mark = label.pop()\n for i in (label):\n position = label.index(i)\n if position == len(label)-1:\n break\n j = label[position+1]\n for t in range(i,j):\n # load imgs: from number i to number j-1-->pair1\n # i+1 j-->pair2\n img_p1 = cv2.imread(filepath+\"Patient\"+(\"000\"+str(sample_list[number]))[-4:] +\n \"\\\\a\"+str(a4c_or_a2c)+\"c\\\\\"+str(t)+'.png', 0)\n img_p2 = cv2.imread(filepath+\"Patient\"+(\"000\"+str(sample_list[number]))[-4:] +\n \"\\\\a\"+str(a4c_or_a2c)+\"c\\\\\"+str(t+1)+'.png', 0)\n # cut and unsamping use cv2.resize\n # original 600*800--cut-->512*512--->resize by cv2 ---> m*m\n dst_pair1[k, :, :, 0] = cv2.resize(img_p1[80:592, 176:688].reshape(512, -1, 1), (m, m))/255.0\n dst_pair2[k, :, :, 0] = cv2.resize(img_p2[80:592, 176:688].reshape(512, -1, 1), (m, m))/255.0\n if start_mark == 0: # up\n dst_label[k] = 0 \n else:\n dst_label[k] = 1 \n k += 1\n if start_mark == 0:\n start_mark = 1\n else:\n start_mark = 0\n if mark == 1:\n pathname = 'train'\n elif mark == 0:\n pathname = 'test'\n else:\n pathname = \"val\"\n # save the imgs for augmentation before training.\n os.mkdir('../'+pathname+'p1/') \n os.mkdir('../'+pathname+'p2/')\n K = 0\n for i in (dst_pair1[:k]):\n preprocessing.image.save_img('../'+pathname+'p1/'+str(K)+'.png', i)\n K += 1\n K = 0\n for i in (dst_pair2[:k]):\n preprocessing.image.save_img('../'+pathname+'p2/'+str(K)+'.png', i)\n K += 1\n return dst_pair1[:k], dst_pair2[:k], dst_label[:k]", "def poi(img, cnt):\n\tm = cntInfo(img, cnt)\n\td = {\"max\":m[\"max\"],\"B\":m[\"extrema\"][\"B\"],\"T\":m[\"extrema\"][\"T\"],\"R\":m[\"extrema\"][\"R\"],\"L\":m[\"extrema\"][\"L\"],\"min\":m[\"min\"],\"centroid\":m[\"centroid\"]}\n\treturn d", "def createMaks(self):\n mask = np.zeros((self.height, self.width)) # (H, W)\n center = self.width // 2\n\n for lat in range(self.height):\n count = int(self.counts[lat])\n # print(lat, count)\n # print(center - count, center, center + count)\n mask[lat][center: center + count] = 1\n mask[lat][center - count: center] = 1\n\n return mask # (H, W)", "def _load_annotations(self):\n if self._raw_annotations is not None:\n return self._raw_annotations\n\n dataset_file = os.path.join(self._annotation_path, 'complete_dataset_v{}.pkl'.format(self._version))\n idx_file = os.path.join(self._annotation_path, 'splits_indices_v{}.pkl'.format(self._version))\n\n def get_split_from_ds(ds, idx):\n split = {}\n keys = sorted(ds.keys())\n for j in xrange(len(idx)):\n k = keys[idx[j]]\n split[k] = ds[k]\n return split\n\n with open(idx_file, 'rb') as fid:\n indices = cPickle.load(fid)[self._image_set]\n with open(dataset_file, 'rb') as fid:\n ds = cPickle.load(fid)\n self._raw_annotations = get_split_from_ds(ds, indices)\n\n return self._raw_annotations", "def get_roidb_and_dataset(dataset_name, idxs):\n dataset = JsonDataset(dataset_name)\n \n roidb = dataset.get_roidb()\n\n if idxs is not None:\n total_num_images = len(roidb)\n start = 0\n end = len(idxs)\n roidb = [roidb[i] for i in idxs]\n else:\n start = 0\n end = len(roidb)\n total_num_images = end\n\n return roidb, dataset, start, end, total_num_images", "def _load_pascal_annotations(self, index):\n image_name = self._image_index[index]\n filename = os.path.join(self._data_path, 'Annotations', image_name + '.xml')\n tree = xmlET.parse(filename)\n objs = tree.findall('object')\n 
if not self.config['use_diff']:\n # Exclude the samples labeled as difficult\n non_diff_objs = [\n obj for obj in objs if int(obj.find('difficult').text) == 0]\n if len(non_diff_objs) != len(objs):\n print 'Removed {} difficult objects'.format(len(objs) - len(non_diff_objs))\n objs = non_diff_objs\n num_objs = len(objs)\n\n boxes = np.zeros((num_objs, 4), dtype=np.uint16)\n gt_classes = np.zeros(num_objs, dtype=np.int32)\n overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)\n\n # Load object bounding boxes into a data frame.\n # boxes[ind, :] will be boxes\n # gt_classes[ind] will be the associated class name for this box\n # overlaps[ind, class] will assign 1.0 to ground truth\n for ix, obj in enumerate(objs):\n bbox = obj.find('bndbox')\n # Make pixel indexes 0-based\n x1 = float(bbox.find('xmin').text) - 1\n y1 = float(bbox.find('ymin').text) - 1\n x2 = float(bbox.find('xmax').text) - 1\n y2 = float(bbox.find('ymax').text) - 1\n cls = self._class_to_ind[obj.find('name').text.lower().strip()]\n boxes[ix, :] = [x1, y1, x2, y2]\n gt_classes[ix] = cls\n overlaps[ix, cls] = 1.0\n\n overlaps = scipy.sparse.csr_matrix(overlaps)\n return {'boxes': boxes,\n 'gt_classes': gt_classes,\n 'gt_overlaps': overlaps,\n 'flipped': False}", "def createDataset_detection(outputPath, imagePathList, box_x_list, box_y_list,\n labelList, region_mask_list, pixel_mask_list):\n assert (len(imagePathList) == len(box_x_list) == len(box_y_list))\n nSamples = len(imagePathList)\n if not os.path.exists(outputPath):\n os.mkdir(outputPath)\n env = lmdb.open(outputPath, map_size=1099511627776)\n cache = {}\n cnt = 1\n for i in range(nSamples):\n imagePath = imagePathList[i]\n box_x = box_x_list[i]\n box_y = box_y_list[i]\n if len(box_x) == 0:\n continue\n if not os.path.exists(imagePath):\n print('%s does not exist' % imagePath)\n continue\n with open(imagePath, 'rb') as f:\n imageBin = f.read()\n\n imageKey = 'image-%09d' % cnt\n cache[imageKey] = imageBin\n box_x_Key = 'boxes_x-%09d' % cnt\n box_y_Key = 'boxes_y-%09d' % cnt\n cache[box_x_Key] = box_x.encode()\n cache[box_y_Key] = box_y.encode()\n\n if labelList:\n labelKey = 'label-%09d' % cnt\n cache[labelKey] = labelList[i].encode()\n if region_mask_list:\n region_mask_Key = 'region_mask-%09d' % cnt\n cache[region_mask_Key] = open(region_mask_list[i], 'rb').read()\n if pixel_mask_list:\n pixel_mask_Key = 'pixel_mask-%09d' % cnt\n cache[pixel_mask_Key] = open(pixel_mask_list[i], 'rb').read()\n # embed()\n if cnt % 1000 == 0:\n writeCache(env, cache)\n cache = {}\n print('Written %d / %d' % (cnt, nSamples))\n cnt += 1\n nSamples = cnt - 1\n cache['num-samples'] = str(nSamples).encode()\n writeCache(env, cache)\n print('Created dataset with %d samples' % nSamples)", "def __call__(self, sample):\n img, landmarks = sample['image'], sample['landmarks']\n p = random.random()\n if p <= 1:\n h, w, c = img.shape\n for i in range(w//2):\n img[:, i, :], img[:, w-1-i, :] = img[:, w-1-i, :], img[:, i, :]\n for i in range(0, len(landmarks[0]), 2):\n x = landmarks[0][i]\n landmarks[0][i] = w-1-x\n return {'image': img,\n 'landmarks': landmarks}", "def test_dataset_info():\n info = utils.get_dataset_info(asset1)\n assert info[\"geometry\"]\n assert info[\"properties\"][\"path\"]\n assert info[\"properties\"][\"bounds\"]\n assert info[\"properties\"][\"datatype\"]\n assert info[\"properties\"][\"minzoom\"] == 7\n assert info[\"properties\"][\"maxzoom\"] == 9", "def get_zone_pixels(feat, input_zone_polygon, input_value_raster, band, coords=[]): #, raster_band\n \n \n \n 
# Open data\n raster = gdal.Open(input_value_raster)\n shp = ogr.Open(input_zone_polygon)\n lyr = shp.GetLayer()\n \n # Get raster georeference info\n transform = raster.GetGeoTransform()\n xOrigin = transform[0]\n yOrigin = transform[3]\n pixelWidth = transform[1]\n pixelHeight = transform[5]\n \n sizeX = raster.RasterXSize\n sizeY = raster.RasterYSize\n lrx = xOrigin + (sizeX * pixelWidth)\n lry = yOrigin + (sizeY * pixelHeight)\n \n \n \n # Reproject vector geometry to same projection as raster\n #sourceSR = lyr.GetSpatialRef()\n #targetSR = osr.SpatialReference()\n #targetSR.ImportFromWkt(raster.GetProjectionRef())\n #coordTrans = osr.CoordinateTransformation(sourceSR,targetSR)\n #feat = lyr.GetNextFeature()\n #geom = feat.GetGeometryRef()\n #geom.Transform(coordTrans)\n \n # Get extent of feat\n geom = feat.GetGeometryRef()\n if (geom.GetGeometryName() == 'MULTIPOLYGON'):\n count = 0\n pointsX = []; pointsY = []\n for polygon in geom:\n geomInner = geom.GetGeometryRef(count)\n ring = geomInner.GetGeometryRef(0)\n numpoints = ring.GetPointCount()\n for p in range(numpoints):\n lon, lat, z = ring.GetPoint(p)\n pointsX.append(lon)\n pointsY.append(lat)\n count += 1\n elif (geom.GetGeometryName() == 'POLYGON'):\n ring = geom.GetGeometryRef(0)\n numpoints = ring.GetPointCount()\n pointsX = []; pointsY = []\n for p in range(numpoints):\n lon, lat, z = ring.GetPoint(p)\n pointsX.append(lon)\n pointsY.append(lat)\n\n else:\n sys.exit(\"ERROR: Geometry needs to be either Polygon or Multipolygon\")\n\n #xmin = min(pointsX) \n #xmax = max(pointsX)\n #ymin = min(pointsY)\n #ymax = max(pointsY)\n \n \n if len(coords) == 0: \n xmin = xOrigin if (min(pointsX) < xOrigin) else min(pointsX)\n xmax = lrx if (max(pointsX) > lrx) else max(pointsX)\n ymin = lry if (min(pointsY) < lry) else min(pointsY)\n ymax = yOrigin if (max(pointsY) > yOrigin) else max(pointsY)\n else:\n xmin = coords[0] if (min(pointsX) < coords[0]) else min(pointsX)\n xmax = coords[1] if (max(pointsX) > coords[1]) else max(pointsX)\n ymin = coords[2] if (min(pointsY) < coords[2]) else min(pointsY)\n ymax = coords[3] if (max(pointsY) > coords[3]) else max(pointsY)\n \n # Specify offset and rows and columns to read\n xoff = int((xmin - xOrigin)/pixelWidth)\n yoff = int((yOrigin - ymax)/pixelWidth)\n xcount = int((xmax - xmin)/pixelWidth) #+1 !!!!!!!!!!!!!!!!!!!!! This adds a pixel to the right side\n ycount = int((ymax - ymin)/pixelWidth) #+1 !!!!!!!!!!!!!!!!!!!!! 
This adds a pixel to the bottom side\n \n #print(xoff, yoff, xcount, ycount)\n \n # Create memory target raster\n target_ds = gdal.GetDriverByName('MEM').Create('', xcount, ycount, 1, gdal.GDT_Byte)\n target_ds.SetGeoTransform((\n xmin, pixelWidth, 0,\n ymax, 0, pixelHeight,\n ))\n\n # Create for target raster the same projection as for the value raster\n raster_srs = osr.SpatialReference()\n raster_srs.ImportFromWkt(raster.GetProjectionRef())\n target_ds.SetProjection(raster_srs.ExportToWkt())\n\n # Rasterize zone polygon to raster\n gdal.RasterizeLayer(target_ds, [1], lyr, burn_values=[1])\n\n # Read raster as arrays\n dataBandRaster = raster.GetRasterBand(band)\n data = dataBandRaster.ReadAsArray(xoff, yoff, xcount, ycount).astype(np.float)\n bandmask = target_ds.GetRasterBand(1)\n datamask = bandmask.ReadAsArray(0, 0, xcount, ycount).astype(np.float)\n\n # data zone of raster\n dataZone = np.ma.masked_array(data, np.logical_not(datamask))\n\n raster_srs = None\n raster = None\n shp = None\n lyr = None\n return [dataZone, [xmin,xmax,ymin,ymax]]", "def generate_dataset(self):\n\t\timg_set = []\n\t\tqa_set = []\n\t\tfor i in range(self.config.dataset_size):\n\t\t\timg, r = self.generate_image()\n\t\t\tq = self.generate_question()\n\t\t\ta = self.generate_answer(r, q)\n\t\t\timg_sample = {\n\t\t\t\t'id': i,\n\t\t\t\t'image': img.tolist()\n\t\t\t}\n\t\t\timg_set.append(img_sample)\n\t\t\tfor j in range(len(q)):\n\t\t\t\tqa_sample = {\n\t\t\t\t\t'id': i,\n\t\t\t\t\t'question': q[j].tolist(),\n\t\t\t\t\t'answer': a[j].tolist()\n\t\t\t\t}\n\t\t\t\tqa_set.append(qa_sample)\n\t\tprint('Finished creating smaples')\n\t\tdataset = {\n\t\t\t'image':\timg_set,\n\t\t\t'qa':\tqa_set\n\t\t}\n\t\twith open(self.path, 'w') as f:\n\t\t\tjson.dump(dataset, f)", "def __init__(self,datamask , h, w, upper, lower):\n self.datamask = datamask\n self.imgidmask = datamask[datamask.type == 'global'].reset_index(drop = True)\n self.h = h\n self.w = w\n self.upper = upper\n self.lower = lower\n self._birads_to_idxs = get_birad()\n self._densities_to_idxs = get_dens()\n self.tfms = get_transform(height = self.h, width =self.w)", "def test():\n import os\n import ClearMap.ImageProcessing.SpotDetection as self\n reload(self)\n import ClearMap.IO as io \n import ClearMap.Settings as settings\n \n basedir = settings.ClearMapPath;\n #fn = '/home/ckirst/Science/Projects/BrainActivityMap/Data/iDISCO_2015_06/Adult cfos C row 20HF 150524.ims';\n fn = os.path.join(basedir, 'Test/Data/Synthetic/label_iDISCO_\\d{3}.tif');\n fn = os.path.join(basedir, 'Test/Data/OME/16-17-27_0_8X-s3-20HF_UltraII_C00_xyz-Table Z\\d{4}.ome.tif');\n #fn = '/run/media/ckirst/ChristophsBackuk4TB/iDISCO_2015_06/Adult cfos C row 20HF 150524.ims';\n #fn = '/home/nicolas/Windows/Nico/cfosRegistrations/Adult cfos C row 20HF 150524 - Copy.ims';\n #fn = '/home/ckirst/Science/Projects/BrainActivityMap/iDISCO_2015_04/test for spots added spot.ims'\n\n img = io.readData(fn);\n #img = dataset[0:500,0:500,1000:1008];\n #img = dataset[600:1000,1600:1800,800:830];\n #img = dataset[500:1500,500:1500,800:809]; \n img = img.astype('int16');\n \n #m = sys.modules['iDISCO.ImageProcessing.SpotDetection']\n #c = self.detectCells(img);\n \n c = self.detectCells(img, dogSize = None, cellShapeThreshold = 1, cellShapeFile = '/home/ckirst/Science/Projects/BrainActivityMap/Analysis/iDISCO/Test/Data/CellShape/cellshape_\\d{3}.tif');\n \n print ('done, found %d cells !' 
% c[0].shape[0])\n\n\n #test intensities:\n import numpy;\n x = numpy.random.rand(30,30,10);\n centers = numpy.array([[0,0,0], [29,29,9]]);\n i = self.findIntensity(x, centers, boxSize = (1,1,1));\n print (i)", "def coco_format(type_, id_list, annotation_url_list, file_list, result_list, label_list, coco_flag=0):\n annotations = []\n for i, result in enumerate(result_list):\n temp = {}\n annotation_url = annotation_url_list[i]\n file_path = file_list[i]\n temp['id'] = id_list[i]\n temp['annotation'] = []\n im = cv2.imread(file_path)\n height, width, _ = im.shape\n if result.shape[0] == 0:\n temp['annotation'] = json.dumps(temp['annotation'])\n annotations.append(temp)\n with open(annotation_url, 'w') as w:\n w.write(temp['annotation'])\n continue\n else:\n for j in range(result.shape[0]):\n cls_id = int(result[j][0]) + 1 + coco_flag\n x1 = result[j][1]\n x2 = result[j][3]\n y1 = result[j][2]\n y2 = result[j][4]\n score = result[j][5]\n width = max(0, x2 - x1)\n height = max(0, y2 - y1)\n if cls_id in label_list:\n temp['annotation'].append({\n 'area': width * height,\n 'bbox': [x1, y1, width, height],\n 'category_id': cls_id,\n 'iscrowd': 0,\n 'segmentation': [[x1, y1, x2, y1, x2, y2, x1, y2]],\n 'score': score\n })\n if type_ == 2 and len(temp['annotation']) > 0:\n temp['annotation'] = [temp['annotation'][0]]\n temp['annotation'][0].pop('area')\n temp['annotation'][0].pop('bbox')\n temp['annotation'][0].pop('iscrowd')\n temp['annotation'][0].pop('segmentation')\n temp['annotation'] = json.dumps(temp['annotation'])\n annotations.append(temp)\n with open(annotation_url, 'w') as wr:\n wr.write(temp['annotation'])\n return annotations", "def iris():\n return IrisDataset()", "def test_area_defaults():\n cdata = json.loads(CliRunner().invoke(area, [*TONK_ARGS]).output)\n\n assert cdata[\"mark\"][\"type\"] == \"area\"\n\n datavals = list(cdata[\"datasets\"].values())[0]\n assert datavals[0] == {\"date\": \"2007-01-01\", \"price\": 37.67}\n assert datavals[-1] == {\"date\": \"2010-03-01\", \"price\": 128.82}\n\n # even though date is in YYYY-MM-DD format, Altair doesn't automatically know it's temporal\n assert cdata[\"encoding\"][\"x\"] == {\"field\": \"date\", \"type\": \"nominal\"}\n assert cdata[\"encoding\"][\"y\"] == {\"field\": \"price\", \"type\": \"quantitative\"}", "def fit_isophotes(self, debug=False):\n data = self._region.image_intensity\n y0, x0 = np.unravel_index(np.argmax(data), data.shape)\n geometry = EllipseGeometry(\n x0, y0, sma=self.a / 2, eps=self.eccentricity, pa=self.orientation\n )\n ellipse = IsoEllipse(data - np.median(data), geometry)\n isolist = ellipse.fit_image()\n\n if debug:\n plt.imshow(data)\n smas = np.linspace(3, 20, 15)\n for sma in smas:\n iso = isolist.get_closest(sma)\n (\n x,\n y,\n ) = iso.sampled_coordinates()\n plt.plot(x, y, color=\"white\")\n\n return isolist", "def _get_area_incmfd_attr(max_np, max_hd, max_bins):\n\n att = []\n att.append({'name': 'src_id', 'type': 'String', 'len': 10})\n att.append({'name': 'src_name', 'type': 'String', 'len': 30})\n att.append({'name': 'tect_reg', 'type': 'String', 'len': 30})\n att.append({'name': 'upp_seismo', 'type': 'Real'})\n att.append({'name': 'low_seismo', 'type': 'Real'})\n att.append({'name': 'mag_scal_r', 'type': 'String', 'len': 15})\n att.append({'name': 'rup_asp_ra', 'type': 'Real'})\n att.append({'name': 'mfd_type', 'type': 'String', 'len': 20})\n\n att.append({'name': 'min_mag', 'type': 'Real'})\n att.append({'name': 'bin_width', 'type': 'Real'})\n att.append({'name': 'num_bins', 'type': 
'Integer'})\n for i in range(1, max_bins+1):\n lab = 'or_%d' % (i)\n att.append({'name': lab, 'type': 'Real'})\n\n att.append({'name': 'num_npd', 'type': 'Integer'})\n for i in range(1, max_np+1):\n lab = 'weight_%d' % (i)\n att.append({'name': lab, 'type': 'Real'})\n lab = 'strike_%d' % (i)\n att.append({'name': lab, 'type': 'Real'})\n lab = 'rake_%d' % (i)\n att.append({'name': lab, 'type': 'Real'})\n lab = 'dip_%d' % (i)\n att.append({'name': lab, 'type': 'Real'})\n\n att.append({'name': 'num_hdd', 'type': 'Integer'})\n for i in range(1, max_hd+1):\n lab = 'hdd_d_%d' % (i)\n att.append({'name': lab, 'type': 'Real'})\n lab = 'hdd_w_%d' % (i)\n att.append({'name': lab, 'type': 'Real'})\n\n return att", "def detect_sea_lions_in_image(filename,\n model,\n patch_h,\n patch_w,\n resize_image_patch_to_h, \n resize_image_patch_to_w,\n resize_mask_patch_to_h,\n resize_mask_patch_to_w,\n display_mask=False):\n\n train_image = cv2.imread(filename)\n image_patches_list = dhap.slice_the_image_into_patches(train_image, patch_h, patch_w)\n\n # Recombine the image from the patches (train_image.shape != image.shape)\n # bacause the size of the image is adjusted to be a multiple of patch_h and patch_w. \n image = dhap.combine_pathes_into_image(image_patches_list)\n\n if (display_mask == True):\n fig, ax = plt.subplots()\n cax = ax.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))\n cbar = fig.colorbar(cax)\n plt.axis(\"off\")\n plt.show() \n\n # Resize the patches to the ones used by the model.\n image_patches_list = dhap.resize_patches_in_patches_list(image_patches_list, \n resize_image_patch_to_h, \n resize_image_patch_to_w)\n\n mask_patches_list = apply_model_to_image_patches_list(image_patches_list, model)\n\n # The model outputs a (1,n) vertor. Reshape it to a matrix.\n mask_patches_list = reshape_patches_list(mask_patches_list,\n resize_mask_patch_to_h,\n resize_mask_patch_to_w)\n\n mask_patches_list = resized_image_patches_list = dhap.resize_patches_in_patches_list(mask_patches_list, \n patch_h, \n patch_w)\n\n mask = dhap.combine_pathes_into_mask(mask_patches_list)\n\n image = dhap.apply_mask(image, mask)\n\n if (display_mask == True):\n fig, ax = plt.subplots()\n cax = ax.imshow(mask)\n cbar = fig.colorbar(cax)\n plt.axis(\"off\")\n plt.show() \n\n\n if (display_mask == True):\n fig, ax = plt.subplots()\n cax = ax.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))\n cbar = fig.colorbar(cax)\n plt.axis(\"off\")\n plt.show() \n\n\n\n print(mask_patches_list[0][0].shape)\n\n\n #combine_pathes_into_image(patches_list", "def make_ARI_list(dx, dy, m_info, offset):\n \"\"\"\n 1 Get information from m_info.\n \"\"\"\n x_m = m_info[0]\n y_m = m_info[1]\n z_m = m_info[2]\n\n m_points = m_info[3]\n\n m_p0 = m_points[0]\n m_p1 = m_points[1]\n m_p2 = m_points[2]\n m_p3 = m_points[3]\n\n \"\"\"\n 2 Get points of ARI.\n \"\"\"\n x_k = y_m * 2 / 3 # NOTE: fixed number\n\n # KUMIKI_points_left reflect offset\n p5 = (dx, dy)\n p4 = (dx, dy + y_m / 3 - offset)\n p3 = (dx + x_k, dy + y_m / 4 - offset)\n p2 = (dx + x_k, dy + 3 * y_m / 4 + offset)\n p1 = (dx, dy + 2 * y_m / 3 + offset)\n p0 = (dx, dy + y_m)\n\n KUMIKI_points_left = [p0, p1, p2, p3, p4, p5]\n\n # KUMIKI_points_right not reflect offset\n p5 = (dx, dy)\n p4 = (dx, dy + y_m / 3)\n p3 = (dx + x_k, dy + y_m / 4)\n p2 = (dx + x_k, dy + 3 * y_m / 4)\n p1 = (dx, dy + 2 * y_m / 3)\n p0 = (dx, dy + y_m)\n\n KUMIKI_points_right = [p0, p1, p2, p3, p4, p5]\n\n \"\"\"\n 3 Get SEN information.\n \"\"\"\n SEN_info = get_m2_m3_SEN_info(dx, dy, m_info, x_k)\n\n # upper 
shape\n upper_shape_left, upper_shape_right =\\\n m2_m3_make_upper_shape_points_list(dx, dy, m_info, SEN_info)\n\n upper_shape_left_upper_row = upper_shape_left[0]\n upper_shape_left_lower_row = upper_shape_left[1]\n\n upper_shape_right_upper_row = upper_shape_right[0]\n upper_shape_right_lower_row = upper_shape_right[1]\n\n # lower shape\n lower_shape_left, lower_shape_right =\\\n m2_m3_make_lower_shape_points_list(dx, dy, m_info, SEN_info)\n\n lower_shape_left_upper_row = lower_shape_left[0]\n lower_shape_left_lower_row = lower_shape_left[1]\n\n lower_shape_right_upper_row = lower_shape_right[0]\n lower_shape_right_lower_row = lower_shape_right[1]\n\n # middle shape\n middle_shape_left, middle_shape_right =\\\n m2_m3_make_middle_shape_points_list(dx, dy, m_info, SEN_info)\n\n middle_shape_left_upper_row = middle_shape_left[0]\n middle_shape_left_lower_row = middle_shape_left[1]\n\n middle_shape_right_upper_row = middle_shape_right[0]\n middle_shape_right_lower_row = middle_shape_right[1]\n\n \"\"\"\n 4 Make ARI lists\n \"\"\"\n # Leftside\n # Upper\n left_upper = []\n left_upper.append(m_p1)\n left_upper.extend(upper_shape_left_upper_row)\n\n left_upper.extend(KUMIKI_points_left)\n left_upper.extend(upper_shape_left_lower_row)\n left_upper.append(m_p0)\n\n # left_upper_crv = rs.AddPolyline(left_upper)\n\n # Middle\n left_middle = []\n left_middle.append(m_p1)\n left_middle.extend(middle_shape_left_upper_row)\n\n left_middle.extend(KUMIKI_points_left)\n left_middle.extend(middle_shape_left_lower_row)\n left_middle.append(m_p0)\n\n # left_middle_crv = rs.AddPolyline(left_middle)\n\n # Lower\n left_lower = []\n left_lower.append(m_p1)\n left_lower.extend(lower_shape_left_upper_row)\n\n left_lower.extend(KUMIKI_points_left)\n left_lower.extend(lower_shape_left_lower_row)\n left_lower.append(m_p0)\n\n # left_lower_crv = rs.AddPolyline(left_lower)\n\n # left_crvs = [left_upper_crv, left_middle_crv, left_lower_crv]\n\n left_list = [left_upper, left_middle, left_lower]\n\n # Rightside\n # Upper\n right_upper = []\n right_upper.append(m_p2)\n right_upper.extend(upper_shape_right_upper_row)\n\n right_upper.extend(KUMIKI_points_right)\n right_upper.extend(upper_shape_right_lower_row)\n right_upper.append(m_p3)\n\n # right_upper_crv = rs.AddPolyline(right_upper)\n\n # Middle\n right_middle = []\n right_middle.append(m_p2)\n right_middle.extend(middle_shape_right_upper_row)\n\n right_middle.extend(KUMIKI_points_right)\n right_middle.extend(middle_shape_right_lower_row)\n right_middle.append(m_p3)\n\n # right_middle_crv = rs.AddPolyline(right_middle)\n\n # Lower\n right_lower = []\n right_lower.append(m_p2)\n right_lower.extend(lower_shape_right_upper_row)\n\n right_lower.extend(KUMIKI_points_right)\n right_lower.extend(lower_shape_right_lower_row)\n right_lower.append(m_p3)\n\n # right_lower_crv = rs.AddPolyline(right_lower)\n\n # right_crvs = [right_upper_crv, right_middle_crv, right_lower_crv]\n\n right_list = [right_upper, right_middle, right_lower]\n\n return left_list, right_list, SEN_info", "def create_labelled_dataset(self):\n\n print(\"-------------------------------------------------------------------\")\n print(\" How to Use the Pole Hull Label Tool\")\n print(\"-------------------------------------------------------------------\")\n print(\"- If a hull is NOT associated to a pole: press the 1 button\")\n print(\"- If a hull IS associated to a pole: press the 2 button\")\n print(\"\\n- If any other key is pressed, the program EXITS\")\n 
print(\"-------------------------------------------------------------------\")\n\n detector = gate_detector.GateDetector(im_resize=3.0/4)\n\n imgs = []\n labels = []\n directory = os.path.dirname(os.getcwd())\n \n # Get absolute path of all images in the images folder\n for dirpath,_,filenames in os.walk(os.path.join(directory, 'images', 'gate')):\n for f in filenames:\n imgs.append(os.path.abspath(os.path.join(dirpath, f)))\n\n # Get the hulls from the segmented image and run the display and label program for each image\n for img in imgs:\n src = cv.imread(img, 1)\n pre = detector.preprocess(src)\n seg = detector.segment(pre)\n mor = detector.morphological(seg)\n hulls = detector.create_convex_hulls(seg)\n labels += self.display_and_label_hulls(hulls, pre)\n return labels", "def oscanSub(img):\n oscanL = img[:,10:50]\n oscanR = img[:,2110:2150]\n mdL=np.median(oscanL,axis=1)\n mdR=np.median(oscanR,axis=1)\n #rowL=np.arange(0,mdL.shape[0])\n #rowR=np.arange(0,mdR.shape[0])\n #(aL,bL,sda,sdb,se)=linefit(rowL,mdL)\n #(aR,bR,sda,sdb,se)=linefit(rowR,mdR)\n #oscanLfit=rowL*bL+aL\n #oscanRfit=rowR*bR+aR\n for i in range(1080):\n img[:,i] = img[:,i] - mdL #oscanLfit\n img[:,1080+i] = img[:,1080+i] - mdR #oscanRfit\n return img", "def create_dataset(image, mask, n, prune=False, save_pxs=False):\n row = 1\n subimages = []\n y = []\n acpx = []\n for i in range(n, 255-n):\n for j in range(n, 255-n):\n if mask[i,j]!=0:\n # numpy submatrices have (first index starting at 0):(last index starting from 1)\n\n store = True\n\n # dont use pixels with associated information outside the prostate\n if prune:\n for v in mask[i-n:i+n+1, j-n:j+n+1]:\n if 0 in v:\n store = False\n break\n\n # store the images as numpy arrays\n if store:\n acpx.append((i, j))\n subimage = image[i-n:i+n+1, j-n:j+n+1]\n subimages.append(subimage)\n # translate (2 -> 1) and 1 -> 0\n yi = 1 if int(round(mask[i,j]))==2 else 0\n y.append(yi)\n row += 1\n\n if save_pxs:\n active_pixels.append(acpx)\n\n # flatten the matrices to feature vectors\n X = [image.flatten() for image in subimages]\n return X, y", "def __test_region(self, bk):\n for arg in self.args['region']:\n ds = ArgoDataFetcher(backend=bk).region(arg).to_xarray()\n assert isinstance(ds, xr.Dataset) == True", "def __init__(self, MRIObj, pRFModelObj = None, FAModelObj = None,\n pRF_data = [], FA_data = [],\n prf_dm = [], max_ecc_ext = 5.5,\n pysub = 'hcp_999999', flatmap_height = 2048, full_figsize = (12, 8)):\n\n # set data object to use later on\n self.MRIObj = MRIObj\n\n # Load pRF and model object\n self.pRFModelObj = pRFModelObj\n self.FAModelObj = FAModelObj\n\n ## data to be plotted \n self.pRF_data = pRF_data\n self.FA_data = FA_data\n\n ## figure settings\n self.flatmap_height = flatmap_height\n self.full_figsize = full_figsize\n self.images = {}\n \n ## create pycortex vars\n self.mask, extents = cortex.quickflat.utils.get_flatmask(pysub, height = self.flatmap_height)\n self.vc = cortex.quickflat.utils._make_vertex_cache(pysub, height = self.flatmap_height)\n\n self.mask_index = np.zeros(self.mask.shape)\n self.mask_index[self.mask] = np.arange(self.mask.sum())\n\n # set prf dm\n self.prf_dm = prf_dm\n\n ## set grid of possible points in downsampled space\n self.point_grid_2D = np.array(np.meshgrid(np.linspace(-1, 1, prf_dm.shape[0]) * max_ecc_ext,\n np.linspace(1, -1, prf_dm.shape[0]) * max_ecc_ext))", "def load_data():\n #read from S3\n df_obs = pd.read_csv('https://nadim-kawwa-dota-bucket.s3.us-west-2.amazonaws.com/df_obs.csv')\n df_sen = 
pd.read_csv('https://nadim-kawwa-dota-bucket.s3.us-west-2.amazonaws.com/df_sentry.csv')\n \n \n #background image to be used\n img_url = 'https://nadim-kawwa-dota-bucket.s3.us-west-2.amazonaws.com/map_detailed_723.jpeg'\n img_response = requests.get(img_url)\n img = Image.open(BytesIO(img_response.content))\n \n \n #apply translation of coordinates\n df_obs['x'] = df_obs['x'] - 64\n df_obs['y'] = df_obs['y'] - 64\n df_sen['x'] = df_sen['x'] - 64\n df_sen['y'] = df_sen['y'] - 64\n\n #convert time to minutes\n df_obs['time'] = df_obs['time']/60\n df_sen['time'] = df_sen['time']/60\n\n \n return df_obs, df_sen, img", "def process_image(image):\n \n # (step 1) get gray image\n gray = grayscale(image)\n \n # (step 2) do gaussian blur with kernel size is 3\n blur_gray = gaussian_blur(gray, 3)\n \n # (step 3) do canny edge detction with low 50 and hight 150\n canny_edges = canny(blur_gray, 50, 150)\n \n # (step 4) region of interset\n imshape = image.shape\n left_bottom = (50,imshape[0])\n right_bottom = (imshape[1]-50,imshape[0])\n left_top = (420, 330)\n right_top = (imshape[1]-420, 330)\n # used later to discard lines which are out of the ROI\n polygon = Polygon([(50,imshape[0]+1),(imshape[1]-50,imshape[0]+1), (imshape[1]-420, 329), (420, 329)])\n vertices = np.array([[left_bottom,left_top, right_top, right_bottom]], dtype=np.int32)\n masked_edge = region_of_interest(canny_edges, vertices)\n \n # (step 5) get lane lines from hough transform\n rho = 2\n theta = np.pi/18 \n threshold = 15\n min_line_length = 10\n max_line_gap = 20\n lines = hough_lines(masked_edge, rho, theta, threshold, min_line_length, max_line_gap)\n \n # (step 6) seperate left and right lines\n left_lines = []\n right_lines = []\n for line in lines:\n for x1,y1,x2,y2 in line:\n if y1 > y2:\n temp_line = [x1,y1,x2,y2]\n if x2 != x1:\n m = (float(y2) - float(y1)) / (float(x2) - float(x1))\n else:\n m = 1000 # it will be dicarded, any high value will work\n temp_line.append(m)\n if x1 < x2:\n left_lines.append(temp_line)\n else:\n right_lines.append(temp_line)\n else:\n temp_line = [x2,y2,x1,y1]\n if x2 != x1:\n m = (float(y1) - float(y2)) / (float(x1) - float(x2))\n else:\n m = 1000\n temp_line.append(m)\n if x1 > x2:\n left_lines.append(temp_line)\n else:\n right_lines.append(temp_line)\n \n # (step 7) get left and right lines slopes, can be done with step 6 although\n left_slop = []\n for left_line in left_lines:\n x1 = left_line[0]; y1 = left_line[1]; x2 = left_line[2]; y2 = left_line[3]; \n if x1 != x2:\n left_slop.append( (float(y2) - float(y1)) / (float(x2) - float(x1)) )\n average_left_slop = sum(left_slop)/len(left_slop) # not used yet\n \n right_slop = []\n for right_line in right_lines:\n x1 = right_line[0]; y1 = right_line[1]; x2 = right_line[2]; y2 = right_line[3]; \n if x1 != x2:\n right_slop.append( (float(y2) - float(y1)) / (float(x2) - float(x1)) )\n average_right_slope = sum(right_slop)/len(right_slop) # not used yet\n \n \n # (step 8) delete left lines which deviate from thersold_s slope\n thersold_s = 0.4\n delet_left_index = []\n i = 0\n for left_line in left_lines:\n x1 = left_line[0]; y1 = left_line[1]; x2 = left_line[2]; y2 = left_line[3]; m = left_line[4]; \n if abs(m) < thersold_s:\n delet_left_index.append(i)\n i=i+1\n for i in range((len(delet_left_index)-1), -1, -1):\n del left_lines[delet_left_index[i]]\n \n # (step 9) delete right lines which deviate from average slope\n delet_index_right = []\n i = 0\n for right_line in right_lines:\n x1 = right_line[0]; y1 = right_line[1]; x2 = right_line[2]; y2 
= right_line[3]; m = right_line[4]; \n if abs(m) < thersold_s:\n delet_index_right.append(i)\n i=i+1\n for i in range((len(delet_index_right)-1), -1, -1):\n del right_lines[delet_index_right[i]]\n \n # (step 10) extrapolate left and right lines\n left_line_draw = True\n x_lefts = []\n y_lefts = []\n for line in left_lines:\n x1, y1, x2, y2, m = line\n x_lefts.append(x1)\n x_lefts.append(x2) \n y_lefts.append(y1)\n y_lefts.append(y2)\n \n if len(x_lefts) > 0:\n slope_left, c_left = np.polyfit(x_lefts, y_lefts, 1)\n else:\n slope_left, c_left = 1, 1\n left_line_draw = False\n \n right_line_draw = True\n x_rights = []\n y_rights = []\n for line in right_lines:\n x1, y1, x2, y2, m = line\n x_rights.append(x1)\n x_rights.append(x2)\n y_rights.append(y1)\n y_rights.append(y2)\n if len(x_rights) > 0:\n slope_right, c_right = np.polyfit(x_rights, y_rights, 1)\n else:\n slope_right, c_right = 1, 1\n right_line_draw = False\n \n y1_left = 530 # again hardcoded values, from ROI\n y2_left = 330 # again hardcoded values, from ROI\n x1_left = int((y1_left - c_left) / slope_left)\n x2_left = int((y2_left - c_left) / slope_left)\n \n y1_right = 530 # again hardcoded values, from ROI\n y2_right = 330 # again hardcoded values, from ROI \n x1_right = int((y1_right - c_right) / slope_right)\n x2_right = int((y2_right - c_right) / slope_right)\n \n # (step 11) check if left/right line is out of ROI\n left_point1 = Point(x1_left, y1_left)\n left_point2 = Point(x2_left, y2_left)\n \n right_point1 = Point(x1_right, y1_right)\n right_point2 = Point(x2_right, y2_right)\n \n if polygon.contains(left_point1) and polygon.contains(left_point2):\n left_line_draw = True\n else:\n #print (\"left line out\", left_point1, left_point2)\n left_line_draw = False\n \n if polygon.contains(right_point1) and polygon.contains(right_point2):\n right_line_draw = True\n else:\n #print (\"right line out\", right_point1, right_point2)\n right_line_draw = False\n \n \n # (step 12) draw lines\n line_image = np.copy(image)\n # Draw the right and left lines on image\n if left_line_draw:\n cv2.line(line_image, (x1_left, y1_left), (x2_left, y2_left), (255,0,0),5)\n if right_line_draw:\n cv2.line(line_image, (x1_right, y1_right), (x2_right, y2_right), (255,0,0),5)\n \n # Create a \"color\" binary image to combine with line image\n color_edges = np.dstack((masked_edge, masked_edge, masked_edge)) \n \n # Draw the lines on the edge image\n lines_edges = cv2.addWeighted(color_edges, 0.4, line_image, 1, 0) \n #plt.imshow(lines_edges)\n #plt.show()\n return lines_edges" ]
[ "0.58949435", "0.5746331", "0.5726559", "0.56060755", "0.5605567", "0.55885756", "0.55885243", "0.54849917", "0.5479196", "0.5452147", "0.5448799", "0.54228556", "0.5405141", "0.54037136", "0.5385172", "0.537736", "0.5366441", "0.5364228", "0.5364176", "0.5347343", "0.5337789", "0.5335361", "0.5334174", "0.53130174", "0.529342", "0.52864605", "0.52807033", "0.5279305", "0.5275045", "0.52745366", "0.5272875", "0.5255874", "0.5253626", "0.52507496", "0.52507293", "0.524715", "0.524349", "0.524278", "0.5230032", "0.52142596", "0.5206868", "0.52051073", "0.5193935", "0.5181923", "0.5181052", "0.51790917", "0.517254", "0.5172185", "0.5169574", "0.516898", "0.5168874", "0.51643014", "0.51615715", "0.5153436", "0.5152915", "0.5149733", "0.5146594", "0.5122267", "0.5115401", "0.5114937", "0.51087624", "0.510742", "0.5102262", "0.5088444", "0.50880235", "0.508744", "0.5077426", "0.50764245", "0.5072008", "0.5057134", "0.5055229", "0.5052616", "0.50508326", "0.50470716", "0.50367546", "0.5034433", "0.5033001", "0.50270253", "0.5025079", "0.50211465", "0.5009822", "0.49999806", "0.4997932", "0.49916407", "0.49896938", "0.49887922", "0.49839804", "0.49783483", "0.4973623", "0.49735308", "0.496843", "0.4965407", "0.4964403", "0.496348", "0.49604672", "0.49591544", "0.4958004", "0.49574614", "0.49564677", "0.49547943" ]
0.5257886
31
Read label and image pairs from a TFRecord data file. The number of records to read is not limited (0 means read indefinitely). Uses tensorflow.Session.
def fun_read_tfrecord_filelist(file_list, read_num=100):
    # check file list is ok
    if type(file_list) != list:
        if type(file_list) == str:
            file_list = [file_list]
        else:
            print('need a file_name or files_name_list!')
            return
    for s in file_list:
        if not os.path.isfile(s):
            print(f'file error: not found file:"{s}"!')
            return {}, []

    # build a filename queue from the file list
    filename_queue = tf.train.string_input_producer(file_list)

    reader = tf.TFRecordReader()
    # returns the file name and the file contents
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(serialized_example,
                                       features={
                                           'label': tf.FixedLenFeature([], tf.string),
                                           'image': tf.FixedLenFeature([], tf.string),
                                       })

    label = tf.cast(features['label'], tf.string)
    image = tf.decode_raw(features['image'], tf.uint8)
    # img = tf.reshape(img, [10, 15, 1])
    # img = tf.cast(img, tf.float32) * (1. / 255) - 0.5

    image_dict = {}
    label_list = []
    with tf.Session() as sess:
        # init_op = tf.initialize_all_variables()
        init_op = tf.global_variables_initializer()
        sess.run(init_op)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        for i in range(read_num):
            ex_image, ex_label = sess.run([image, label])  # fetch the image and label
            # img = Image.fromarray(example.reshape([15, 12]), 'L')  # Image from PIL
            # img.save(cwd+str(i)+'_''Label_'+str(l)+'.jpg')  # save the image
            # print(i, ex_image.shape, ex_label)
            image_dict[i] = ex_image
            label_list.append(int(chr(ex_label[0])))

        coord.request_stop()
        coord.join(threads)

    return image_dict, label_list
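A minimal usage sketch for the function above (the TFRecord file name and read count are assumed for illustration and are not part of this record); it iterates the returned dict of decoded uint8 image buffers and the parallel label list:

    # hypothetical call; 'train.tfrecords' is an assumed file name
    image_dict, label_list = fun_read_tfrecord_filelist(['train.tfrecords'], read_num=100)
    for i, label in enumerate(label_list):
        flat_pixels = image_dict[i]  # 1-D uint8 array decoded from the 'image' feature
        print(i, flat_pixels.shape, label)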
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_single():\n\n\n filename = glob.glob(\"rawdata/tfrecords_cleaned/*.tfrecords\")\n\n\n filename_queue = tf.train.string_input_producer(filename, num_epochs=None)\n img, label, rk, links = read_and_decode_file(filename_queue)\n\n init = tf.global_variables_initializer()\n\n sess = tf.Session()\n sess.run(init)\n tf.train.start_queue_runners(sess=sess)\n\n for i in range(len(filename)):\n for k in range(50):\n img_1, label_1, rk_1, links_1 = sess.run([img, label, rk, links])\n\n print(\"ID: {0}, Lbl: {1}\".format(k, label_1))\n\n plt.imsave(\"testimg_{0}.jpg\".format(k), img_1)\n print(\"write\", k)", "def _read_from_file(queue, config, class_label):\n\t\n\tclass SequenceRecord(object):\n\t\tpass\n\tresult = SequenceRecord()\n\t\n\t# Dimensions of the images and the bytes they each take\n\t# up in the binary file\n\tresult.height = config.image_size\n\tresult.width = config.image_size\n\tresult.depth = config.image_depth\n\tresult.sequence_length = config.num_steps\n\tresult.image_bytes = (result.height * result.width * result.depth)\n\n\tresult.patient_ID_bytes = 5 #uint8\n\n\tinitial_image_name_bytes = 92 #uint8\n\tresult.num_features = config.num_features\n\tresult.one_feature_bytes = 8\n\tresult.feature_bytes = config.num_features * result.one_feature_bytes # float64\n\tresult.coord_bytes = config.num_steps*2*6 # x and y coords, uint32\n\n\trecord_bytes = result.image_bytes * result.sequence_length + result.coord_bytes + result.patient_ID_bytes + initial_image_name_bytes + result.feature_bytes\n\t\n\t# The amount of padding on the image_name must be adjusted based on the number of features\n\t# because the overall number of bytes must be a multiple of 8 for float64 processing of raw output.\n\tincrement = 8 - (record_bytes % 8)\n\tresult.image_name_bytes = initial_image_name_bytes + increment\n\trecord_bytes += increment\n\t\n\t# Create reader with the fixed record length and\n\t# read off one record\n\treader = tf.FixedLengthRecordReader(record_bytes=record_bytes)\n\tresult.key, value = reader.read(queue)\n\t# Convert from a string to a vector of uint8 that is record_bytes long.\n\trecord_data = tf.decode_raw(value, tf.uint8, name='decode_raw_uint8')\n\tfeature_data = tf.decode_raw(value, tf.float64, name='decode_raw_float64')\n\tindex = 0\n\tnext_index = result.patient_ID_bytes\n\tresult.subject_id, index = process_slice(index, result.patient_ID_bytes, record_data)\n\tresult.image_name, index = process_slice(index, result.image_name_bytes, record_data)\n\tresult.patch_coords, index = process_slice(index, result.coord_bytes, record_data)\n\n\t# features are taken from float64 stream, they are taken out as a single block of data.\n\tfeature_index = index // result.one_feature_bytes\n\tresult.features, feature_index = process_removal_slice(feature_index, result.num_features, feature_data, config.remove_feature)\n\n\t_ , index = process_slice(index, result.feature_bytes, record_data)\n\tsequence_data = tf.strided_slice(record_data, [index], [record_bytes])\n\n\t# Treat sequence as an image of dimensions [(steps * patch height), width, depth] and normalize per image\n\t# Then reshape back to a single sequence\n\n\twith tf.device(\"/cpu:0\"):\n\t\tnormalized_sequence = tf.reshape(sequence_data,\n\t\t\t[result.sequence_length*result.height,result.width, result.depth])\n\t\tnormalized_sequence = tf.image.per_image_standardization(normalized_sequence)\n\n\t\tresult.sequence = tf.reshape(normalized_sequence,\n\t\t\t\t\t\t\t\t[result.sequence_length, result.height * result.width * 
result.depth]) #result.image_bytes])\n\t\t\t\t\t\t\t\t\n\tresult.sequence = tf.cast(result.sequence, tf.float32)\n\tresult.label = tf.constant(class_label, shape=[1])\n\n\treturn result", "def _read_labels(test_data=False):\n if not test_data:\n filename = os.path.join(FOLDER_PATH, 'train-labels.idx1-ubyte')\n else:\n filename = os.path.join(FOLDER_PATH, 't10k-labels.idx1-ubyte')\n if not os.path.exists(filename):\n raise ValueError('The file dose not exist.')\n \n # Create a queue that produces the filenames to read.\n filename_queue = tf.train.string_input_producer([filename])\n \n # The first 8 bytes contain file information:\n # [offset] [type] [value] [description]\n # 0000 32 bit integer 0x00000801(2049) magic number\n # 0004 32 bit integer 60000/10000 number of items \n # ...(label value)\n header_bytes = 8\n # Every record consists of a label, with a fixed number of bytes for each.\n record_bytes = 1\n \n # Create a FixedLengthRecordReader to read record.\n reader = tf.FixedLengthRecordReader(record_bytes=record_bytes,\n header_bytes=header_bytes)\n _, value = reader.read(filename_queue)\n\n # Convert from a string to a vector of uint8, then cast to int32.\n record = tf.cast(tf.decode_raw(value, tf.uint8), tf.int32)\n \n # Reshape from [1] to a scalar shape [].\n label = tf.reshape(record, [])\n\n return label", "def parse_record(raw_record):\n keys_to_features = {\n 'image/height':\n tf.FixedLenFeature((), tf.int64),\n 'image/width':\n tf.FixedLenFeature((), tf.int64),\n 'image/encoded':\n tf.FixedLenFeature((), tf.string, default_value=''),\n 'image/format':\n tf.FixedLenFeature((), tf.string, default_value='jpeg'),\n 'label/encoded':\n tf.FixedLenFeature((), tf.string, default_value=''),\n 'label/format':\n tf.FixedLenFeature((), tf.string, default_value='png'),\n }\n\n parsed = tf.parse_single_example(raw_record, keys_to_features)\n\n # height = tf.cast(parsed['image/height'], tf.int32)\n # width = tf.cast(parsed['image/width'], tf.int32)\n\n image = tf.image.decode_image(\n tf.reshape(parsed['image/encoded'], shape=[]), _DEPTH)\n image = tf.to_float(tf.image.convert_image_dtype(image, dtype=tf.uint8))\n image.set_shape([None, None, 3])\n\n label = tf.image.decode_image(\n tf.reshape(parsed['label/encoded'], shape=[]), 1)\n label = tf.to_int32(tf.image.convert_image_dtype(label, dtype=tf.uint8))\n label.set_shape([None, None, 1])\n\n\n return image, label", "def read_and_decode(filename, is_train=None):\n filename_queue = tf.train.string_input_producer([filename])\n reader = tf.TFRecordReader()\n _, serialized_example = reader.read(filename_queue)\n features = tf.parse_single_example(\n serialized_example, features={\n 'label': tf.FixedLenFeature([], tf.int64),\n 'img_raw': tf.FixedLenFeature([], tf.string),\n }\n )\n # You can do more image distortion here for training data\n img = tf.decode_raw(features['img_raw'], tf.float32)\n img = tf.reshape(img, [32, 32, 3])\n # img = tf.cast(img, tf.float32) #* (1. / 255) - 0.5\n if is_train ==True:\n # 1. Randomly crop a [height, width] section of the image.\n img = tf.random_crop(img, [24, 24, 3])\n\n # 2. Randomly flip the image horizontally.\n img = tf.image.random_flip_left_right(img)\n\n # 3. Randomly change brightness.\n img = tf.image.random_brightness(img, max_delta=63)\n\n # 4. Randomly change contrast.\n img = tf.image.random_contrast(img, lower=0.2, upper=1.8)\n\n # 5. Subtract off the mean and divide by the variance of the pixels.\n img = tf.image.per_image_standardization(img)\n\n elif is_train == False:\n # 1. 
Crop the central [height, width] of the image.\n img = tf.image.resize_image_with_crop_or_pad(img, 24, 24)\n\n # 2. Subtract off the mean and divide by the variance of the pixels.\n img = tf.image.per_image_standardization(img)\n\n elif is_train == None:\n img = img\n\n label = tf.cast(features['label'], tf.int32)\n return img, label", "def save_data_into_tf_records(image_file, image_label, tf_records_writer):\n try:\n img = Image.open(image_file)\n except OSError as e:\n print(e)\n print(\"Error image \" + image_file)\n return False\n # Unify resolution to 300 * 300.\n img = np.array(img.resize((IMAGE_SIZE, IMAGE_SIZE)))\n # img = np.array(img)\n\n # Check if the image is rgb image.\n if len(img.shape) != 3 or img.shape[2] != 3:\n print(\"Not rgb image \" + image_file)\n return False\n # Check if the image is useless.\n same = useless_image_array == img\n if type(same) == np.ndarray:\n if (useless_image_array == img).all():\n print(\"Useless image. \" + image_file)\n return False\n elif type(same) == bool:\n if same:\n print(\"Useless image. \" + image_file)\n return False\n\n img_raw = img.tobytes()\n example = tf.train.Example(features=tf.train.Features(feature={\n \"label\": tf.train.Feature(int64_list=tf.train.Int64List(value=[\n image_label])),\n \"raw\": tf.train.Feature(bytes_list=tf.train.BytesList(value=[\n img_raw])),\n \"height\": tf.train.Feature(int64_list=tf.train.Int64List(value=[\n img.shape[0]])),\n \"width\": tf.train.Feature(int64_list=tf.train.Int64List(\n value=[img.shape[1]])),\n \"channel\": tf.train.Feature(int64_list=tf.train.Int64List(\n value=[img.shape[2]]))\n }))\n tf_records_writer.write(example.SerializeToString())\n return True", "def _read_input(filename_queue):\n label_bytes = 1\n height = 32\n depth = 3\n image_bytes = height * height * depth\n record_bytes = label_bytes + image_bytes\n\n reader = tf.compat.v1.FixedLengthRecordReader(record_bytes=record_bytes)\n _, byte_data = reader.read(filename_queue)\n uint_data = tf.io.decode_raw(byte_data, tf.uint8)\n\n label = tf.cast(tf.strided_slice(uint_data, [0], [label_bytes]), tf.int32)\n label.set_shape([1])\n\n depth_major = tf.reshape(\n tf.strided_slice(uint_data, [label_bytes], [record_bytes]),\n [depth, height, height])\n image = tf.cast(tf.transpose(a=depth_major, perm=[1, 2, 0]), tf.float32)\n\n return image, label", "def input(record_path,dataset_size,batch_size,image_size,num_epochs=1):\n\n if not num_epochs: \n num_epochs = None\n filename = record_path\n\n with tf.name_scope('input'):\n filename_queue = tf.train.string_input_producer([filename])\n image,label,target,xmin,ymin,xmax,ymax,classes = read_and_decode(filename_queue,image_size)\n image.set_shape([image_size,image_size,3])\n images,labels,targets,b_xmin,b_ymin,b_xmax,b_ymax,b_classes = tf.train.shuffle_batch([image,label,target,xmin,ymin,xmax,ymax,classes],batch_size = batch_size,num_threads = 8,capacity = dataset_size + 3*batch_size,min_after_dequeue=100)\n dense_labels = tf.sparse_tensor_to_dense(labels)\n dense_targets = tf.sparse_tensor_to_dense(targets)\n dense_xmin = tf.sparse_tensor_to_dense(b_xmin)\n dense_ymin = tf.sparse_tensor_to_dense(b_ymin)\n dense_xmax = tf.sparse_tensor_to_dense(b_xmax)\n dense_ymax = tf.sparse_tensor_to_dense(b_ymax)\n dense_classes = tf.sparse_tensor_to_dense(b_classes)\n images = tf.cast(images,tf.float32)\n\n dense_box = tf.stack(\n [dense_xmin,dense_ymin,dense_xmax,dense_ymax],axis = 2)\n return images,dense_labels,dense_targets,dense_box,dense_classes", "def parse_record_reid(raw_record):\n 
keys_to_features = {\n 'image_raw': tf.FixedLenFeature([], tf.string),\n 'height': tf.FixedLenFeature([], tf.int64),\n 'width': tf.FixedLenFeature([], tf.int64),\n 'depth': tf.FixedLenFeature([], tf.int64),\n 'label': tf.FixedLenFeature([], tf.int64)\n }\n parsed = tf.parse_single_example(raw_record, keys_to_features)\n # image = tf.image.decode_image(\n # tf.reshape(parsed['image_raw'], shape=[]), _DEPTH)\n\n image = tf.decode_raw(parsed['image_raw'], tf.uint8)\n # image = tf.to_float(tf.image.convert_image_dtype(image, dtype=tf.uint8))\n image = tf.reshape(image, [_HEIGHT, _WIDTH, 3])\n # image = tf.cast(image, tf.float32) * (1. / 255.0)\n image = tf.cast(image,tf.float32)\n\n label = tf.cast(parsed['label'],tf.int32)\n\n label = tf.one_hot(label, labels_nums, 1, 0)\n # labels={\"seg\":None,\"reid\":label}\n return image, label", "def read_pictures_data(filename, pic_size=(), data_folder=''):\n if type(filename) != str or type(data_folder) != str:\n raise TypeError('The name of the file and the name of the folder must be strings.')\n filename, ext = filename.split('.')\n if ext!='tfrecord':\n raise ValueError('Only TFRecord files can be read.')\n files = []\n i_file = 0\n while True:\n f = filename+str(i_file)+'.'+ext\n if os.path.isfile(os.path.join(data_folder,f)):\n files.append(os.path.join(data_folder,f))\n else:\n break\n i_file += 1\n nshards = i_file+1\n\n def parser_func(tfrecord):\n #feats = {'pic': tf.FixedLenFeature((pic_size[0]*pic_size[1]*pic_size[2]), tf.float32)}\n feats = {'pic': tf.FixedLenFeature((), tf.string)}\n pfeats = tf.parse_single_example(tfrecord, feats)\n pic = tf.decode_raw(pfeats['pic'], tf.float32)\n return tf.reshape(pic, (pic_size[0],pic_size[1],pic_size[2]))\n\n dataset = tf.data.Dataset.list_files(files).shuffle(nshards) #dataset of filenames\n dataset = dataset.interleave(lambda x: tf.data.TFRecordDataset(x), cycle_length=nshards)\n dataset = dataset.map(map_func=parser_func, num_parallel_calls=32) #number of available CPUs per node in OzStar\n \n return dataset.shuffle(buffer_size=7000, reshuffle_each_iteration=True)", "def readTFRecord():\n #filenameQueue = tf.train.string_input_producer([tfName])\n dataset_fn = tf.data.TFRecordDataset\n _parse_fn = lambda x: parse_fn(x, is_train=False)\n \n # create a tf.data.Dataset from list of files\n filenames = tf.data.Dataset.list_files(\"./processed/train*.tfrecords\")\n dataset = filenames.apply(\n tf.data.experimental.parallel_interleave(dataset_fn, cycle_length=4))\n \n dataset = dataset.map(_parse_fn, num_parallel_calls=2)\n print('After dataset map: {}'.format(dataset))\n \n return make_iterator(dataset)", "def read_and_decode(filename_queue, shape=None):\n label_bytes = 1\n width = shape[0]\n height = shape[1]\n depth = shape[2]\n record_byte_length = label_bytes + width * height\n\n with tf.name_scope(\"read_and_decode\"):\n # Length of record bytes in the dataset\n # Defined in utils module\n reader = tf.TFRecordReader()\n key, record_string = reader.read(filename_queue)\n\n feature_map = {\n \"image/encoded\": tf.FixedLenFeature(\n shape=[], dtype=tf.string)\n }\n parsed = tf.parse_single_example(record_string, feature_map)\n record_bytes = tf.decode_raw(parsed[\"image/encoded\"], tf.int8)\n\n # first byte is the label\n label = tf.cast(tf.strided_slice(record_bytes,\n begin=[0],\n end=[label_bytes]), tf.int32)\n # label = tf.reshape(label, [1])\n # print(label)\n\n # remaining bytes is the example\n example = tf.reshape(tf.strided_slice(record_bytes,\n begin=[label_bytes],\n 
end=[record_byte_length]), [width, height, depth])\n example = tf.cast(example, tf.float32)\n example.set_shape([width, height, depth])\n label.set_shape(1)\n label = tf.squeeze(label)\n # print(label)\n # label = tf.reshape(label, [0])\n\n return example, label", "def load_tfrecord(fname: str, logger_tag: str) -> tf.data.Dataset:\n logger = logging.getLogger(logger_tag)\n logger.info('Start loading dataset for file %s', fname)\n raw_dataset = tf.data.TFRecordDataset([fname])\n\n def _parse(example_proto):\n feature_description = {\n KEY_IMAGE_BYTES: tf.io.FixedLenFeature([], tf.string, default_value=''),\n KEY_CLASS: tf.io.FixedLenFeature([], tf.int64, default_value=-1),\n }\n return collections.OrderedDict(\n tf.io.parse_single_example(example_proto, feature_description)\n )\n\n ds = raw_dataset.map(_parse)\n\n def _transform(item):\n return collections.OrderedDict([\n (KEY_IMAGE_DECODED, tf.io.decode_jpeg(item[KEY_IMAGE_BYTES])),\n (KEY_CLASS, tf.reshape(item[KEY_CLASS], [1])),\n ])\n\n ds = ds.map(_transform)\n logger.info('Finished loading dataset for file %s', fname)\n return ds", "def read_tensor_from_tfrecords(n_exp,tfrecords_filenames,data_shape,dtypes=[tf.float32,tf.float32],batch_size=8,num_epochs=1,phase='training'):\n assert phase==\"training\" or phase==\"validation\", \"phase must be <training> or <validation>\"\n with tf.device('/cpu:0'):\n with tf.name_scope(phase) as scope:\n traindataset = tf.data.TFRecordDataset(tfrecords_filenames)\n traindataset = traindataset.map(map_func=lambda x:MakeTFRecords_tfdata._read_and_decode(x,data_shape,dtypes),num_parallel_calls=4)\n if phase=='training':\n traindataset = traindataset.shuffle(buffer_size=n_exp)\n traindataset = traindataset.prefetch(buffer_size=n_exp)\n traindataset = traindataset.batch(batch_size)\n traindataset = traindataset.repeat(num_epochs)\n trainiterator = traindataset.make_initializable_iterator()\n element = trainiterator.get_next()\n return trainiterator,element", "def read_tfrecord_and_decode_into_image_label_pair_tensors(tfrecord_filenames_queue, size):\n\n reader = tf.TFRecordReader()\n\n _, serialized_example = reader.read(tfrecord_filenames_queue)\n\n features = tf.parse_single_example(\n serialized_example,\n features={\n 'image/height': tf.FixedLenFeature([], tf.int64),\n 'image/width': tf.FixedLenFeature([], tf.int64),\n 'image/depth': tf.FixedLenFeature([], tf.int64),\n 'image/encoded': tf.FixedLenFeature([], tf.string),\n 'image/class/label': tf.FixedLenFeature([], tf.int64),\n # 'image': tf.FixedLenFeature([], tf.string)\n })\n\n image = tf.decode_raw(features['image/encoded'], tf.uint8)\n label = tf.cast(features['image/class/label'], tf.int64)\n height = tf.cast(features['image/height'], tf.int64)\n width = tf.cast(features['image/width'], tf.int64)\n depth = tf.cast(features['image/depth'], tf.int64)\n\n image = tf.reshape(image, [size,size,3]) #height,width,depth\n image = tf.to_float(image)\n image = image/127.5 - 1.0\n\n return image, label", "def _add_to_tfrecord(filename, tfrecord_writer, offset=0):\n with tf.gfile.Open(filename, 'rb') as f:\n if sys.version_info < (3,):\n data = cPickle.load(f)\n else:\n data = cPickle.load(f, encoding='bytes')\n\n images = data[b'data']\n num_images = images.shape[0]\n\n images = images.reshape((num_images, 3, 32, 32))\n labels = data[b'fine_labels']\n\n with tf.Graph().as_default():\n image_placeholder = tf.placeholder(dtype=tf.uint8)\n encoded_image = tf.image.encode_png(image_placeholder)\n\n with tf.Session('') as sess:\n\n for j in range(num_images):\n 
sys.stdout.write('\\r>> Reading file [%s] image %d/%d' % (\n filename, offset + j + 1, offset + num_images))\n sys.stdout.flush()\n\n image = np.squeeze(images[j]).transpose((1, 2, 0))\n label = labels[j]\n\n png_string = sess.run(encoded_image,\n feed_dict={image_placeholder: image})\n\n example = dataset_utils.image_to_tfexample(\n png_string, b'png', _IMAGE_SIZE, _IMAGE_SIZE, label)\n tfrecord_writer.write(example.SerializeToString())\n\n return offset + num_images", "def data_to_tfrecord(images, labels, filename):\n if os.path.isfile(filename):\n print(\"%s exists\" % filename)\n return\n print(\"Converting data into %s ...\" % filename)\n # cwd = os.getcwd()\n writer = tf.python_io.TFRecordWriter(filename)\n for index, img in enumerate(images):\n img_raw = img.tobytes()\n # Visualize a image\n # tl.visualize.frame(np.asarray(img, dtype=np.uint8), second=1, saveable=False, name='frame', fig_idx=1236)\n label = int(labels[index])\n example = tf.train.Example(\n features=tf.train.Features(\n feature={\n \"label\": tf.train.Feature(int64_list=tf.train.Int64List(value=[label])),\n 'img_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_raw])),\n }\n )\n )\n writer.write(example.SerializeToString()) # Serialize To String\n writer.close()", "def load_record(path, batch_size, n_samples=-1):\n dataset = tf.data.TFRecordDataset(path)\n dataset = dataset.map(lambda x: read_tfrecord(x))\n # https://www.tensorflow.org/api_docs/python/tf/data/Dataset#cache\n if n_samples != -1:\n dataset = dataset.take(n_samples)\n dataset = dataset.cache() \n batches = dataset.batch(batch_size)\n # https://www.tensorflow.org/api_docs/python/tf/data/Dataset#prefetch\n batches = batches.prefetch(buffer_size=1)\n return batches", "def read_images(self, img_name, label_name):\n image_string = tf.read_file(img_name)\n image_decoded = tf.image.decode_jpeg(image_string, channels=3)\n label_string = tf.read_file(label_name)\n label_decoded = tf.image.decode_jpeg(label_string, channels=1)\n return image_decoded, label_decoded", "def _convert_dataset(image_list, label_list, tfrecord_dir):\r\n with tf.Graph().as_default():\r\n with tf.Session() as sess:\r\n if not os.path.exists(tfrecord_dir):\r\n os.makedirs(tfrecord_dir)\r\n output_filename = os.path.join(tfrecord_dir, \"train.tfrecord\")\r\n tfrecord_writer = tf.python_io.TFRecordWriter(output_filename)\r\n length = len(image_list)\r\n for i in range(length):\r\n # 图像数据\r\n image_data = Image.open(image_list[i],'r')\r\n\r\n size = image_data.size\r\n image_data = image_data.tobytes()\r\n label = label_list[i]\r\n example = image_to_tfexample(image_data, label,size)\r\n tfrecord_writer.write(example.SerializeToString())\r\n sys.stdout.write('\\r>> Converting image %d/%d' % (i + 1, length))\r\n sys.stdout.flush()\r\n\r\n sys.stdout.write('\\n')\r\n sys.stdout.flush()", "def parser(record):\n # keys_to_features = {\n # \"image_data\": tf.FixedLenFeature((), tf.string, default_value=\"\"),\n # \"date_time\": tf.FixedLenFeature((), tf.int64, default_value=\"\"),\n # \"label\": tf.FixedLenFeature((), tf.int64,\n # default_value=tf.zeros([], dtype=tf.int64)),\n # }\n\n keys_to_features = {\n \"image_data\": tf.FixedLenFeature((), tf.float, default_value=\"\"),\n \"label\": tf.FixedLenFeature((), tf.int32,\n default_value=tf.zeros([], dtype=tf.int64)),\n }\n parsed = tf.parse_single_example(record, keys_to_features)\n\n # Perform additional preprocessing on the parsed data.\n image = tf.image.decode_jpeg(parsed[\"image_data\"])\n image = tf.reshape(image, [299, 299, 
1])\n label = tf.cast(parsed[\"label\"], tf.int32)\n\n return {\"image_data\": image, \"date_time\": parsed[\"date_time\"]}, label", "def read_data(feature_file, label_file):", "def to_tfrecord(data_blob):\n\n id = np.array(data_blob['id'], dtype=np.int32).tobytes()\n dim = np.array(data_blob['images'].shape, dtype=np.int32).tobytes()\n\n images = np.array(data_blob['images'], dtype=np.uint8).tobytes()\n poses = np.array(data_blob['poses'], dtype=np.float32).tobytes()\n depth = np.array(data_blob['depth'], dtype=np.float32).tobytes()\n filled = np.array(data_blob['filled'], dtype=np.float32).tobytes()\n intrinsics = np.array(data_blob['intrinsics'], dtype=np.float32).tobytes()\n\n\n example = tf.train.Example(features=tf.train.Features(feature={\n 'id': tf.train.Feature(bytes_list=tf.train.BytesList(value=[id])),\n 'dim': tf.train.Feature(bytes_list=tf.train.BytesList(value=[dim])),\n 'images': tf.train.Feature(bytes_list=tf.train.BytesList(value=[images])),\n 'poses': tf.train.Feature(bytes_list=tf.train.BytesList(value=[poses])),\n 'depth': tf.train.Feature(bytes_list=tf.train.BytesList(value=[depth])),\n 'filled': tf.train.Feature(bytes_list=tf.train.BytesList(value=[filled])),\n 'intrinsics': tf.train.Feature(bytes_list=tf.train.BytesList(value=[intrinsics])),\n }))\n\n return example", "def load_tfrecord(fname, n_jobs=1, verbose=0):\n dfx = delayed(bytestring_to_record)\n pool = Parallel(n_jobs=n_jobs, verbose=verbose)\n results = pool(dfx(x) for x in tf.compat.v1.python_io.tf_record_iterator(fname))\n features = np.concatenate([xy[0] for xy in results], axis=0)\n meta = pd.concat([xy[1] for xy in results], axis=0, ignore_index=True)\n return features, meta", "def parse_record(raw_record, is_training):\n keys_to_features = {\n 'image/encoded':\n tf.FixedLenFeature((), tf.string, default_value=''),\n 'image/format':\n tf.FixedLenFeature((), tf.string, default_value='jpeg'),\n 'image/class/label':\n tf.FixedLenFeature([], dtype=tf.int64, default_value=-1),\n 'image/class/text':\n tf.FixedLenFeature([], dtype=tf.string, default_value=''),\n }\n\n parsed = tf.parse_single_example(raw_record, keys_to_features)\n\n image = tf.image.decode_image(\n tf.reshape(parsed['image/encoded'], shape=[]),\n _NUM_CHANNELS)\n\n # Note that tf.image.convert_image_dtype scales the image data to [0, 1).\n image = tf.image.convert_image_dtype(image, dtype=tf.float32)\n\n label = tf.cast(\n tf.reshape(parsed['image/class/label'], shape=[]),\n dtype=tf.int32)\n\n return {\"image\": image}, label", "def show_record(filenames):\n # Generate dataset from TFRecord file.\n dataset = tf.data.TFRecordDataset(filenames)\n\n # Make dataset iteratable.\n iterator = dataset.make_one_shot_iterator()\n next_example = iterator.get_next()\n\n # Extract features from single example\n features = _extract_feature(next_example)\n image_decoded = tf.image.decode_image(features['image/encoded'])\n label_x = tf.cast(features['label/x'], tf.int32)\n label_y = tf.cast(features['label/y'], tf.int32)\n\n # Use openCV for preview\n cv2.namedWindow(\"image\", cv2.WINDOW_NORMAL)\n\n # Actrual session to run the graph.\n with tf.Session() as sess:\n while True:\n try:\n image_tensor, label_text = sess.run(\n [image_decoded, (label_x, label_y)])\n\n # Use OpenCV to preview the image.\n image = np.array(image_tensor, np.uint8)\n cv2.imshow(\"image\", image)\n cv2.waitKey(100)\n\n # Show the labels\n print(label_text)\n except tf.errors.OutOfRangeError:\n break", "def extract_images_from_record(record_path, sample, views, output_path):\n\n 
dataset = tf.data.TFRecordDataset(record_path, compression_type='')\n for i, data in enumerate(dataset):\n if i % sample != 0:\n continue\n\n frame = open_dataset.Frame()\n frame.ParseFromString(bytearray(data.numpy()))\n extract_images_from_frame(frame, views, output_path)", "def _parse_tfexample(example):\n\n ## parse\n features = tf.parse_single_example(example, KEYS2FEATURES)\n\n image = tf.image.decode_png(features['image/encoded'])\n label = tf.image.decode_png(features['label/encoded'])\n # label is decoded as a 3-D png image\n label = label[..., 0]\n im_path = features['image/path']\n la_path = features['label/path']\n\n return image, label, im_path, la_path", "def convert_to_tfrecord(data_files, label_files, output_file, num_steps, test_flag):\n print('Generating %s' % output_file)\n\n with tf.python_io.TFRecordWriter(output_file) as record_writer:\n\n for idx in enumerate(data_files):\n\n print('Working on %s' % data_files[idx[0]])\n print('Working on %s' % label_files[idx[0]])\n\n #data = _read_data(data_files[idx[0]])\n #label = _read_data(label_files[idx[0]])\n\n #data = loadtxt(data_files[idx[0]])\n label = loadtxt(label_files[idx[0]])\n feat = [0,1,2,3]\n feat.extend(range(6,25))\n if test_flag:\n with open(data_files[idx[0]]) as infile:\n data = np.zeros([num_steps, 25])\n cnt = 0\n for line in infile:\n line = line.split()\n data[0:num_steps-1, :]=data[1:num_steps, :]\n data[num_steps-1,:]=line\n data1 = data\n data1[:,0] = signal.detrend(data1[:,0], axis=0)\n write_to_tfrecord(data1[:,feat], label[cnt:cnt+num_steps], num_steps, record_writer)\n cnt+=1\n else:\n with open(data_files[idx[0]]) as infile:\n data = []\n cnt = 1\n for line in infile:\n data.append(line.split())\n if cnt%num_steps==0:\n data = np.array(data, dtype=float)\n data.reshape(data.shape[0], -1)\n #data = signal.detrend(data, axis=0)\n write_to_tfrecord(data[:,feat], label[cnt-num_steps:cnt], num_steps, record_writer)\n data = []\n cnt=cnt+1", "def input_fn(filename):\n\n def parse_single_tfrecord(serializer_item):\n features = {\n 'label': tf.FixedLenFeature([], tf.int64),\n 'sentence': tf.FixedLenFeature([], tf.string)\n }\n\n features_var = tf.parse_single_example(serializer_item, features)\n\n labels = tf.cast(features_var['label'], tf.int64)\n sentence = tf.decode_raw(features_var['sentence'], tf.uint8)\n sentence = tf.cast(sentence, tf.int64)\n return sentence, labels\n\n tf_record_filename = filename\n if not os.path.exists(tf_record_filename):\n raise FileNotFoundError(\"tfrecord not found\")\n tf_record_reader = tf.data.TFRecordDataset(tf_record_filename)\n\n dataset = tf_record_reader.map(parse_single_tfrecord).shuffle(50000).batch(\n 10).repeat(1)\n iterator = dataset.make_one_shot_iterator()\n data, labels = iterator.get_next()\n return data, labels", "def _decode_record(record):\n name_to_features = {\n \"input_ids\": tf.FixedLenFeature([self.max_sen_len], tf.int64),\n \"input_mask\": tf.FixedLenFeature([self.max_sen_len], tf.int64),\n \"segment_ids\": tf.FixedLenFeature([self.max_sen_len], tf.int64),\n \"stroke_ids\": tf.FixedLenFeature([self.max_sen_len], tf.int64),\n \"lmask\": tf.FixedLenFeature([self.max_sen_len], tf.int64),\n \"label_ids\": tf.FixedLenFeature([self.max_sen_len], tf.int64),\n }\n\n\n example = tf.parse_single_example(record, name_to_features)\n\n #int64 to int32\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n input_ids = example['input_ids']\n input_mask = example['input_mask']\n segment_ids = 
example['segment_ids']\n stroke_ids = example['stroke_ids']\n label_ids = example['label_ids']\n lmask = example['lmask']\n py_labels = tf.py_func(_get_py_seq, [label_ids], [tf.int32])\n\n return input_ids, input_mask, segment_ids, stroke_ids, lmask, label_ids, py_labels", "def read_and_decode_file(filename_queue):\n\n reader = tf.TFRecordReader()\n _, serialized_example = reader.read(filename_queue)\n features = tf.parse_single_example(\n serialized_example, features={\n 'image_raw': tf.FixedLenFeature([], tf.string),\n 'rating_average': tf.FixedLenFeature([], tf.string),\n 'image_name': tf.FixedLenFeature([], tf.string),\n 'image_links': tf.FixedLenFeature([], tf.string),\n })\n\n image = tf.image.decode_jpeg(features['image_raw'], channels=3)\n\n # image_resize = tf.image.resize_images(image, (299, 299))\n label = tf.string_to_number(features['rating_average'], tf.float32)\n # label = features['rating_average']\n\n image_name = features['image_name']\n links = features['image_links']\n\n return image, label, image_name, links", "def read_images_from_disk(input_queue):\n\tlabel = input_queue[1]\n\tfile_contents = tf.read_file(input_queue[0])\n\texample = tf.image.decode_jpeg(file_contents, channels=3)\n\treturn example, label", "def _read_images(test_data=False, as_image=True, for_show=False):\n if not test_data:\n filename = os.path.join(FOLDER_PATH, 'train-images.idx3-ubyte')\n else:\n filename = os.path.join(FOLDER_PATH, 't10k-images.idx3-ubyte')\n if not os.path.exists(filename):\n raise ValueError('The file dose not exist.')\n \n # Create a queue that produces the filenames to read.\n filename_queue = tf.train.string_input_producer([filename])\n \n # The first 16 bytes contain file information:\n # [offset] [type] [value] [description]\n # 0000 32 bit integer 0x00000803(2051) magic number\n # 0004 32 bit integer 60000/10000 number of images \n # 0008 32 bit integer 28 number of rows\n # 0012 32 bit integer 28 number of columns\n # ...(pixel value)\n header_bytes = 16\n # Every record consists of an image, with a fixed number of bytes for each.\n record_bytes = IMAGE_SIZE * IMAGE_SIZE\n \n # Create a FixedLengthRecordReader to read record.\n reader = tf.FixedLengthRecordReader(record_bytes=record_bytes,\n header_bytes=header_bytes)\n _, value = reader.read(filename_queue)\n\n # Convert from a string to a vector of uint8.\n image = tf.decode_raw(value, tf.uint8)\n \n if for_show:\n reshape_image = tf.reshape(image, [IMAGE_SIZE, IMAGE_SIZE])\n return reshape_image\n\n if as_image: # for CNN\n # Reshape from [height * width * channels] to [height, width, channels].\n reshape_image = tf.reshape(image, [IMAGE_SIZE, IMAGE_SIZE, IMAGE_CHANNELS])\n\n # Subtract off the mean and divide by the variance of the pixels.\n # Linearly scales image to have zero mean and unit norm.\n preproc_image = tf.image.per_image_whitening(reshape_image)\n else: # for linear classifier / ANN\n # To avoid ValueError: All shapes must be fully defined:...\n image.set_shape([IMAGE_SIZE * IMAGE_SIZE])\n \n # Cast image pixel value from tf.uint8 to tf.float32\n float_image = tf.cast(image, tf.float32)\n \n # normalization\n preproc_image = tf.div(float_image, 255.0)\n\n return preproc_image", "def input_fn(input_file, batch_size,max_sentence_length,shuffle_num, mode=tf.estimator.ModeKeys.TRAIN):\n def parse_single_tfrecord(serializer_item):\n features = {\n 'label': tf.FixedLenFeature([],tf.int64),\n 'sentence' : tf.FixedLenFeature([max_sentence_length],tf.int64)\n }\n\n features_var = 
tf.parse_single_example(serializer_item,features)\n\n labels = tf.cast(features_var['label'],tf.int64)\n #sentence = tf.decode_raw(features_var['sentence'],tf.uint8)\n sentence = tf.cast(features_var['sentence'],tf.int64)\n return sentence,labels\n\n\n if not os.path.exists(input_file):\n raise FileNotFoundError(\"tfrecord not found\")\n\n\n tf_record_reader = tf.data.TFRecordDataset(input_file)\n if mode == tf.estimator.ModeKeys.TRAIN:\n print(mode)\n tf_record_reader = tf_record_reader.repeat()\n tf_record_reader = tf_record_reader.shuffle(buffer_size=shuffle_num)\n dataset = tf_record_reader.apply(tf.data.experimental.map_and_batch(lambda record:parse_single_tfrecord(record),\n batch_size,num_parallel_calls=8))\n\n iterator = dataset.make_one_shot_iterator()\n data, labels = iterator.get_next()\n return data, labels", "def dict_to_tf_example(label_map_dict):\n filename = label_map_dict[0]\n img_path = os.path.join(FLAGS.image_data_dir, filename)\n\n try:\n with tf.gfile.GFile(img_path, 'rb') as fid:\n encoded_jpg = fid.read()\n except:\n logging.warning('Image Not Found %s', img_path)\n return None\n\n encoded_jpg_io = io.BytesIO(encoded_jpg)\n image = Image.open(encoded_jpg_io)\n (witdh, height) = image.size\n\n if image.format != 'JPEG':\n raise ValueError('Image format not JPEG')\n key = hashlib.sha256(encoded_jpg).hexdigest()\n\n sentence_txt = label_map_dict[1]\n\n\n sentences = []\n f = open('dictionary.json', 'r')\n dictionary = f.read()\n dictionary = json.loads(dictionary)\n for index, _ in enumerate(sentence_txt):\n sentence = []\n for sen in sentence_txt[index].split(' '):\n try:\n sentence.append(dictionary[sen])\n except KeyError:\n sentence.append(dictionary['UNK'])\n sentences.append(sentence)\n\n feature_dict = {\n 'image/height': dataset_util.int64_feature(height),\n 'image/width': dataset_util.int64_feature(witdh),\n 'image/filename': dataset_util.bytes_feature(filename.encode('utf8')),\n 'image/score_0': dataset_util.int64_list_feature(sentences[0]),\n 'image/score_1': dataset_util.int64_list_feature(sentences[1]),\n 'image/score_2': dataset_util.int64_list_feature(sentences[2]),\n 'image/score_3': dataset_util.int64_list_feature(sentences[3]),\n 'image/score_4': dataset_util.int64_list_feature(sentences[4]),\n 'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')),\n 'image/encoded': dataset_util.bytes_feature(encoded_jpg),\n 'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8'))\n }\n\n example = tf.train.Example(features=tf.train.Features(feature=feature_dict))\n return example", "def _add_to_tfrecord(filename, tfrecord_writer,labels_to_class_names, offset=0):\n image = tf.gfile.FastGFile(filename,'r').read()\n label = labels_to_class_names[filename.split('/')[-2]]\n\n with tf.Graph().as_default():\n with tf.Session('') as sess:\n example = dataset_utils.image_to_tfexample(\n image, b'jpg', _IMAGE_SIZE_HEIGHT, _IMAGE_SIZE_WIDTH, label)\n tfrecord_writer.write(example.SerializeToString())\n\n return offset + 1", "def dataset_parser(self, value):\n keys_to_features = {\n 'image/encoded':\n tf.io.FixedLenFeature((), tf.string, ''),\n 'image/format':\n tf.io.FixedLenFeature((), tf.string, 'jpeg'),\n 'image/class/label':\n tf.io.FixedLenFeature([], tf.int64, -1),\n 'image/class/text':\n tf.io.FixedLenFeature([], tf.string, ''),\n 'image/object/bbox/xmin':\n tf.io.VarLenFeature(dtype=tf.float32),\n 'image/object/bbox/ymin':\n tf.io.VarLenFeature(dtype=tf.float32),\n 'image/object/bbox/xmax':\n tf.io.VarLenFeature(dtype=tf.float32),\n 
'image/object/bbox/ymax':\n tf.io.VarLenFeature(dtype=tf.float32),\n 'image/object/class/label':\n tf.io.VarLenFeature(dtype=tf.int64),\n }\n\n parsed = tf.io.parse_single_example(value, keys_to_features)\n image_bytes = tf.reshape(parsed['image/encoded'], shape=[])\n\n tensors_dict = preprocess_image(\n image_bytes=image_bytes,\n is_training=self.is_training,\n augmentation=self.augmentation,\n use_bfloat16=self.use_bfloat16,\n saturate_uint8=self.saturate_uint8,\n scale_and_center=self.scale_and_center,\n use_default_augment=self.use_default_augment)\n\n # Subtract one so that labels are in [0, 1000).\n label = tf.cast(tf.reshape(parsed['image/class/label'], shape=()) - 1,\n dtype=tf.int32)\n tensors_dict['label'] = label\n\n return tensors_dict", "def _decode_record(record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example[\"src_ids\"].values, example[\"tgt_ids\"].values, example[\"label\"][0]", "def create_tfrec(self, path, imgfile, txtfile):\n ifname = os.path.join(path, imgfile)\n tfname = os.path.join(path, txtfile)\n\n # Size\n with Image.open(ifname) as img:\n width, height = img.size\n\n with tf.gfile.GFile(ifname, 'rb') as eimg:\n enc_img = eimg.read()\n hashed = hashlib.sha256(enc_img).hexdigest().encode('utf8')\n\n # Class and BBox\n with open(tfname, 'r') as fp:\n aline = fp.read()\n aline_splt = aline.split()\n imclazztxt = self.clazzes[int(aline_splt[0])].encode('utf8')\n imclazzint = int(aline_splt[0])\n cx, cy = float(aline_splt[1]), float(aline_splt[2])\n wx, wy = float(aline_splt[3]), float(aline_splt[4])\n xtl = (cx - wx/2.0)\n ytl = (cy - wy / 2.0)\n xbr = (cx + wx / 2.0)\n ybr = (cy + wy / 2.0)\n\n fname, ext = os.path.splitext(imgfile)\n imtype = 'jpeg' if (ext == 'jpg') else 'png'\n\n # Create TF Record\n trec = tf.train.Example(features=tf.train.Features(feature={\n 'image/height': self.add_int_feature(height),\n 'image/width': self.add_int_feature(width),\n 'image/filename': self.add_bytes_feature(imgfile.encode('utf8')),\n 'image/source_id': self.add_bytes_feature(imgfile.encode('utf8')),\n 'image/key/sha256': self.add_bytes_feature(hashed),\n 'image/encoded': self.add_bytes_feature(enc_img),\n 'image/format': self.add_bytes_feature(imtype.encode('utf8')),\n 'image/object/bbox/xmin': self.add_float_feature(xtl),\n 'image/object/bbox/xmax': self.add_float_feature(xbr),\n 'image/object/bbox/ymin': self.add_float_feature(ytl),\n 'image/object/bbox/ymax': self.add_float_feature(ybr),\n 'image/object/class/text': self.add_bytes_feature(imclazztxt),\n 'image/object/class/label': self.add_int_feature(imclazzint)\n }))\n return trec", "def parse_func(record):\n keys_to_features = {\n 'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''),\n 'image/format': tf.FixedLenFeature((), tf.string, default_value='png'),\n 'image/path': tf.FixedLenFeature((), tf.string, default_value=''),\n 'label/encoded': tf.FixedLenFeature((), tf.string, default_value=''),\n 'label/format': tf.FixedLenFeature((), tf.string, default_value='png'),\n 'label/path': tf.FixedLenFeature((), tf.string, default_value=''),\n 'height': tf.FixedLenFeature((), tf.int64),\n 'width': tf.FixedLenFeature((), tf.int64)\n }\n\n features = tf.parse_single_example(record, keys_to_features)\n\n image = 
tf.image.decode_png(features['image/encoded'], channels=3)\n label_dtype = tf.uint8\n label = tf.image.decode_png(features['label/encoded'], channels=1, dtype=label_dtype)\n label = tf.reshape(label, tf.convert_to_tensor([features['height'], features['width'], 1]))\n label = tf.squeeze(label)\n\n paths = (features['image/path'], features['label/path'])\n return image, label, paths", "def _get_data_protobuf(self, filename):\n filename_queue = tf.train.string_input_producer([str(filename)],\n num_epochs=None)\n reader = tf.TFRecordReader()\n _, serialized_example = reader.read(filename_queue)\n features = self._get_features(serialized_example)\n\n # image\n with tf.name_scope(\"deserialise_image\"):\n image, image_height, image_width = self._image_from_features(features)\n\n # ground truth landmarks\n with tf.name_scope(\"deserialise_landmarks\"):\n gt_heatmaps, gt_lms, n_landmarks, visible, marked = self._heatmaps_from_features(features)\n\n # information\n with tf.name_scope(\"deserialise_info\"):\n scale = self._info_from_features(features)\n\n # augmentation\n with tf.name_scope(\"image_augmentation\"):\n if self.augmentation:\n gt_heatmaps, gt_lms, image, image_height, image_width = project.input.augmentation.augmentation(\n gt_heatmaps, gt_lms, image, image_height, image_width,\n max_scale=1.25, min_scale=0.75,\n max_rotate=30., min_rotate=-30.,\n flip_probability=0.5, flip_fn=self.flip_fn)\n\n with tf.name_scope(\"crop\"):\n # crop to 256 * 256\n gt_heatmaps, gt_lms, image = self._crop(gt_heatmaps, gt_lms, image, image_height, image_width)\n\n self._set_shape(image, gt_heatmaps, gt_lms)\n\n return image, gt_heatmaps, gt_lms, scale, marked", "def read_images_from_disk(input_queue):\n label = input_queue[1]\n file_contents = tf.read_file(input_queue[0])\n example = tf.image.decode_png(file_contents, channels=3)\n return example, label", "def _read_tf_example(self,\n record: tf.Tensor,\n feature_preprocessor: Callable[[str], List[str]]\n ) -> types.FeatureAndLabelTensors:\n\n keys_to_features = {}\n keys_to_features[self._text_feature] = tf.FixedLenFeature([], tf.string)\n for label, dtype in self._labels.items():\n keys_to_features[label] = tf.FixedLenFeature([], dtype)\n parsed = tf.parse_single_example(\n record, keys_to_features) # type: Dict[str, types.Tensor]\n\n text = parsed[self._text_feature]\n # I think this could be a feature column, but feature columns seem so beta.\n preprocessed_text = feature_preprocessor(text)\n features = {self._text_feature: preprocessed_text}\n if self._round_labels:\n labels = {label: tf.round(parsed[label]) for label in self._labels}\n else:\n labels = {label: parsed[label] for label in self._labels}\n\n return features, labels", "def parse_tfrecord(raw_example, features_name, labels_name):\n feature_spec = {\n name: tf.io.FixedLenSequenceFeature((), tf.int64, True)\n for name in [features_name, labels_name]}\n parsed_example = tf.io.parse_single_example(\n serialized=raw_example,\n features=feature_spec)\n labels = parsed_example.pop(labels_name)\n features = parsed_example.pop(features_name)\n with tf.control_dependencies([\n tf.compat.v1.assert_equal(tf.shape(input=features), tf.shape(input=labels))\n ]):\n return features, labels", "def __init__(\n self, filenames, num_features=None, num_labels=0, x_dtype=tf.string, y_dtype=tf.int64, batch_size=16,\n skip_count=0, file_repeat=1, num_epoch=None, file_folder=None,\n num_threads=8, buffer_size=2000, shuffle_file=False,\n decode_jpeg=False, use_one_hot_label=False, use_smooth_label=True, 
num_classes=1):\n if file_folder is None:\n file_folder = FLAGS.DEFAULT_IN + 'tfrecords_{}/'.format(FLAGS.TARGET_SIZE)\n # check inputs\n if isinstance(filenames, str): # if string, add file location and .tfrecords\n filenames = [os.path.join(file_folder, filenames + '.tfrecords')]\n else: # if list, add file location and .tfrecords to each element in list\n filenames = [os.path.join(file_folder, file + '.tfrecords') for file in filenames]\n for file in filenames:\n assert os.path.isfile(file), 'File {} does not exist.'.format(file)\n if file_repeat > 1:\n filenames = filenames * int(file_repeat)\n if shuffle_file:\n # shuffle operates on the original list and returns None / does not return anything\n from random import shuffle\n shuffle(filenames)\n\n # training information\n self.num_features = num_features\n self.num_labels = num_labels\n self.x_dtype = x_dtype\n self.y_dtype = y_dtype\n self.batch_size = batch_size\n self.batch_shape = [self.batch_size, self.num_features]\n self.num_epoch = num_epoch\n self.skip_count = skip_count\n\n # read data\n self.decode_jpeg = decode_jpeg\n self.use_one_hot_label = False if num_labels > 1 else use_one_hot_label\n self.use_smooth_label = use_smooth_label\n self.num_classes = num_classes\n dataset = tf.data.TFRecordDataset(filenames) # setting num_parallel_reads=num_threads decreased the performance\n self.dataset = dataset.map(self.__parser__, num_parallel_calls=num_threads)\n self.iterator = None\n self.buffer_size = buffer_size\n self.scheduled = False\n self.num_threads = num_threads", "def build_tfrecord_input(conf, training=True):\n filenames = gfile.Glob(os.path.join(conf['data_dir'], '*'))\n if not filenames:\n raise RuntimeError('No data_files files found.')\n\n index = int(np.floor(conf['train_val_split'] * len(filenames)))\n if training:\n filenames = filenames[:index]\n else:\n filenames = filenames[index:]\n\n if conf['visualize']:\n filenames = gfile.Glob(os.path.join(conf['data_dir'], '*'))\n print 'using input file', filenames\n shuffle = False\n else: shuffle = True\n\n filename_queue = tf.train.string_input_producer(filenames, shuffle=shuffle)\n reader = tf.TFRecordReader()\n _, serialized_example = reader.read(filename_queue)\n\n image_aux1_seq, image_main_seq, endeffector_pos_seq, action_seq, object_pos_seq, init_pix_distrib_seq = [], [], [], [], [], []\n init_pix_pos_seq = []\n\n load_indx = range(0, 30, conf['skip_frame'])\n load_indx = load_indx[:conf['sequence_length']]\n print 'using frame sequence: ', load_indx\n\n for i in load_indx:\n if 'single_view' not in conf:\n image_main_name = str(i) + '/image_main/encoded'\n image_aux1_name = str(i) + '/image_aux1/encoded'\n action_name = str(i) + '/action'\n endeffector_pos_name = str(i) + '/endeffector_pos'\n # state_name = 'move/' +str(i) + '/state'\n\n if 'canon_ex' in conf:\n init_pix_pos_name = '/init_pix_pos'\n init_pix_distrib_name = str(i) +'/init_pix_distrib'\n\n features = {\n\n image_aux1_name: tf.FixedLenFeature([1], tf.string),\n action_name: tf.FixedLenFeature([ACION_DIM], tf.float32),\n endeffector_pos_name: tf.FixedLenFeature([STATE_DIM], tf.float32),\n }\n if 'single_view' not in conf:\n (features[image_main_name]) = tf.FixedLenFeature([1], tf.string)\n\n if 'canon_ex' in conf:\n (features[init_pix_distrib_name]) = tf.FixedLenFeature([1], tf.string)\n (features[init_pix_pos_name]) = tf.FixedLenFeature([2], tf.float32)\n\n features = tf.parse_single_example(serialized_example, features=features)\n\n COLOR_CHAN = 3\n if '128x128' in conf:\n ORIGINAL_WIDTH = 128\n 
ORIGINAL_HEIGHT = 128\n IMG_WIDTH = 128\n IMG_HEIGHT = 128\n else:\n ORIGINAL_WIDTH = 64\n ORIGINAL_HEIGHT = 64\n IMG_WIDTH = 64\n IMG_HEIGHT = 64\n\n if 'single_view' not in conf:\n image = tf.decode_raw(features[image_main_name], tf.uint8)\n image = tf.reshape(image, shape=[1,ORIGINAL_HEIGHT*ORIGINAL_WIDTH*COLOR_CHAN])\n image = tf.reshape(image, shape=[ORIGINAL_HEIGHT, ORIGINAL_WIDTH, COLOR_CHAN])\n if IMG_HEIGHT != IMG_WIDTH:\n raise ValueError('Unequal height and width unsupported')\n crop_size = min(ORIGINAL_HEIGHT, ORIGINAL_WIDTH)\n image = tf.image.resize_image_with_crop_or_pad(image, crop_size, crop_size)\n image = tf.reshape(image, [1, crop_size, crop_size, COLOR_CHAN])\n image = tf.image.resize_bicubic(image, [IMG_HEIGHT, IMG_WIDTH])\n image = tf.cast(image, tf.float32) / 255.0\n image_main_seq.append(image)\n\n image = tf.decode_raw(features[image_aux1_name], tf.uint8)\n image = tf.reshape(image, shape=[1, ORIGINAL_HEIGHT * ORIGINAL_WIDTH * COLOR_CHAN])\n image = tf.reshape(image, shape=[ORIGINAL_HEIGHT, ORIGINAL_WIDTH, COLOR_CHAN])\n if IMG_HEIGHT != IMG_WIDTH:\n raise ValueError('Unequal height and width unsupported')\n crop_size = min(ORIGINAL_HEIGHT, ORIGINAL_WIDTH)\n image = tf.image.resize_image_with_crop_or_pad(image, crop_size, crop_size)\n image = tf.reshape(image, [1, crop_size, crop_size, COLOR_CHAN])\n image = tf.image.resize_bicubic(image, [IMG_HEIGHT, IMG_WIDTH])\n image = tf.cast(image, tf.float32) / 255.0\n image_aux1_seq.append(image)\n\n if 'canon_ex' in conf:\n init_pix_distrib = tf.decode_raw(features[init_pix_distrib_name], tf.uint8)\n init_pix_distrib = tf.reshape(init_pix_distrib, shape=[1, ORIGINAL_HEIGHT * ORIGINAL_WIDTH])\n init_pix_distrib = tf.reshape(init_pix_distrib, shape=[ORIGINAL_HEIGHT, ORIGINAL_WIDTH, 1])\n crop_size = min(ORIGINAL_HEIGHT, ORIGINAL_WIDTH)\n init_pix_distrib = tf.image.resize_image_with_crop_or_pad(init_pix_distrib, crop_size, crop_size)\n init_pix_distrib = tf.reshape(init_pix_distrib, [1, crop_size, crop_size, 1])\n init_pix_distrib = tf.image.resize_bicubic(init_pix_distrib, [IMG_HEIGHT, IMG_WIDTH])\n init_pix_distrib = tf.cast(init_pix_distrib, tf.float32) / 255.0\n init_pix_distrib_seq.append(init_pix_distrib)\n\n init_pix_pos = tf.reshape(features[init_pix_pos_name], shape=[1, 2])\n init_pix_pos_seq.append(init_pix_pos)\n\n endeffector_pos = tf.reshape(features[endeffector_pos_name], shape=[1, STATE_DIM])\n endeffector_pos_seq.append(endeffector_pos)\n action = tf.reshape(features[action_name], shape=[1, ACION_DIM])\n action_seq.append(action)\n\n if 'single_view' not in conf:\n # image_main_seq = tf.concat(values=image_main_seq, axis=0)\n image_main_seq = tf.concat(concat_dim=0, values=image_main_seq)\n\n # image_aux1_seq = tf.concat(values=image_aux1_seq, axis=0)\n image_aux1_seq = tf.concat(concat_dim=0, values=image_aux1_seq)\n\n if conf['visualize']: num_threads = 1\n else: num_threads = np.min((conf['batch_size'], 32))\n\n if 'ignore_state_action' in conf:\n [image_main_batch, image_aux1_batch] = tf.train.batch(\n [image_main_seq, image_aux1_seq],\n conf['batch_size'],\n num_threads=num_threads,\n capacity=100 * conf['batch_size'])\n return image_main_batch, image_aux1_batch, None, None\n elif 'canon_ex' in conf:\n endeffector_pos_seq = tf.concat(endeffector_pos_seq, 0)\n action_seq = tf.concat(action_seq, 0)\n\n init_pix_pos_seq = tf.concat(init_pix_pos_seq, 0)\n init_pix_distrib_seq = tf.concat(init_pix_distrib_seq, 0)\n\n [image_aux1_batch, action_batch, endeffector_pos_batch, init_pix_distrib_batch, 
init_pix_pos_batch] = tf.train.batch(\n [image_aux1_seq, action_seq, endeffector_pos_seq, init_pix_distrib_seq, init_pix_pos_seq],\n conf['batch_size'],\n num_threads=num_threads,\n capacity=100 * conf['batch_size'])\n return image_aux1_batch, action_batch, endeffector_pos_batch, init_pix_distrib_batch, init_pix_pos_batch\n\n elif 'single_view' in conf:\n # endeffector_pos_seq = tf.concat(endeffector_pos_seq, 0)\n # action_seq = tf.concat(action_seq, 0)\n endeffector_pos_seq = tf.concat(0, endeffector_pos_seq)\n action_seq = tf.concat(0, action_seq)\n [image_aux1_batch, action_batch, endeffector_pos_batch] = tf.train.batch(\n [image_aux1_seq, action_seq, endeffector_pos_seq],\n conf['batch_size'],\n num_threads=num_threads,\n capacity=100 * conf['batch_size'])\n return image_aux1_batch, action_batch, endeffector_pos_batch\n\n else:\n # endeffector_pos_seq = tf.concat(endeffector_pos_seq, 0)\n endeffector_pos_seq = tf.concat(0, endeffector_pos_seq)\n # action_seq = tf.concat(action_seq, 0)\n action_seq = tf.concat(0, action_seq)\n [image_main_batch, image_aux1_batch, action_batch, endeffector_pos_batch] = tf.train.batch(\n [image_main_seq,image_aux1_seq, action_seq, endeffector_pos_seq],\n conf['batch_size'],\n num_threads=num_threads,\n capacity=100 * conf['batch_size'])\n return image_main_batch, image_aux1_batch, action_batch, endeffector_pos_batch", "def read_label_data(mode, image_type):\n return np.loadtxt(parse_path(mode, image_type, True), dtype=int, delimiter='\\n')", "def _parse_single(filename, label, image_size=IMAGE_SIZE):\n # Decode and convert image to appropriate type\n image = tf.image.decode_png(tf.read_file(filename), channels=image_size[2])\n image = tf.image.convert_image_dtype(image, tf.float32) # Also scales from [0, 255] to [0, 1)\n # Resize according to module requirements\n image = tf.image.resize_images(image, image_size[:2])\n return image, label", "def read_tfexamples(p, uri, label):\n\n dataset = tf.data.TFRecordDataset(tf.data.Dataset.list_files(f'{uri}/*'),\n compression_type=\"GZIP\")\n\n # Take the input TFRecordDataset and extract the class label that we want.\n # Output format is a K-V PCollection: {class_label: TFRecord in string format}\n data = (p\n | \"DatasetToPCollection\" >> beam.Create(dataset)\n | \"MapToLabel\" >> beam.Map(_generate_elements, label))\n return data", "def file_reader(image_file, label_file):\n\n image = im.imread(image_file)\n\n with open(label_file, \"r\") as file:\n label = float(file.read())\n\n return image, label", "def _create_tf_example(data):\n # File path url\n full_path = os.path.join(os.getcwd(), FLAGS.img_folder,\n '{}'.format(data['name']))\n\n # Read encoded image file, and get properties we need.\n with tf.gfile.GFile(full_path, 'rb') as fid:\n encoded_jpg = fid.read()\n encoded_jpg_io = io.BytesIO(encoded_jpg)\n image = Image.open(encoded_jpg_io)\n width, height = image.size\n filename = data['name'].encode('utf8')\n image_format = b'jpg'\n label_x = data['x']\n label_y = data['y']\n\n # After geting all the features, time to generate tensorflow record file.\n tf_example = tf.train.Example(features=tf.train.Features(feature={\n 'image/height': _int64_feature(height),\n 'image/width': _int64_feature(width),\n 'image/filename': _bytes_feature(filename),\n 'image/source_id': _bytes_feature(filename),\n 'image/encoded': _bytes_feature(encoded_jpg),\n 'image/format': _bytes_feature(image_format),\n 'label/x': _int64_feature(label_x),\n 'label/y': _int64_feature(label_y),\n }))\n return tf_example", "def 
read_tfrecord(file_path, cycle_length=5, num_parallel_calls=10):\n\n files = tf.data.Dataset.list_files(file_path)\n dataset = files.apply(\n tf.contrib.data.parallel_interleave(\n tf.data.TFRecordDataset, cycle_length=cycle_length))\n dataset = dataset.map(_parse_bytes_sample, num_parallel_calls)\n return dataset", "def extract_labels(filename, num_images):\n\n# this function definition has been taken from internet \n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(8)\n buf = bytestream.read(1 * num_images)\n labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64) #Interpret a buffer as a 1-dimensional array\n return labels", "def train_input(config, params):\n \"\"\"\n rawimages: Nb x hf x wf x 3, tf.float32, in [0,1]\n rawlabels: Nb x hf x wf, tf.int32, in [0,Nc-1]\n rawmetadata: Python dictionary with matadata (e.g. image shape, dtype)\n proimages: Nb x hf x wf x 3, tf.float32, in [0,1]\n prolabels: Nb x hf x wf, tf.int32, in [0,Nc-1]\n \"\"\"\n # runconfig = config.runconfig\n # # otherconfig includes: train_preprocess, mappings\n # otherconfig = config.otherconfig\n # hparams = params\n\n # reading, mapping labels to problem from otherconfig['lids2cids'],\n # batching, preprocessing with otherconfig['train_preprocess'], output\n\n # no obvious use of prodata metadata for now\n with tf.variable_scope('input_pipeline'):\n values = None\n for num_dataset in range(len(params.tfrecords_list)): # , params.camvid_tfrecords_path]:\n if values is None:\n values = train_input_per_data(config, params, num_dataset)\n values = list(values) + [num_dataset*tf.ones([params.Nb_list[num_dataset], ], dtype=tf.int32)]\n else:\n _values = list(train_input_per_data(config, params, num_dataset)) + \\\n [num_dataset*tf.ones([params.Nb_list[num_dataset], ], dtype=tf.int32)]\n values = [tf.concat((value1, value2), 0) for value1, value2 in zip(values, _values)]\n\n features = {'rawimages': values[1],\n 'proimages': values[3],\n 'rawimagespaths': values[0][0],\n 'rawlabelspaths': values[0][1]}\n labels = {'rawlabels': values[2],\n 'prolabels': values[4],\n 'domainlabels': values[5]}\n return features, labels", "def _decode_record(record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n print(name)\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example", "def convert_pickle_to_tfrecord(input_files, output_file):\n print('Generating %s' % output_file)\n with tf.python_io.TFRecordWriter(output_file) as writer:\n # draw 10 random number for getting 10 random classes from Imagenet (fixed value for reproducibility)\n # class_id = [145, 153, 289, 404, 405, 510, 805, 817, 867, 950] # random.sample(range(0, 999), 10)\n # class_id = [153, 156, 161, 174, 197, 207, 215, 216, 218, 224, 227, 230, 236, 254, 260] # 15 dog classes (also used in DAC)\n\n # count = np.zeros(shape=len(class_id))\n for input_file in input_files:\n data_dict = read_pickle_from_file(input_file)\n data = data_dict['data']\n mean_img = data_dict['mean']\n labels = data_dict['labels']\n # Labels are indexed from 1, shift it so that indexes start at 0 (imagenet)\n labels = [i - 1 for i in labels]\n\n num_entries_in_batch = len(labels)\n print('Converting %s' % input_file)\n for i in range(num_entries_in_batch):\n # if labels[i] in class_id:\n # labels[i] = class_id.index(labels[i]) 
# put the labels into the range of 0 to no. clusters\n example = tf.train.Example(\n features=tf.train.Features(\n feature={\n 'height': _int64_feature(64),\n 'width': _int64_feature(64),\n 'depth': _int64_feature(3),\n 'image': _bytes_feature(data[i].tobytes()),\n 'mean_img': _bytes_feature(mean_img.tobytes()),\n 'label': _int64_feature(labels[i])\n }))\n writer.write(example.SerializeToString())\n # count[labels[i]] += 1 # count number of samples per class\n # for idx, num in enumerate(count):\n # print('Number of samples of class %d: %d' % (idx, num))\n # print('Total Number of samples %d' % np.sum(count))", "def load_batch(fpath, label_key='labels'):\n f = open(fpath, 'rb')\n if sys.version_info < (3,):\n d = cPickle.load(f)\n else:\n d = cPickle.load(f, encoding='bytes')\n # decode utf8\n d_decoded = {}\n for k, v in d.items():\n d_decoded[k.decode('utf8')] = v\n d = d_decoded\n f.close()\n data = d['data']\n labels = d[label_key]\n\n data = data.reshape(data.shape[0], 3, 32, 32)\n return data, labels", "def _file_read_op(self, file_names, batch_size,\n num_epochs, num_labels=None, comment_length=None,\n record_defaults=None, *args, **kwargs):\n self.num_labels = num_labels\n self.comment_length = comment_length\n self.batch_size = batch_size\n\n reader = tf.TextLineReader(skip_header_lines=1)\n queue = tf.train.string_input_producer(file_names,\n num_epochs=num_epochs,\n shuffle=True)\n\n _, value = reader.read(queue)\n\n record_defaults = record_defaults if record_defaults\\\n else [[''], [''], [0], [0], [0], [0], [0], [0], *([[-3]] * comment_length)]\n cols = tf.decode_csv(value, record_defaults=record_defaults)\n comment_id = cols[0]\n comment_text = tf.stack(cols[-comment_length:])\n toxicity = tf.stack(cols[2:8])\n\n min_after_dequeue = 10000\n capacity = min_after_dequeue + 4 * batch_size\n self.comment_batch, self.toxicity_batch, self.id_batch = tf.train.shuffle_batch(\n [comment_text, toxicity, comment_id], batch_size=batch_size,\n capacity=capacity, min_after_dequeue=min_after_dequeue\n )\n self.toxicity_batch = tf.cast(self.toxicity_batch, dtype=tf.float32)", "def read_label_file(self, label_file_name = None): #completed\n if label_file_name is None:\n label_file_name = self.label_file_name\n try:\n label_data = sp.loadmat(label_file_name)['labels'].astype(np.int32)\n return label_data#[:,1], label_data[:,0]#in MATLAB format\n except IOError:\n print \"Unable to open \", label_file_name, \"... Exiting now\"\n sys.exit()", "def _parse_function(filename, label):\n\n raw_input = tf.io.read_file(filename=filename)\n\n image_decoded = tf.image.decode_jpeg(contents=raw_input, channels=3)\n # image_decoded = tf.image.decode_png(contents=raw_input, channels=3)\n # image_decoded = tf.image.decode_image(contents=raw_input)\n\n image_decoded = tf.image.convert_image_dtype(image_decoded, tf.float32)\n # image_decoded = tf.cast(image_decoded, tf.int32)\n\n image_decoded = tf.image.resize_images(images=image_decoded, size=[load_size, load_size],\n method=tf.image.ResizeMethod.AREA,\n align_corners=True)\n\n # image_size = image_decoded.shape.as_list()\n if mode == 'train':\n image_decoded = tf.image.resize_image_with_crop_or_pad(image_decoded, load_size + 4, load_size + 4)\n image_decoded = tf.random_crop(image_decoded, [load_size, load_size, 3])\n image_decoded = tf.image.random_flip_left_right(image_decoded)\n # Brightness/saturation/constrast provides small gains .2%~.5% on cifar.\n image_decoded = tf.image.random_brightness(image_decoded, max_delta=63. 
/ 255.)\n image_decoded = tf.image.random_saturation(image_decoded, lower=0.5, upper=1.5)\n image_decoded = tf.image.random_contrast(image_decoded, lower=0.2, upper=1.8)\n image_decoded = tf.image.per_image_standardization(image_decoded)\n\n return image_decoded, label", "def parse_record(record, training): \n # Reshape from [depth * height * width] to [depth, height, width].\n # depth_major = tf.reshape(record, [3, 32, 32])\n depth_major = record.reshape((3, 32, 32))\n\n # Convert from [depth, height, width] to [height, width, depth]\n # image = tf.transpose(depth_major, [1, 2, 0])\n image = np.transpose(depth_major, [1, 2, 0])\n\n image = preprocess_image(image, training) # If any.\n\n return image", "def load_trec(loc=DATA_DIR):\n train, test = [], []\n with open(os.path.join(loc, 'TREC', 'train_5500.label'), 'rb') as f:\n for line in f:\n train.append(line.strip())\n with open(os.path.join(loc, 'TREC', 'TREC_10.label'), 'rb') as f:\n for line in f:\n test.append(line.strip())\n return train, test", "def input_fn(params):\n batch_size = params['batch_size']\n data_dir = params['data_dir']\n noise_dim = params['noise_dim']\n def parser(serialized_example):\n \"\"\"Parses a single tf.Example into image and label tensors.\"\"\"\n features = tf.parse_single_example(\n serialized_example,\n features={\n \"image\": tf.FixedLenFeature([], tf.string),\n \"label\": tf.FixedLenFeature([], tf.int64),\n })\n image = tf.decode_raw(features[\"image\"], tf.uint8)\n image.set_shape([CHANNELS * HEIGHT * WIDTH])\n # Reshape from [depth * height * width] to [depth, height, width].\n image = tf.cast(\n tf.transpose(tf.reshape(image, [CHANNELS, HEIGHT, WIDTH]), [1, 2, 0]),\n tf.float32) * (2. / 255) - 1\n\n label = tf.cast(features['label'], tf.int32)\n\n random_noise = tf.random_normal([noise_dim])\n features = {\n 'real_images': image,\n 'random_noise': random_noise}\n\n return features, label\n\n # TODO we should use an eval dataset for EVAL # pylint: disable=fixme\n image_files = [os.path.join(data_dir, 'train.tfrecords')]\n tf.logging.info(image_files)\n dataset = tf.data.TFRecordDataset([image_files])\n dataset = dataset.map(parser, num_parallel_calls=batch_size)\n dataset = dataset.prefetch(4 * batch_size).cache().repeat()\n if USE_ALTERNATIVE:\n dataset = dataset.apply(tf.contrib.data.batch_and_drop_remainder(batch_size))\n tf.logging.warning('Old version: Used tf.contrib.data.batch_and_drop_remainder instead of regular batch')\n else:\n dataset = dataset.batch(batch_size, drop_remainder=True)\n # Not sure why we use one_shot and not initializable_iterator\n features, labels = dataset.make_one_shot_iterator().get_next()\n\n return features, labels", "def _decode_record(record):\r\n example = tf.io.parse_single_example(serialized=record, features=feature_description)\r\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\r\n # So cast all int64 to int32.\r\n for key in [k for k in example.keys() if k not in ['example_id', 'unique_ids']]:\r\n example[key] = tf.cast(example[key], dtype=tf.int32)\r\n if is_training:\r\n features = {\r\n 'input_ids': example['input_ids'],\r\n 'input_mask': example['input_mask'],\r\n 'segment_ids': example['segment_ids']\r\n }\r\n labels = {\r\n 'start_logits_or_probs': tf.one_hot(example['start_positions'],\r\n depth=seq_length, dtype=tf.float32),\r\n 'end_logits_or_probs': tf.one_hot(example['end_positions'],\r\n depth=seq_length, dtype=tf.float32),\r\n 'ans_type': tf.one_hot(example['answer_types'],\r\n depth=len(ANSWER_TYPE_ORDER), 
dtype=tf.float32)\r\n }\r\n return (features, labels)\r\n else:\r\n return example", "def write_tfrecord(data_list, output_dir, batch_size_per_file=100):\n for data_category in data_list.keys():\n file_basename = os.path.join(output_dir, data_category)\n for i, (image_path, label_path) in tqdm.tqdm(\n enumerate(\n zip(data_list[data_category]['image_list'],\n data_list[data_category]['label_list']))):\n if i % batch_size_per_file == 0:\n if i != 0:\n writer.close()\n filename = file_basename + '_{:04d}.tfrecord'.format(\n int(i / batch_size_per_file))\n writer = tf.python_io.TFRecordWriter(filename)\n logging.info('Start writing {} data to {}'.format(\n data_category, filename))\n\n filename = image_path\n image = np.array(Image.open(image_path))\n label = np.array(Image.open(label_path))\n height = image.shape[0]\n width = image.shape[1]\n image_raw = image.tostring()\n label_raw = label.tostring()\n example = tf.train.Example(\n features=tf.train.Features(\n feature={\n 'height':\n tf.train.Feature(\n int64_list=tf.train.Int64List(value=[height])),\n 'width':\n tf.train.Feature(\n int64_list=tf.train.Int64List(value=[width])),\n 'image_raw':\n tf.train.Feature(\n bytes_list=tf.train.BytesList(value=[image_raw])),\n 'label_raw':\n tf.train.Feature(\n bytes_list=tf.train.BytesList(value=[label_raw])),\n 'filename':\n tf.train.Feature(\n bytes_list=tf.train.BytesList(\n value=[str.encode(filename)]))\n }))\n writer.write(example.SerializeToString())\n writer.close()", "def dict_to_tf_example(data, label_map_dict):\n\n encoded_jpg_io = io.BytesIO()\n image = data['image']\n image.save(encoded_jpg_io, \"JPEG\", quality=80)\n encoded_jpg = encoded_jpg_io.getvalue()\n key = hashlib.sha256(encoded_jpg).hexdigest()\n\n width, height = image.size\n\n xmin = []\n ymin = []\n xmax = []\n ymax = []\n rotation = []\n classes = []\n classes_text = []\n truncated = []\n poses = []\n masks = []\n difficult_obj = []\n for obj in data['object']:\n difficult = bool(int(obj['difficult']))\n difficult_obj.append(int(difficult))\n\n xmin.append(float(obj['bndbox']['xmin']) / width)\n ymin.append(float(obj['bndbox']['ymin']) / height)\n xmax.append(float(obj['bndbox']['xmax']) / width)\n ymax.append(float(obj['bndbox']['ymax']) / height)\n rotation.append(float(obj['rotation']))\n masks.append(obj['mask'])\n classes_text.append(obj['name'].encode('utf8'))\n classes.append(label_map_dict[obj['name']])\n truncated.append(int(obj['truncated']))\n poses.append(obj['pose'].encode('utf8'))\n\n mask = np.stack(masks)\n encoded_mask = pn_encode(mask.flatten()).tolist()\n print('mask encode:', mask.shape, '->', len(encoded_mask)) ###\n example = tf.train.Example(features=tf.train.Features(feature={\n 'image/height': dataset_util.int64_feature(height),\n 'image/width': dataset_util.int64_feature(width),\n 'image/filename': dataset_util.bytes_feature(\n data['filename'].encode('utf8')),\n 'image/source_id': dataset_util.bytes_feature(\n data['filename'].encode('utf8')),\n 'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')),\n 'image/encoded': dataset_util.bytes_feature(encoded_jpg),\n 'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')),\n 'image/object/bbox/xmin': dataset_util.float_list_feature(xmin),\n 'image/object/bbox/xmax': dataset_util.float_list_feature(xmax),\n 'image/object/bbox/ymin': dataset_util.float_list_feature(ymin),\n 'image/object/bbox/ymax': dataset_util.float_list_feature(ymax),\n 'image/object/rotation': dataset_util.float_list_feature(rotation),\n 
'image/object/class/text': dataset_util.bytes_list_feature(classes_text),\n 'image/object/class/label': dataset_util.int64_list_feature(classes),\n 'image/object/difficult': dataset_util.int64_list_feature(difficult_obj),\n 'image/object/truncated': dataset_util.int64_list_feature(truncated),\n 'image/object/view': dataset_util.bytes_list_feature(poses),\n 'image/segmentation/object': dataset_util.int64_list_feature(encoded_mask),\n 'image/segmentation/object/class': dataset_util.int64_list_feature(classes),\n }))\n return example", "def load_labels():\n filename = os.path.join(config['inference']['model_dir'], 'output_labels.txt')\n global labels\n labels = [line.rstrip() for line in tf.gfile.FastGFile(filename)]", "def load_labels(filename):\n return [line.rstrip() for line in tf.gfile.GFile(filename)]", "def load_labels(filename):\n return [line.rstrip() for line in tf.gfile.GFile(filename)]", "def _convert_to_example(filename, image_buffer, label, height, width):\n\n colorspace = 'RGB'\n channels = 3\n image_format = 'JPEG'\n\n example = tf.train.Example(features=tf.train.Features(feature={\n 'image/height': _int64_feature(height),\n 'image/width': _int64_feature(width),\n 'image/colorspace': _bytes_feature(tf.compat.as_bytes(colorspace)),\n 'image/channels': _int64_feature(channels),\n 'image/class/label': _float64_feature(label),\n 'image/class/p1': _float64_feature(label[0:2]),\n 'image/class/p2': _float64_feature(label[2:4]),\n 'image/class/p3': _float64_feature(label[4:6]),\n 'image/class/p4': _float64_feature(label[6:8]),\n 'image/format': _bytes_feature(tf.compat.as_bytes(image_format)),\n 'image/filename': _bytes_feature(tf.compat.as_bytes(os.path.basename(filename))),\n 'image/encoded': _bytes_feature(tf.compat.as_bytes(image_buffer))}))\n return example", "def _decode_record(record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example", "def _decode_record(record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example", "def write_record(dataset, filename):\n writer = tf.python_io.TFRecordWriter(filename)\n print('result dim:')\n print(dataset['images'].shape)\n for image, label, meta in zip(dataset['images'], dataset['labels'],\n dataset['meta']):\n example = tf.train.Example(\n features=tf.train.Features(\n feature={\n 'height': _int64_feature(image.shape[1]),\n 'width': _int64_feature(image.shape[2]),\n 'depth': _int64_feature(2),\n 'label': _int64_feature(label),\n 'meta': _int64_list_feature(meta),\n 'image_raw': _bytes_feature(image.tostring()),\n }))\n writer.write(example.SerializeToString())\n writer.close()", "def _decode_record(self, record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n # tf.logging.info(t)\n # t = tf.sparse.to_dense(t)\n # 
tf.logging.info(t.get_shape().as_list())\n # assert t.get_shape().as_list()[0] is not None\n example[name] = t\n \n del example[\"source_sos_ids\"]\n del example[\"source_sos_mask\"]\n\n return example", "def input_fn(is_training, data_dir, reid_data_dir= None,batch_size=32, num_epochs=1):\n dataset = tf.data.Dataset.from_tensor_slices(get_filenames(is_training, data_dir))\n dataset_seg = dataset.flat_map(tf.data.TFRecordDataset)\n\n # dataset_reid = tf.data.Dataset.from_tensor_slices(get_filenames_reid(is_training, reid_data_dir))\n # dataset_reid = dataset_reid.flat_map(tf.data.TFRecordDataset)\n\n\n if is_training:\n # When choosing shuffle buffer sizes, larger sizes result in better\n # randomness, while smaller sizes have better performance.\n # is a relatively small dataset, we choose to shuffle the full epoch.\n dataset_seg = dataset_seg.shuffle(buffer_size=_NUM_IMAGES['train'])\n # dataset_reid = dataset_reid.shuffle(buffer_size=30248)\n\n\n dataset_seg = dataset_seg.map(parse_record)\n dataset_seg = dataset_seg.map(lambda image, label: preprocess_image(image, label, is_training))\n dataset_seg = dataset_seg.prefetch(batch_size)\n dataset_seg = dataset_seg.repeat(num_epochs)\n dataset_seg = dataset_seg.batch(batch_size)\n\n # dataset_reid = dataset_reid.map(parse_record_reid)\n # dataset_reid = dataset_reid.map(lambda image, label: preprocess_image_reid(image, label, is_training))\n # dataset_reid = dataset_reid.prefetch(batch_size)\n # dataset_reid = dataset_reid.repeat(num_epochs)\n # dataset_reid = dataset_reid.batch(batch_size)\n\n # iterator = dataset_reid.make_one_shot_iterator()\n # images_reid, label_reid = iterator.get_next()\n\n train_record_file = os.path.join(reid_data_dir, 'train-512-170.tfrecords')\n val_record_file = os.path.join(reid_data_dir, 'val-512-170.tfrecords')\n\n train_images, train_labels = read_records(train_record_file, _HEIGHT, _WIDTH, type='normalization')\n train_images_batch, train_labels_batch = get_batch_images(train_images, train_labels,\n batch_size=batch_size, labels_nums=labels_nums,\n one_hot=True, shuffle=True)\n print(\"reid2222222\", train_images_batch.shape, train_labels_batch.shape)\n val_images, val_labels = read_records(val_record_file, _HEIGHT, _WIDTH, type='normalization')\n val_images_batch, val_labels_batch = get_batch_images(val_images, val_labels,\n batch_size=batch_size, labels_nums=labels_nums,\n one_hot=True, shuffle=False)\n images_reid = train_images_batch\n label_reid = train_labels_batch\n # if is_training:\n # images_reid = train_images_batch\n # label_reid = train_labels_batch\n # else:\n # images_reid = val_images_batch\n # label_reid = val_labels_batch\n iterator = dataset_seg.make_one_shot_iterator()\n images_seg, label_seg = iterator.get_next()\n\n images = {\"seg\": images_seg, \"reid\": images_reid}\n labels = {\"seg\": label_seg, \"reid\": label_reid}\n\n # labels_seg_reid = tf.zeros(shape=[batch_size, labels_nums], dtype=tf.int32)\n # labels_reid_seg = tf.zeros(shape=[batch_size, 512, 170, 1], dtype=tf.int32)\n\n # images = tf.concat([images_seg, images_reid], 0)\n # labels_seg_all = tf.concat([label_seg, labels_reid_seg], 0)\n # labels_reid_all = tf.concat([labels_seg_reid, label_reid], 0)\n # labels = {\"seg\": labels_seg_all, \"reid\": labels_reid_all}\n # batch_out= 1\n\n return images, labels", "def read_inaturalist(key='train', batch_size=64, image_size=299, target_size=None, do_augment=False, buffer_size=2000):\n\n data_size = {'train': 265213, 'val': 3030, 'test': 35350}\n data_label = {'train': 1, 'val': 
1, 'test': 0}\n num_images = data_size[key]\n steps_per_epoch = num_images // batch_size\n skip_count = num_images % batch_size\n num_labels = data_label[key]\n num_classes = 1010\n if target_size is None:\n target_size = image_size\n if image_size != target_size:\n print('Image size {} does not equal target size {}. Resize to be done.'.format(image_size, target_size))\n\n filenames = os.listdir(FLAGS.DEFAULT_IN + 'tfrecords_{}/'.format(FLAGS.TARGET_SIZE))\n filenames = [filename.replace('.tfrecords', '') for filename in filenames if key in filename]\n if key == 'test':\n filenames = sorted(filenames) # test tfrecords must be read in order\n print('Reading tfrecords from {}'.format(FLAGS.DEFAULT_IN + 'tfrecords_{}/'.format(FLAGS.TARGET_SIZE)))\n print('The following tfrecords are read: {}'.format(filenames))\n\n dataset = ReadTFRecords(\n filenames, num_labels=num_labels, batch_size=batch_size, buffer_size=buffer_size,\n skip_count=skip_count, num_threads=8, decode_jpeg=True,\n use_one_hot_label=True, use_smooth_label=True if key == 'train' else False, num_classes=num_classes)\n if do_augment:\n from GeneralTools.inception_preprocessing import preprocess_image\n # apply basic data augmentation (random crops, random left-right flipping, color distortion)\n dataset.image_preprocessor(\n 3, image_size, image_size,\n image_augment_fun=lambda x: preprocess_image(\n x, height=target_size, width=target_size,\n is_training=True if key == 'train' else False, fast_mode=False))\n if target_size != image_size:\n dataset.batch_shape = [batch_size, target_size, target_size, 3] \\\n if FLAGS.IMAGE_FORMAT == 'channels_last' else [batch_size, 3, target_size, target_size]\n else:\n dataset.image_preprocessor(\n 3, image_size, image_size,\n resize=None if target_size == image_size else [target_size, target_size])\n dataset.scheduler(shuffle_data=False if key == 'test' else True)\n\n return dataset, steps_per_epoch", "def _read_and_decode(example_proto,data_shape,dtypes):\n features = {}\n for name in data_shape:\n features[name] = tf.FixedLenFeature([], tf.string)\n parsed_features = tf.parse_single_example(example_proto, features)\n count = 0\n res = {}\n for name in data_shape:\n res[name] = parsed_features[name]\n if dtypes[count]!=str:\n res[name]=tf.decode_raw(res[name],dtypes[count])\n if dtypes[count]==tf.float32 or dtypes[count]==tf.float64:\n res[name]=tf.convert_to_tensor(res[name],dtype=dtypes[count])\n if data_shape[name]:\n res[name]=tf.reshape(res[name],shape=data_shape[name])\n count += 1\n return res", "def read_batch(self):\n imgs = []\n labels = []\n idx = np.random.choice(self.nImgs,self.batch_size)\n \tfor i in idx:\n imgs.append(cv2.imread(self.data_files[i]))\n \t labels.append(cv2.imread(self.label_files[i]))\n \timgs,labels = np.array(imgs),np.array(labels)\n imgs = (imgs - self.mean)/self.stddev\n \tlabels = (labels - self.mean)/self.stddev\n return imgs,labels", "def load_food_image_batch(filename, num):\n with open(filename, 'rb') as f:\n datadict = pickle.load(f)\n url_parts = datadict['Image URL'].split(\"/\")\n img_fn = url_parts[-1]\n with open(img_fn):\n X = f.read()\n Y = datadict['coarse_labels']\n X = X.reshape(num, 3, 32, 32).transpose(0,2,3,1).astype(\"float\")\n Y = np.array(Y)\n return X, Y", "def load_labels(filename):\n return [line.rstrip() for line in tf.gfile.GFile(filename)]", "def read_inputs(filename, height, padding, num_quant_levels, p_norm,\n predict_semantics):\n for record in tf.python_io.tf_record_iterator(filename):\n example = tf.train.Example()\n 
example.ParseFromString(record)\n feature_map = example.features\n # Input scan as sdf.\n input_scan = read_input_float_feature(feature_map, 'input_sdf', shape=None)\n (scene_dim_z, scene_dim_y, scene_dim_x) = input_scan.shape\n # Target scan as df.\n if 'target_df' in feature_map.feature:\n target_scan = read_input_float_feature(\n feature_map, 'target_df', [scene_dim_z, scene_dim_y, scene_dim_x])\n if 'target_sem' in feature_map.feature:\n target_semantics = read_input_bytes_feature(\n feature_map, 'target_sem', [scene_dim_z, scene_dim_y, scene_dim_x])\n # Adjust dimensions for model (clamp height, make even for voxel groups).\n height_y = min(height, scene_dim_y - padding)\n scene_dim_x = (scene_dim_x // 2) * 2\n scene_dim_y = (height_y // 2) * 2\n scene_dim_z = (scene_dim_z // 2) * 2\n input_scan = input_scan[:scene_dim_z, padding:padding + scene_dim_y, :\n scene_dim_x]\n input_scan = util.preprocess_sdf(input_scan, constants.TRUNCATION)\n if target_scan is not None:\n target_scan = target_scan[:scene_dim_z, padding:padding + scene_dim_y, :\n scene_dim_x]\n target_scan = util.preprocess_df(target_scan, constants.TRUNCATION)\n if target_semantics is not None:\n target_semantics = target_semantics[:scene_dim_z, padding:\n padding + scene_dim_y, :scene_dim_x]\n target_semantics = util.preprocess_target_sem(target_semantics)\n\n # Default values for previous resolution inputs.\n prediction_scan_low_resolution = np.zeros(\n [scene_dim_z // 2, scene_dim_y // 2, scene_dim_x // 2, 2])\n prediction_semantics_low_resolution = np.zeros(\n [scene_dim_z // 2, scene_dim_y // 2, scene_dim_x // 2], dtype=np.uint8)\n if target_semantics is None:\n target_semantics = np.zeros([scene_dim_z, scene_dim_y, scene_dim_x])\n\n # Load previous level prediction.\n if not FLAGS.is_base_level:\n previous_file = os.path.join(\n FLAGS.output_dir_prev, 'level' + str(FLAGS.hierarchy_level - 1) + '_' +\n os.path.splitext(os.path.basename(filename))[0] + 'pred.tfrecord')\n tf.logging.info('Reading previous predictions frome file: %s',\n previous_file)\n assert os.path.isfile(previous_file)\n for record in tf.python_io.tf_record_iterator(previous_file):\n prev_example = tf.train.Example()\n prev_example.ParseFromString(record)\n prev_feature_map = prev_example.features\n prediction_scan_low_resolution = read_input_float_feature(\n prev_feature_map, 'prediction_df', None)\n (prev_scene_dim_z, prev_scene_dim_y,\n prev_scene_dim_x) = prediction_scan_low_resolution.shape\n offset_z = (prev_scene_dim_z - scene_dim_z // 2) // 2\n offset_x = (prev_scene_dim_x - scene_dim_x // 2) // 2\n prediction_scan_low_resolution = prediction_scan_low_resolution[\n offset_z:offset_z + scene_dim_z // 2, :scene_dim_y // 2, offset_x:\n offset_x + scene_dim_x // 2]\n prediction_scan_low_resolution = util.preprocess_target_sdf(\n prediction_scan_low_resolution, num_quant_levels, constants.TRUNCATION,\n p_norm == 0)\n if predict_semantics:\n prediction_semantics_low_resolution = read_input_bytes_feature(\n prev_feature_map, 'prediction_sem',\n [prev_scene_dim_z, prev_scene_dim_y, prev_scene_dim_x])\n prediction_semantics_low_resolution = prediction_semantics_low_resolution[\n offset_z:offset_z + scene_dim_z // 2, :scene_dim_y // 2, offset_x:\n offset_x + scene_dim_x // 2]\n return (input_scan, target_scan, target_semantics,\n prediction_scan_low_resolution, prediction_semantics_low_resolution)", "def convert_dataset_to_tfrecord(data_set, save_dir, name):\n images = data_set.images\n labels = data_set.labels\n num_samples = 
data_set.num_examples\n\n rows = images.shape[1]\n cols = images.shape[2]\n depth = images.shape[3]\n\n filename = os.path.join(save_dir, name + '.tfrecords')\n with tf.python_io.TFRecordWriter(filename) as writer:\n for index in range(num_samples):\n image_raw = images[index].tostring()\n example = tf.train.Example(\n features=tf.train.Features(\n feature={\n 'height': _int64_feature(rows),\n 'width': _int64_feature(cols),\n 'depth': _int64_feature(depth),\n 'label': _int64_feature(int(labels[index])),\n 'image': _bytes_feature(image_raw)\n }))\n writer.write(example.SerializeToString())", "def _add_to_tfrecord(record_dir,num_images,image_h, image_w, split_name):\n dataset_train = ShapesDataset()\n dataset_train.load_shapes(num_images, image_h, image_w)\n\n num_shards = int(num_images /2500 ) # 2500\n num_per_shard = int(math.ceil(num_images / float(num_shards)))\n height, width=image_h, image_w\n\n for shard_id in range(num_shards):\n record_filename = _get_dataset_filename(record_dir, split_name, shard_id, num_shards)\n options = tf.python_io.TFRecordOptions(TFRecordCompressionType.ZLIB)\n\n with tf.python_io.TFRecordWriter(record_filename, options=options) as tfrecord_writer:\n start_ndx = shard_id * num_per_shard\n end_ndx = min((shard_id + 1) * num_per_shard, num_images)\n for i in range(start_ndx, end_ndx):\n if i % 50 == 0:\n sys.stdout.write('\\r>> Converting image %d/%d shard %d\\n' % (\n i + 1, num_images, shard_id))\n sys.stdout.flush()\n\n img= dataset_train.image_info[i]['image']\n gt_boxes= dataset_train.image_info[i]['gt_boxes']\n masks= dataset_train.image_info[i]['mask']\n mask= dataset_train.image_info[i]['mask_']\n\n img_raw = img.tostring()\n mask_raw = mask.tostring()\n img_id=i\n example = _to_tfexample_coco_raw(\n img_id,\n img_raw,\n mask_raw,\n height, width, gt_boxes.shape[0],\n gt_boxes.tostring(), masks.tostring())\n\n tfrecord_writer.write(example.SerializeToString())\n sys.stdout.write('\\n')\n sys.stdout.flush()", "def _decode_record(record, name_to_features):\n\t\t\texample = tf.parse_single_example(record, name_to_features)\n\n\t\t\t# tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n\t\t\t# So cast all int64 to int32.\n\t\t\tfor name in list(example.keys()):\n\t\t\t\tt = example[name]\n\t\t\t\tif t.dtype == tf.int64:\n\t\t\t\t\tt = tf.to_int32(t)\n\t\t\t\texample[name] = t\n\n\t\t\treturn example", "def input_fn():\n files = tf.data.Dataset.list_files(os.path.join(\n tft_working_dir, filebase + '*'))\n dataset = files.interleave(\n tf.data.TFRecordDataset, cycle_length=4, block_length=16)\n dataset = dataset.map(parser)\n\n if shuffle:\n dataset = dataset.shuffle(buffer_size)\n\n dataset = dataset.repeat(num_epochs)\n dataset = dataset.batch(batch_size)\n\n dataset = dataset.prefetch(prefetch_buffer_size)\n iterator = dataset.make_one_shot_iterator()\n transformed_features, transformed_labels = iterator.get_next()\n\n return transformed_features, transformed_labels", "def generateGenericTFRecord(addrs,labels,numOutputs):\n print(\"Generating TFRecord containing training and test files for {} outputs...\".format(numOutputs))\n filename = 'generic'+str(numOutputs)+'.tfrecords'\n writer = tf.python_io.TFRecordWriter(filename)\n labels = [i-171 for i in labels] #to start from Chinese characters, ignore alphanumeric\n for i in range(len(addrs)):\n # Load the image\n img = Image.open(addrs[i])\n img = np.array(img)\n label = labels[i]\n # Create a feature\n feature = {'label': convertToTFRecord._int64_feature(label),\n 'image': 
convertToTFRecord._bytes_feature(tf.compat.as_bytes(img.tostring()))}\n # Create an example protocol buffer\n example = tf.train.Example(features=tf.train.Features(feature=feature))\n # Serialize to string and write on the file\n writer.write(example.SerializeToString())\n writer.close()", "def _decode_record(record, name_to_features):\n example = tf.io.parse_single_example(serialized=record, features=name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.cast(t, dtype=tf.int32)\n example[name] = t\n\n return example", "def read(dataset = \"training\", path = \".\"):\n\n if dataset is \"training\":\n fname_img = os.path.join(path, 'train-images.idx3-ubyte')\n fname_lbl = os.path.join(path, 'train-labels.idx1-ubyte')\n elif dataset is \"testing\":\n fname_img = os.path.join(path, 't10k-images.idx3-ubyte')\n fname_lbl = os.path.join(path, 't10k-labels.idx1-ubyte')\n else:\n raise ValueError(\"dataset must be 'testing' or 'training'\")\n\n # Load everything in some numpy arrays\n with open(fname_lbl, 'rb') as flbl:\n magic, num = struct.unpack(\">II\", flbl.read(8))\n lbl = np.fromfile(flbl, dtype=np.int8)\n\n with open(fname_img, 'rb') as fimg:\n magic, num, rows, cols = struct.unpack(\">IIII\", fimg.read(16))\n img = np.fromfile(fimg, dtype=np.uint8).reshape(len(lbl), rows, cols)\n\n get_img = lambda idx: (lbl[idx], img[idx])\n\n # Create an iterator which returns each image in turn\n for i in range(len(lbl)):\n yield get_img(i)", "def _decode_record(record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example", "def _decode_record(record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example", "def _decode_record(record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example", "def _decode_record(record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example", "def get_data(self, t_img_path, v_img_path, t_label_path, v_label_path):\n train_label_names = tf.constant(sorted(os.path.join(t_label_path, name) for name in os.listdir(t_label_path)))\n val_label_names = tf.constant(sorted(os.path.join(v_label_path, name) for name in os.listdir(v_label_path)))\n train_image_names = tf.constant(sorted(os.path.join(t_img_path, name) for name in os.listdir(t_img_path)))\n val_image_names = tf.constant(sorted(os.path.join(v_img_path, name) for name in 
os.listdir(v_img_path)))\n\n training_dataset = tf.data.Dataset.from_tensor_slices((train_image_names, train_label_names))\n training_dataset = training_dataset.shuffle(buffer_size=50000)\n training_dataset = training_dataset.map(self.dataset_resize_images, num_parallel_calls=4)\n training_dataset = training_dataset.map(\n lambda filename, label: tuple(tf.py_func(self.dataset_convert_labels, [filename, label], [tf.float32, tf.float32], stateful=False)),\n num_parallel_calls=4)\n training_dataset = training_dataset.prefetch(self.batch_size)\n training_dataset = training_dataset.batch(self.batch_size)\n training_dataset = training_dataset.repeat()\n\n val_dataset = tf.data.Dataset.from_tensor_slices((val_image_names, val_label_names))\n val_dataset = val_dataset.shuffle(buffer_size=5000)\n val_dataset = val_dataset.map(self.dataset_resize_images, num_parallel_calls=4)\n val_dataset = val_dataset.map(\n lambda filename, label: tuple(tf.py_func(self.dataset_convert_labels, [filename, label], [tf.float32, tf.float32], stateful=False)),\n num_parallel_calls=4)\n val_dataset = val_dataset.prefetch(self.batch_size)\n val_dataset = val_dataset.batch(self.batch_size)\n val_dataset = val_dataset.repeat()\n\n handle = tf.placeholder(tf.string, shape=[])\n iterator = tf.data.Iterator.from_string_handle(handle, training_dataset.output_types, training_dataset.output_shapes)\n images, labels = iterator.get_next()\n\n training_iterator = training_dataset.make_one_shot_iterator()\n validation_iterator = val_dataset.make_one_shot_iterator()\n\n return handle, training_iterator, validation_iterator, images, labels", "def _read_train_datas(self):\r\n with open(self.train_label_path, 'r') as fb:\r\n lines = fb.readlines()\r\n return self._parse_raw_labels(lines)", "def create_cat_tf_example(label, label_text, img_path, img_name):\n\t\n\twith tf.gfile.FastGFile(img_path + img_name, 'rb') as fid:\n\t encoded_image = fid.read() \n\n\tencoded_image_data = sess.run(resize_image, {encoded_jpg_ph: encoded_image}) # I think this may not be the right way of doing this\n\tb_filename = str.encode(img_name)\n\n\timage_format = b'jpg'\n\txmins = [10.0 / width]\n\txmaxs = [(width - 10) / width]\n\tymins = [10.0 / height]\n\tymaxs = [(height - 10.0) / height]\n\t# classes_text = [str.encode(label_text)]\n\tclasses_text = []\n\tif label_text:\n\t\tclasses_text.append(label_text.encode('utf8'))\n\tclasses = []\n\t# if label == 1:\n\tclasses.append(int(label))\n\t# print(classes_text, classes, b_filename)\n\ttf_example = tf.train.Example(features=tf.train.Features(feature={\n\t\t'image/height': dataset_util.int64_feature(height),\n\t\t'image/width': dataset_util.int64_feature(width),\n\t\t'image/filename': dataset_util.bytes_feature(b_filename),\n\t\t'image/source_id': dataset_util.bytes_feature(b_filename),\n\t\t'image/encoded': dataset_util.bytes_feature(encoded_image_data),\n\t\t# 'image/encoded': dataset_util.bytes_feature(encoded_jpg),\n\t\t'image/format': dataset_util.bytes_feature(image_format),\n\t\t'image/object/bbox/xmin': dataset_util.float_list_feature(xmins),\n\t\t'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs),\n\t\t'image/object/bbox/ymin': dataset_util.float_list_feature(ymins),\n\t\t'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs),\n\t\t'image/object/class/text': dataset_util.bytes_list_feature(classes_text),\n\t\t'image/object/class/label': dataset_util.int64_list_feature(classes),\n\t}))\n\treturn tf_example", "def get_dataset_tfrecords(self, device_idx = 0):\n tf_records = 
os.path.join(self._data_dir, self._data_file)\n f_metadata = open(tf_records + '.metadata', 'r', encoding= 'utf-8')\n metadata = json.load(f_metadata)\n img_shape = metadata['img_shape']\n dtype = 'tf.' + metadata['d_type']\n f_metadata.close()\n\n data_iter = self._service.get_image_from_tfrecords(\n filenames= [tf_records],\n img_shape= img_shape,\n dt= eval(dtype))\n\n LOGGER.debug('----- TFRECORDS DATA ITER {} -----'.format(data_iter))\n\n return data_iter", "def _add_to_tfrecord(images, labels, tfrecord_writer):\n\n shape = (_IMAGE_SIZE, _IMAGE_SIZE, _NUM_CHANNELS)\n with tf.Graph().as_default():\n image = tf.placeholder(dtype=tf.uint8, shape=shape)\n encoded_png = tf.image.encode_png(image)\n\n with tf.Session('') as sess:\n num_images = len(images)\n for i in range(num_images):\n sys.stdout.write('\\r>> Converting image %d/%d' % (i + 1, num_images))\n sys.stdout.flush()\n\n png_string = sess.run(encoded_png, feed_dict={image: images[i]})\n\n example = dataset_utils.image_to_tfexample(\n png_string, 'png'.encode(), _IMAGE_SIZE, _IMAGE_SIZE, labels[i])\n tfrecord_writer.write(example.SerializeToString())", "def _decode_record(record,name_to_features):\n example = tf.parse_single_example(record,name_to_features)\n\n return example", "def extract_input(config, params, num_take=None):\n num_prefetch = params.Nb*10\n if num_take is not None:\n num_take = max((num_take, params.Nb))\n num_prefetch = min((num_prefetch, num_take))\n mapping = params.inference_problem_def['lids2cids']\n with tf.device('/cpu:0'):\n dataset = tf.data.TFRecordDataset(params.predict_dir)\n if num_take is not None:\n dataset = dataset.take(num_take)\n dataset = dataset.repeat()\n dataset = dataset.map(parse_func, num_parallel_calls=8)\n dataset = dataset.map(\n lambda image, label, paths: (paths, *prepare_data(image, label, mapping, params)))\n dataset = dataset.map(lambda paths, image, label:\n (paths, image, label, *preprocess_evaluate(image, label, params)), num_parallel_calls=8)\n dataset = dataset.batch(params.Nb)\n dataset = dataset.prefetch(num_prefetch)\n iterator = dataset.make_one_shot_iterator()\n values = iterator.get_next()\n\n features = {'rawimages': values[1],\n 'proimages': values[3],\n 'rawimagespaths': values[0][0],\n 'rawlabelspaths': values[0][1]}\n labels = {'rawlabels': values[2],\n 'prolabels': values[4]}\n return features, labels" ]
[ "0.7381145", "0.7147946", "0.70476395", "0.6901313", "0.68394387", "0.6806212", "0.6792378", "0.6766865", "0.6712281", "0.66669863", "0.66519564", "0.6645036", "0.6624598", "0.65483457", "0.6527235", "0.65252674", "0.6501299", "0.64654404", "0.6465048", "0.6458233", "0.6456006", "0.6373383", "0.6332641", "0.63209647", "0.63080406", "0.6274842", "0.6269495", "0.62689435", "0.6244793", "0.62352514", "0.62335426", "0.6228464", "0.6209952", "0.6185932", "0.61812747", "0.61789536", "0.6162697", "0.6154867", "0.6146568", "0.61454624", "0.61306244", "0.6118761", "0.6094753", "0.6091108", "0.60789406", "0.605807", "0.60321444", "0.6026775", "0.60215664", "0.6019279", "0.5996071", "0.5988443", "0.5971343", "0.5965532", "0.59586084", "0.5956936", "0.5952256", "0.5952217", "0.5944042", "0.59426564", "0.59404117", "0.5938626", "0.59339285", "0.5926796", "0.59209317", "0.59187496", "0.5913794", "0.5905305", "0.5896838", "0.5896838", "0.5894787", "0.5882572", "0.5878688", "0.5878477", "0.5865079", "0.58631796", "0.5862753", "0.5855559", "0.5843336", "0.5832891", "0.5821787", "0.582158", "0.5819529", "0.581652", "0.58165115", "0.58164686", "0.5815576", "0.58048666", "0.5804858", "0.5802638", "0.5802638", "0.5802638", "0.5801344", "0.5798557", "0.57936543", "0.5776645", "0.5774435", "0.5771548", "0.57658225", "0.5758222" ]
0.71598876
1
Test if the auditor respects "collect_only" config item
def test_collect_only(cinq_test_service):

    # Prep
    setup_info = setup_test_aws(cinq_test_service)
    account = setup_info['account']

    prep_s3_testing(cinq_test_service, collect_only=True)

    # Add resources
    client = aws_get_client('s3')
    bucket_name = dbconfig.get('test_bucket_name', NS_CINQ_TEST, default='testbucket')
    client.create_bucket(Bucket=bucket_name)

    # Collect resources
    collect_resources(account=account, resource_types=['s3'])

    # Initialize auditor
    auditor = MockRequiredTagsAuditor()

    # Setup test
    cinq_test_service.modify_resource(
        bucket_name,
        'creation_date',
        '2000-01-01T00:00:00'
    )

    auditor.run()
    assert not auditor._cinq_test_notices
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pytest_ignore_collect(path: Any, config: Config) -> bool:\n if config.option.functional:\n return True\n if config.option.markexpr and \"wip\" in config.option.markexpr:\n return False # collect when looking for markers\n return not (config.option.integration or config.option.integration_only)", "def can_be_collected(self):\n return True", "def is_collecting(self) -> bool:\n return self.orders and self.orders[0].ability.id in {AbilityId.HARVEST_GATHER, AbilityId.HARVEST_RETURN}", "def collect_allowed(message):\n return True", "def _should_profile_production_default():\n return False", "def audit_only(self) -> bool:\n result = True\n for effect in self.allowed_effects:\n if effect not in [\"disabled\", \"audit\", \"auditifnotexists\"]:\n result = False\n return result", "def filter(self, record):\n\n if record.exc_info:\n is_included = 0\n else:\n is_included = 1\n return is_included", "def _must_skip(self):\n if not self.magento_record :\n return \"Product attribute can not imported because it is not importable.\"\n apply_to = self.magento_record.get('apply_to')\n if apply_to and len(apply_to) > 0 and 'simple' not in apply_to:\n return \"Product attribute can not imported because it not for simple product.\"\n return", "def is_experiment(cfg):\n if CONDITIONS in list(cfg.keys()):\n return True\n else:\n return False", "def configured(self):\n return super().configured and self.max_harvest is not None", "def is_condition(cfg):\n if SELECTIONS in list(cfg.keys()):\n return True\n else:\n return False", "def pytest_ignore_collect(path):\n if not WITH_ASYNC and basename(str(path)) in async_files:\n return True\n return False", "def _enum_should_collect(self, enum):\n last_require_version = 0\n for feature in self.enum_required_by_feature[enum]:\n last_require_version = max(last_require_version, feature['number'])\n\n last_remove_version = 0\n for feature in self.enum_removed_by_feature[enum]:\n last_remove_version = max(last_remove_version, feature['number'])\n\n for extension in self.enum_required_by_extension[enum]:\n extension_name = extension['name']\n if extension_name in self.extensions_to_collect:\n #print(f'Collecting enum {enum} because it is required by {extension_name}')\n return True\n\n # filter by command not required by core profile\n if last_require_version == 0:\n return False\n\n # filter by removed\n if last_remove_version > last_require_version:\n return False\n\n return True", "def separately_configurable(self):\n return False", "def separately_configurable(self):\n return False", "def _filter(self):\n if self.properties['reason'] in PoliceReport.reason_filter:\n return False\n return True", "def _suppress(self, key):\n return key in self.SUPPRESS", "def test_enable_retainUnsent():\n config_info = read_config()\n config_info['retainUnsent'] = True\n open(config_file, 'w').close()\n with open(config_file, 'r+') as conf:\n conf.write(json.dumps(config_info))\n config_info = read_config()\n\n assert config_info['retainUnsent'] is True", "def allow_map_to_audit(self):\n return self.audit_id is None and self.audit is None", "def separately_configurable(self):\n return True", "def update_water_collecting():\n if init.game_state.rain_water_uses > 0 or init.game_state.current_location[\"Key\"] in cs.water_locations:\n sc.sm.get_screen(\"game\").ids.water_collecting.disabled = False\n else:\n sc.sm.get_screen(\"game\").ids.water_collecting.disabled = True", "def test_exclude_include_overlapping_for_configitem(capsys):\n\n @mc_config(ef, load_now=True)\n def config(rt):\n 
with ItemWithAA() as cr:\n cr.aa = 1\n with item(mc_include=[g_dev12_3, pp], mc_exclude=[g_dev12]) as it:\n it.setattr('anattr', pp=1, g_dev12_3=2)\n it.setattr('b', pp=1, dev3=0)\n it.setattr('anotherattr', default=111)\n return cr\n\n cr = config(prod).ItemWithAA\n assert cr.aa == 1\n assert not cr.item\n assert compare_json(cr, _include_exclude_for_configitem_expected_json, test_excluded=True)\n\n cr = config(dev1).ItemWithAA\n assert cr.aa == 1\n assert not cr.item\n\n cr = config(dev2).ItemWithAA\n assert cr.aa == 1\n assert not cr.item\n\n cr = config(dev3).ItemWithAA\n assert cr.aa == 1\n assert cr.item\n assert cr.item.anattr == 2\n assert cr.item.b == 0\n assert cr.item.anotherattr == 111\n\n cr = config(pp).ItemWithAA\n assert cr.aa == 1\n assert cr.item\n assert cr.item.anattr == 1\n assert cr.item.b == 1\n assert cr.item.anotherattr == 111", "def metadata(self):\r\n collectdetails = r'collectFileDetails'\r\n if collectdetails in self._vsaSubclientProp:\r\n vsasubclient_collect_details = self._vsaSubclientProp[collectdetails]\r\n else:\r\n vsasubclient_collect_details = False\r\n return vsasubclient_collect_details", "def should_profile():\n if util.dev_server:\n return _config.should_profile_development()\n else:\n return _config.should_profile_production()", "def __contains__(self, name):\n if name not in self.ALLOWED_EXCLUDES or name not in self.data.keys():\n return False\n else:\n return True", "def check(self, context):\r\n return context.config.preset is not None", "def test_exclude_include_overlapping_resolved_with_include_for_configitem():\n\n @mc_config(ef, load_now=True)\n def config(rt):\n with ItemWithAA() as cr:\n cr.aa = 1\n with item(mc_include=[g_dev12, pp, dev2], mc_exclude=[g_dev23]) as it:\n it.setattr('anattr', pp=1, g_dev12_3=2)\n it.setattr('b', pp=1, dev2=0)\n it.setattr('anotherattr', default=111)\n return cr\n\n cr = config(prod).ItemWithAA\n assert not cr.item\n assert compare_json(cr, _include_exclude_for_configitem_expected_json, test_excluded=True)\n\n cr = config(dev1).ItemWithAA\n assert cr.item\n\n cr = config(dev2).ItemWithAA\n assert cr.item\n assert cr.item.b == 0\n\n cr = config(dev3).ItemWithAA\n assert not cr.item\n\n cr = config(pp).ItemWithAA\n assert cr.item\n assert cr.item.anattr == 1\n assert cr.item.b == 1\n assert cr.item.anotherattr == 111", "def test_exclude_include_overlapping_resolved_with_exclude_for_configitem():\n\n @mc_config(ef, load_now=True)\n def config(rt):\n with ItemWithAA() as cr:\n cr.aa = 1\n with item(mc_include=[g_dev12, pp], mc_exclude=[dev2, g_dev23]) as it:\n it.setattr('anattr', pp=1, g_dev12_3=2)\n it.setattr('b', pp=1)\n it.setattr('anotherattr', default=111)\n return cr\n\n cr = config(prod).ItemWithAA\n assert not cr.item\n assert compare_json(cr, _include_exclude_for_configitem_expected_json, test_excluded=True)\n\n cr = config(dev1).ItemWithAA\n assert cr.item\n\n cr = config(dev2).ItemWithAA\n assert not cr.item\n\n cr = config(dev3).ItemWithAA\n assert not cr.item\n\n cr = config(pp).ItemWithAA\n assert cr.item\n assert cr.item.anattr == 1\n assert cr.item.b == 1\n assert cr.item.anotherattr == 111", "def is_applicable(self, context: Any) -> bool:\n pass", "def _filter_capabilities(self, events):\n return [x for x in events if Capability.has(x)]", "def test_enable_retainUnsent_logs():\n stmt = sqlalchemy.select([_LOGGING_TABLE.c.total_unsent_rows_removed]).select_from(_LOGGING_TABLE).order_by(\n _LOGGING_TABLE.c.id.desc()).limit(1)\n config_info = read_config()\n config_info['retainUnsent'] = True\n 
open(config_file, 'w').close()\n with open(config_file, 'r+') as conf:\n conf.write(json.dumps(config_info))\n\n time.sleep(convert_sleep(config_info['wait'])*2)\n result = execute_command_with_return_value(stmt)\n\n assert int(result[0][0]) == 0", "def test_disable_retainUnsent():\n config_info = read_config()\n config_info['retainUnsent'] = False\n open(config_file, 'w').close()\n with open(config_file, 'r+') as conf:\n conf.write(json.dumps(config_info))\n config_info = read_config()\n\n assert config_info['retainUnsent'] is False", "def has_configuration_set():\r\n return getattr(settings, \"MICROSITE_CONFIGURATION\", False)", "def metadata(self, value=True):\r\n collectdetails = r'collectFileDetails'\r\n if collectdetails in self._vsaSubclientProp:\r\n self._set_subclient_properties(\"_vsaSubclientProp['collectFileDetails']\", value)", "def test_extra_field_when_not_requested(self):\n self.client.login(username=self.admin_user.username, password='test')\n response = self.verify_response(params={\n 'all_blocks': True,\n 'requested_fields': ['course_visibility'],\n })\n self.verify_response_block_dict(response)\n for block_data in response.data['blocks'].values():\n assert 'other_course_settings' not in block_data\n\n self.assert_in_iff(\n 'course_visibility',\n block_data,\n block_data['type'] == 'course'\n )", "def skip_field_info_validation(config):\n\n reformatters = ['PCPCombine', 'RegridDataPlane']\n process_list = [item[0] for item in get_process_list(config)]\n\n # if running MTD in single mode, you don't need matching FCST/OBS\n if 'MTD' in process_list and config.getbool('config', 'MTD_SINGLE_RUN'):\n return True\n\n # if running any app other than the reformatters, you need matching FCST/OBS, so don't skip\n if [item for item in process_list if item not in reformatters]:\n return False\n\n return True", "def check_logging(self, elb_item):\n logging = elb_item.config.get('Attributes', {}).get('AccessLog', {})\n if not logging:\n self.add_issue(1, Categories.RECOMMENDATION, elb_item, notes='Enable access logs')\n return\n\n if not logging.get('Enabled'):\n self.add_issue(1, Categories.RECOMMENDATION, elb_item, notes='Enable access logs')\n return", "def test_guess_and_set_use_collection_not_boolean(self) -> None:\n\n config_loader = ConfigLoader()\n config_loader.set_custom_config({\"lookup\": {\"collection\": None}}).start()\n\n self.checker.guess_and_set_use_collection()\n actual = self.checker.use_collection\n expected = False\n\n self.assertEqual(expected, actual)\n\n del config_loader", "def wants_event(self, event_name: str, args: Dict) -> bool:\n ret = True\n if self.event_filter and event_name not in self.event_filter:\n ret = False\n elif self.active_monitor_filter and 'monitor' in args and args['monitor'].monitor_type == 'active' \\\n and args['monitor'].id not in self.active_monitor_filter:\n ret = False\n return ret", "def disability_specify(self, instance):\r\n return instance.user.profile.disability_specify", "def IgnorePersistedDecision(self) -> bool:", "def is_asset_based_activity(self):\n return bool(self._my_map['assetIds'])", "def matches_config(cls, config):\n return (not config.measures) or all(me in cls.available_measures for me in config.measures)", "def is_restricted_download(self):\n return self.has_label(RESTRICTEDDOWNLOAD_LABEL)", "def is_excluded(self, attr_name, request):\n return False", "def suppress_analyze(more_exclusions=None):\n return api.override_step_data(\n 'read filter exclusion spec',\n api.json.output({\n 'base': {\n 'exclusions': 
['f.*'] + (more_exclusions or []),\n },\n 'chromium': {\n 'exclusions': [],\n },\n })\n )", "def skip_experiment(conf):\n return (\n (conf.dataset == 'rfw' and conf.feature == 'arcface')\n or (conf.dataset == 'bfw' and conf.feature == 'facenet')\n )", "def test_disable_retainUnsent_logs():\n stmt = sqlalchemy.select([_LOGGING_TABLE.c.total_unsent_rows_removed]).select_from(_LOGGING_TABLE).order_by(\n _LOGGING_TABLE.c.id.desc()).limit(1)\n config_info = read_config()\n config_info['retainUnsent'] = False\n open(config_file, 'w').close()\n with open(config_file, 'r+') as conf:\n conf.write(json.dumps(config_info))\n\n time.sleep(convert_sleep(config_info['wait'])*2)\n result = execute_command_with_return_value(stmt)\n\n assert int(result[0][0]) >= 0", "def test_otoroshi_controllers_adminapi_analytics_controller_filterable_events(self):\n pass", "def check(self, context):\r\n return context.config.stopAt is not None", "def _should_profile_development_default():\n return True", "def available(\n\t\t\tconfig_file):\n\t\treturn", "def _filter_capabilities(self, events): \n events_out = [x for x in events if Capability.has(x)]\n return events_out", "def is_suppressed(self):\n return self._is_record_status(self.SUPPRESSED)", "def cart_excluded(self,cart):\n\t\tfor ex_cart in self.excludes['cart_exclude']:\n\t\t\tif cart == ex_cart:\n\t\t\t\tprint \" \u001b[43mExcluding:\u001b[m %s (File list will be pulled from the database)\" % (cart)\n\t\t\t\treturn True\n\t\treturn False", "def require_partition_filter(self) -> bool:\n return pulumi.get(self, \"require_partition_filter\")", "def filterattrs(event):\n for a in ['description', 'summary', 'location']:\n if not hasattr(event, a):\n return False\n return True", "def excluded_from_scan(self):\n return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/view/excludedFromScan/')))", "def is_activity_only(self):\n return self._tag == 'activity_only'", "def is_assessment_based_activity(self):\n return 'assessmentIds' in self._my_map and bool(self._my_map['assessmentIds'])", "def disable_consumer_stats(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"disable_consumer_stats\")", "def no_filter(blast_subject_entry):\r\n return True", "def should_render(\n self,\n *,\n context: Context,\n ) -> bool:\n review_request = context.get('review_request')\n\n return (super().should_render(context=context) and\n review_request is not None and\n review_request.public and\n not is_site_read_only_for(context['request'].user))", "def should_be_included(self):\n return True", "def test_sources_not_ok_on_config_error(self):\n measurement = self.measurement(\n self.metric(metric_type=\"sentiment\"),\n sources=[\n {\"source_uuid\": SOURCE_ID, \"value\": \"5\", \"total\": \"100\", \"parse_error\": None, \"connection_error\": None},\n {\n \"source_uuid\": SOURCE_ID2,\n \"value\": \"7\",\n \"total\": \"100\",\n \"parse_error\": None,\n \"connection_error\": None,\n },\n ],\n )\n self.assertFalse(measurement.sources_ok())", "def test_guess_and_set_use_collection_no_configuration(self) -> None:\n\n self.checker.guess_and_set_use_collection()\n actual = self.checker.use_collection\n expected = False\n\n self.assertEqual(expected, actual)", "def _is_supplied_by_config(group: argparse._MutuallyExclusiveGroup, conf: Dict[str, Any]) -> bool:\n group_args = []\n for arg in group._group_actions:\n group_args.append(arg.dest)\n\n count = 0\n for val in group_args:\n if val in conf:\n count += 1\n return count == len(group_args) or count == 0", "def 
Whitelisted(path):\n return os.path.basename(path) == 'OWNERS'", "def is_blacklisted(self):\r\n \r\n in_blacklist = False \r\n if self.chrompos in parser.blacklist:\r\n in_blacklist = True\r\n \r\n return in_blacklist", "def should_run(self, opt: dict, blacklist_status=['done', 'started']):\n if self._has_run(opt, blacklist_status):\n return False\n\n results = copy.deepcopy(opt)\n results['status'] = 'started'\n for k in self._ignore_keys:\n if k in results:\n del results[k]\n self._collect.insert_one(results)\n return True", "def test_itar_restrict_software_asset(self):\n pass", "def test_otoroshi_controllers_adminapi_analytics_controller_filterable_stats(self):\n pass", "def should_render(\n self,\n context: Context,\n ) -> bool:\n request = context['request']\n user = request.user\n\n return (super().should_render(context=context) and\n user.is_authenticated and\n not is_site_read_only_for(user) and\n unified_banner_feature.is_enabled(request=request))", "def ignore_listings(name_key):\n # for blacklist_str in models_blacklist:\n # if blacklist_str in name_key:\n # return True\n return False", "def check_config(self):\n # Check if tool is at all included in workflow\n if \"external\" not in self.config[\"tools\"][\"dna\"]:\n return # External not run, don't check configuration # pragma: no cover", "def test_itar_restrict_asset(self):\n pass", "def is_ignored(self):", "def should_render(\n self,\n *,\n context: Context,\n ) -> bool:\n request = context['request']\n user = request.user\n\n return (super().should_render(context=context) and\n user.is_authenticated and\n not is_site_read_only_for(user) and\n not unified_banner_feature.is_enabled(request=request))", "def should_render(\n self,\n *,\n context: Context,\n ) -> bool:\n request = context['request']\n user = request.user\n\n return (super().should_render(context=context) and\n user.is_authenticated and\n not is_site_read_only_for(user) and\n not unified_banner_feature.is_enabled(request=request))", "def item_filter(item):\n\tcch_geoserver_services = get_only_cch_geoserver_services(item['services'])\n\thas_cch_geoserver_services = 0 != len(cch_geoserver_services)\n\tis_data = 'data' == item['itemType']\n\treturn is_data and has_cch_geoserver_services;", "def import_only(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"import_only\")", "def __contains__(self, item):\n return self.settings.has(item)", "def test_should_render_with_user_in_read_only(self) -> None:\n self.request.user = User.objects.get(username='doc')\n\n # Turning on read-only mode prevents creation of some objects so call\n # _create_request_context first.\n request_context = self._create_request_context(user=self.request.user)\n\n settings = {\n 'site_read_only': True,\n }\n\n with override_feature_check(unified_banner_feature.feature_id, False):\n with self.siteconfig_settings(settings):\n if getattr(self, 'read_only_always_show', False):\n self.assertTrue(\n self.action.should_render(context=request_context))\n else:\n self.assertFalse(\n self.action.should_render(context=request_context))", "def _check_config(self):", "def _should_profile(self) -> bool:\n if \"profile\" in self._allowed_plugins:\n if not self._one_shot:\n raise ValueError(\n \"Profile plugin currently only supported for one shot.\"\n )\n logger.info(\"Profile plugin is enalbed.\")\n return True\n return False", "def test_app_is_production(self):\n self.assertFalse(app.config['DEBUG'])\n self.assertFalse(app.config['TESTING'])", "def remote_publishing():\n return 
hasattr(settings, 'NEWS_REMOTE_PUBLISHING')", "def _target_filter(self, obj):\r\n return type(obj).__name__ in ['Cube'] and not obj.is_grasped # List because may be extended to other objects.\r", "def import_only(self) -> pulumi.Output[bool]:\n return pulumi.get(self, \"import_only\")", "def _filter(self, entry):\n host = entry.get('@source_host', '')\n\n # errors will most likely come from job-s1\n if not is_from_production_host(host):\n return False\n\n return True", "def must_skip(self, item):\n user = c.user if c.user_is_loggedin else None\n\n if hasattr(item, \"promoted\") and item.promoted is not None:\n return False\n\n # can_view_slow only exists for Messages, but checking was_comment\n # is also necessary because items may also be comments that are being\n # viewed from the inbox page where their render class is overridden.\n # This check needs to be done before looking at whether they can view\n # the subverbify, or modmail to/from private subverbifys that the user\n # doesn't have access to will be skipped.\n if hasattr(item, 'can_view_slow') and not item.was_comment:\n return not item.can_view_slow()\n\n if hasattr(item, 'subverbify') and not item.subverbify.can_view(user):\n return True", "def filterVarForWizard(self, v):\n return v.isMeasurement()", "def app_config_has(self, field) -> bool:\n if not self.app_config():\n return False\n return field in self.app_config()", "def _task_filter(self, task):\n name = task.Name()\n if name.startswith('PS_CallAccountReports') and name.endswith('SERVER'):\n return True\n return False", "def __contains__(self, attr):\n return attr in self._config", "def test_itar_restrict_test_asset(self):\n pass", "def is_to_filter(self):\n if not self.app.args.filter is None:\n # Check the flag value to evite problem in search process\n ok = self.validate_value_flag()\n\n if ok is False:\n fatal([\n 'Invalid value for \"value\" flag',\n 'The value flag is required to filter',\n 'Use instead:',\n '$ tasks-app show --filter/-f={} --value/-v=VALUE'.format(self.app.args.filter),\n ])\n else:\n return True\n else:\n return False", "def check_settings(self):\r\n pass", "def is_production_mode(self):\n return getattr(self.env, 'mode', None) == 'production'", "def _validate_mostly_config(configuration: ExpectationConfiguration) -> None:\n if \"mostly\" in configuration.kwargs:\n mostly = configuration.kwargs[\"mostly\"]\n assert isinstance(\n mostly, (int, float)\n ), \"'mostly' parameter must be an integer or float\"\n assert 0 <= mostly <= 1, \"'mostly' parameter must be between 0 and 1\"" ]
[ "0.6463033", "0.58618915", "0.58207494", "0.56816643", "0.56375605", "0.55994993", "0.54857254", "0.54495823", "0.53826636", "0.5368159", "0.5362893", "0.5337729", "0.533311", "0.532473", "0.532473", "0.5313702", "0.51993185", "0.5181066", "0.5178171", "0.5176643", "0.51311594", "0.51213557", "0.510382", "0.5101778", "0.5080732", "0.5070768", "0.5067113", "0.5042677", "0.50423217", "0.50271803", "0.50206983", "0.49959075", "0.49923852", "0.49816582", "0.49704346", "0.4966844", "0.49529395", "0.49479827", "0.49430126", "0.493658", "0.49174294", "0.4915379", "0.49133077", "0.49005222", "0.49002337", "0.48903608", "0.48779634", "0.48723802", "0.48688313", "0.48686665", "0.48609", "0.4859869", "0.48571417", "0.4854073", "0.48338333", "0.48183778", "0.48148435", "0.48087817", "0.4807506", "0.48030955", "0.4802906", "0.4802412", "0.47864372", "0.47851217", "0.47812578", "0.4780375", "0.47697178", "0.47691366", "0.4767451", "0.47639224", "0.47613677", "0.47429466", "0.47411072", "0.47380927", "0.47355205", "0.47338578", "0.47289705", "0.47276056", "0.47276056", "0.47194725", "0.47185427", "0.47128785", "0.47114035", "0.46967143", "0.46902248", "0.46852475", "0.46814635", "0.46784323", "0.46770135", "0.46759722", "0.46673334", "0.46609628", "0.46497893", "0.46446067", "0.4640269", "0.46395203", "0.46376178", "0.4637154", "0.46318084", "0.4630867" ]
0.54581964
7
Test if the auditor respects "collect_only" config item
def test_ignore_tag(cinq_test_service):

    # Prep
    setup_info = setup_test_aws(cinq_test_service)
    account = setup_info['account']

    prep_s3_testing(cinq_test_service)

    # Add resources
    client = aws_get_client('s3')
    bucket_name = dbconfig.get('test_bucket_name', NS_CINQ_TEST, default='testbucket')
    client.create_bucket(Bucket=bucket_name)
    client.put_bucket_tagging(
        Bucket=bucket_name,
        Tagging={'TagSet': IGNORE_TAGSET}
    )

    # Collect resources
    collect_resources(account=account, resource_types=['s3'])

    # Initialize auditor
    auditor = MockRequiredTagsAuditor()

    # Setup test
    cinq_test_service.modify_resource(
        bucket_name,
        'creation_date',
        '2000-01-01T00:00:00'
    )

    auditor.run()
    assert not auditor._cinq_test_notices
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pytest_ignore_collect(path: Any, config: Config) -> bool:\n if config.option.functional:\n return True\n if config.option.markexpr and \"wip\" in config.option.markexpr:\n return False # collect when looking for markers\n return not (config.option.integration or config.option.integration_only)", "def can_be_collected(self):\n return True", "def is_collecting(self) -> bool:\n return self.orders and self.orders[0].ability.id in {AbilityId.HARVEST_GATHER, AbilityId.HARVEST_RETURN}", "def collect_allowed(message):\n return True", "def _should_profile_production_default():\n return False", "def audit_only(self) -> bool:\n result = True\n for effect in self.allowed_effects:\n if effect not in [\"disabled\", \"audit\", \"auditifnotexists\"]:\n result = False\n return result", "def filter(self, record):\n\n if record.exc_info:\n is_included = 0\n else:\n is_included = 1\n return is_included", "def test_collect_only(cinq_test_service):\n\n # Prep\n setup_info = setup_test_aws(cinq_test_service)\n account = setup_info['account']\n\n prep_s3_testing(cinq_test_service, collect_only=True)\n\n # Add resources\n client = aws_get_client('s3')\n bucket_name = dbconfig.get('test_bucket_name', NS_CINQ_TEST, default='testbucket')\n client.create_bucket(Bucket=bucket_name)\n\n # Collect resources\n collect_resources(account=account, resource_types=['s3'])\n\n # Initialize auditor\n auditor = MockRequiredTagsAuditor()\n\n # Setup test\n cinq_test_service.modify_resource(\n bucket_name,\n 'creation_date',\n '2000-01-01T00:00:00'\n )\n\n auditor.run()\n assert not auditor._cinq_test_notices", "def _must_skip(self):\n if not self.magento_record :\n return \"Product attribute can not imported because it is not importable.\"\n apply_to = self.magento_record.get('apply_to')\n if apply_to and len(apply_to) > 0 and 'simple' not in apply_to:\n return \"Product attribute can not imported because it not for simple product.\"\n return", "def is_experiment(cfg):\n if CONDITIONS in list(cfg.keys()):\n return True\n else:\n return False", "def configured(self):\n return super().configured and self.max_harvest is not None", "def is_condition(cfg):\n if SELECTIONS in list(cfg.keys()):\n return True\n else:\n return False", "def pytest_ignore_collect(path):\n if not WITH_ASYNC and basename(str(path)) in async_files:\n return True\n return False", "def _enum_should_collect(self, enum):\n last_require_version = 0\n for feature in self.enum_required_by_feature[enum]:\n last_require_version = max(last_require_version, feature['number'])\n\n last_remove_version = 0\n for feature in self.enum_removed_by_feature[enum]:\n last_remove_version = max(last_remove_version, feature['number'])\n\n for extension in self.enum_required_by_extension[enum]:\n extension_name = extension['name']\n if extension_name in self.extensions_to_collect:\n #print(f'Collecting enum {enum} because it is required by {extension_name}')\n return True\n\n # filter by command not required by core profile\n if last_require_version == 0:\n return False\n\n # filter by removed\n if last_remove_version > last_require_version:\n return False\n\n return True", "def separately_configurable(self):\n return False", "def separately_configurable(self):\n return False", "def _filter(self):\n if self.properties['reason'] in PoliceReport.reason_filter:\n return False\n return True", "def _suppress(self, key):\n return key in self.SUPPRESS", "def test_enable_retainUnsent():\n config_info = read_config()\n config_info['retainUnsent'] = True\n open(config_file, 'w').close()\n 
with open(config_file, 'r+') as conf:\n conf.write(json.dumps(config_info))\n config_info = read_config()\n\n assert config_info['retainUnsent'] is True", "def separately_configurable(self):\n return True", "def allow_map_to_audit(self):\n return self.audit_id is None and self.audit is None", "def update_water_collecting():\n if init.game_state.rain_water_uses > 0 or init.game_state.current_location[\"Key\"] in cs.water_locations:\n sc.sm.get_screen(\"game\").ids.water_collecting.disabled = False\n else:\n sc.sm.get_screen(\"game\").ids.water_collecting.disabled = True", "def test_exclude_include_overlapping_for_configitem(capsys):\n\n @mc_config(ef, load_now=True)\n def config(rt):\n with ItemWithAA() as cr:\n cr.aa = 1\n with item(mc_include=[g_dev12_3, pp], mc_exclude=[g_dev12]) as it:\n it.setattr('anattr', pp=1, g_dev12_3=2)\n it.setattr('b', pp=1, dev3=0)\n it.setattr('anotherattr', default=111)\n return cr\n\n cr = config(prod).ItemWithAA\n assert cr.aa == 1\n assert not cr.item\n assert compare_json(cr, _include_exclude_for_configitem_expected_json, test_excluded=True)\n\n cr = config(dev1).ItemWithAA\n assert cr.aa == 1\n assert not cr.item\n\n cr = config(dev2).ItemWithAA\n assert cr.aa == 1\n assert not cr.item\n\n cr = config(dev3).ItemWithAA\n assert cr.aa == 1\n assert cr.item\n assert cr.item.anattr == 2\n assert cr.item.b == 0\n assert cr.item.anotherattr == 111\n\n cr = config(pp).ItemWithAA\n assert cr.aa == 1\n assert cr.item\n assert cr.item.anattr == 1\n assert cr.item.b == 1\n assert cr.item.anotherattr == 111", "def metadata(self):\r\n collectdetails = r'collectFileDetails'\r\n if collectdetails in self._vsaSubclientProp:\r\n vsasubclient_collect_details = self._vsaSubclientProp[collectdetails]\r\n else:\r\n vsasubclient_collect_details = False\r\n return vsasubclient_collect_details", "def should_profile():\n if util.dev_server:\n return _config.should_profile_development()\n else:\n return _config.should_profile_production()", "def __contains__(self, name):\n if name not in self.ALLOWED_EXCLUDES or name not in self.data.keys():\n return False\n else:\n return True", "def check(self, context):\r\n return context.config.preset is not None", "def test_exclude_include_overlapping_resolved_with_include_for_configitem():\n\n @mc_config(ef, load_now=True)\n def config(rt):\n with ItemWithAA() as cr:\n cr.aa = 1\n with item(mc_include=[g_dev12, pp, dev2], mc_exclude=[g_dev23]) as it:\n it.setattr('anattr', pp=1, g_dev12_3=2)\n it.setattr('b', pp=1, dev2=0)\n it.setattr('anotherattr', default=111)\n return cr\n\n cr = config(prod).ItemWithAA\n assert not cr.item\n assert compare_json(cr, _include_exclude_for_configitem_expected_json, test_excluded=True)\n\n cr = config(dev1).ItemWithAA\n assert cr.item\n\n cr = config(dev2).ItemWithAA\n assert cr.item\n assert cr.item.b == 0\n\n cr = config(dev3).ItemWithAA\n assert not cr.item\n\n cr = config(pp).ItemWithAA\n assert cr.item\n assert cr.item.anattr == 1\n assert cr.item.b == 1\n assert cr.item.anotherattr == 111", "def test_exclude_include_overlapping_resolved_with_exclude_for_configitem():\n\n @mc_config(ef, load_now=True)\n def config(rt):\n with ItemWithAA() as cr:\n cr.aa = 1\n with item(mc_include=[g_dev12, pp], mc_exclude=[dev2, g_dev23]) as it:\n it.setattr('anattr', pp=1, g_dev12_3=2)\n it.setattr('b', pp=1)\n it.setattr('anotherattr', default=111)\n return cr\n\n cr = config(prod).ItemWithAA\n assert not cr.item\n assert compare_json(cr, _include_exclude_for_configitem_expected_json, test_excluded=True)\n\n cr = 
config(dev1).ItemWithAA\n assert cr.item\n\n cr = config(dev2).ItemWithAA\n assert not cr.item\n\n cr = config(dev3).ItemWithAA\n assert not cr.item\n\n cr = config(pp).ItemWithAA\n assert cr.item\n assert cr.item.anattr == 1\n assert cr.item.b == 1\n assert cr.item.anotherattr == 111", "def is_applicable(self, context: Any) -> bool:\n pass", "def _filter_capabilities(self, events):\n return [x for x in events if Capability.has(x)]", "def test_enable_retainUnsent_logs():\n stmt = sqlalchemy.select([_LOGGING_TABLE.c.total_unsent_rows_removed]).select_from(_LOGGING_TABLE).order_by(\n _LOGGING_TABLE.c.id.desc()).limit(1)\n config_info = read_config()\n config_info['retainUnsent'] = True\n open(config_file, 'w').close()\n with open(config_file, 'r+') as conf:\n conf.write(json.dumps(config_info))\n\n time.sleep(convert_sleep(config_info['wait'])*2)\n result = execute_command_with_return_value(stmt)\n\n assert int(result[0][0]) == 0", "def test_disable_retainUnsent():\n config_info = read_config()\n config_info['retainUnsent'] = False\n open(config_file, 'w').close()\n with open(config_file, 'r+') as conf:\n conf.write(json.dumps(config_info))\n config_info = read_config()\n\n assert config_info['retainUnsent'] is False", "def has_configuration_set():\r\n return getattr(settings, \"MICROSITE_CONFIGURATION\", False)", "def metadata(self, value=True):\r\n collectdetails = r'collectFileDetails'\r\n if collectdetails in self._vsaSubclientProp:\r\n self._set_subclient_properties(\"_vsaSubclientProp['collectFileDetails']\", value)", "def test_extra_field_when_not_requested(self):\n self.client.login(username=self.admin_user.username, password='test')\n response = self.verify_response(params={\n 'all_blocks': True,\n 'requested_fields': ['course_visibility'],\n })\n self.verify_response_block_dict(response)\n for block_data in response.data['blocks'].values():\n assert 'other_course_settings' not in block_data\n\n self.assert_in_iff(\n 'course_visibility',\n block_data,\n block_data['type'] == 'course'\n )", "def skip_field_info_validation(config):\n\n reformatters = ['PCPCombine', 'RegridDataPlane']\n process_list = [item[0] for item in get_process_list(config)]\n\n # if running MTD in single mode, you don't need matching FCST/OBS\n if 'MTD' in process_list and config.getbool('config', 'MTD_SINGLE_RUN'):\n return True\n\n # if running any app other than the reformatters, you need matching FCST/OBS, so don't skip\n if [item for item in process_list if item not in reformatters]:\n return False\n\n return True", "def check_logging(self, elb_item):\n logging = elb_item.config.get('Attributes', {}).get('AccessLog', {})\n if not logging:\n self.add_issue(1, Categories.RECOMMENDATION, elb_item, notes='Enable access logs')\n return\n\n if not logging.get('Enabled'):\n self.add_issue(1, Categories.RECOMMENDATION, elb_item, notes='Enable access logs')\n return", "def test_guess_and_set_use_collection_not_boolean(self) -> None:\n\n config_loader = ConfigLoader()\n config_loader.set_custom_config({\"lookup\": {\"collection\": None}}).start()\n\n self.checker.guess_and_set_use_collection()\n actual = self.checker.use_collection\n expected = False\n\n self.assertEqual(expected, actual)\n\n del config_loader", "def wants_event(self, event_name: str, args: Dict) -> bool:\n ret = True\n if self.event_filter and event_name not in self.event_filter:\n ret = False\n elif self.active_monitor_filter and 'monitor' in args and args['monitor'].monitor_type == 'active' \\\n and args['monitor'].id not in 
self.active_monitor_filter:\n ret = False\n return ret", "def disability_specify(self, instance):\r\n return instance.user.profile.disability_specify", "def IgnorePersistedDecision(self) -> bool:", "def matches_config(cls, config):\n return (not config.measures) or all(me in cls.available_measures for me in config.measures)", "def is_asset_based_activity(self):\n return bool(self._my_map['assetIds'])", "def is_excluded(self, attr_name, request):\n return False", "def is_restricted_download(self):\n return self.has_label(RESTRICTEDDOWNLOAD_LABEL)", "def suppress_analyze(more_exclusions=None):\n return api.override_step_data(\n 'read filter exclusion spec',\n api.json.output({\n 'base': {\n 'exclusions': ['f.*'] + (more_exclusions or []),\n },\n 'chromium': {\n 'exclusions': [],\n },\n })\n )", "def skip_experiment(conf):\n return (\n (conf.dataset == 'rfw' and conf.feature == 'arcface')\n or (conf.dataset == 'bfw' and conf.feature == 'facenet')\n )", "def test_disable_retainUnsent_logs():\n stmt = sqlalchemy.select([_LOGGING_TABLE.c.total_unsent_rows_removed]).select_from(_LOGGING_TABLE).order_by(\n _LOGGING_TABLE.c.id.desc()).limit(1)\n config_info = read_config()\n config_info['retainUnsent'] = False\n open(config_file, 'w').close()\n with open(config_file, 'r+') as conf:\n conf.write(json.dumps(config_info))\n\n time.sleep(convert_sleep(config_info['wait'])*2)\n result = execute_command_with_return_value(stmt)\n\n assert int(result[0][0]) >= 0", "def check(self, context):\r\n return context.config.stopAt is not None", "def test_otoroshi_controllers_adminapi_analytics_controller_filterable_events(self):\n pass", "def available(\n\t\t\tconfig_file):\n\t\treturn", "def _should_profile_development_default():\n return True", "def _filter_capabilities(self, events): \n events_out = [x for x in events if Capability.has(x)]\n return events_out", "def is_suppressed(self):\n return self._is_record_status(self.SUPPRESSED)", "def cart_excluded(self,cart):\n\t\tfor ex_cart in self.excludes['cart_exclude']:\n\t\t\tif cart == ex_cart:\n\t\t\t\tprint \" \u001b[43mExcluding:\u001b[m %s (File list will be pulled from the database)\" % (cart)\n\t\t\t\treturn True\n\t\treturn False", "def require_partition_filter(self) -> bool:\n return pulumi.get(self, \"require_partition_filter\")", "def filterattrs(event):\n for a in ['description', 'summary', 'location']:\n if not hasattr(event, a):\n return False\n return True", "def excluded_from_scan(self):\n return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/view/excludedFromScan/')))", "def is_activity_only(self):\n return self._tag == 'activity_only'", "def disable_consumer_stats(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"disable_consumer_stats\")", "def no_filter(blast_subject_entry):\r\n return True", "def is_assessment_based_activity(self):\n return 'assessmentIds' in self._my_map and bool(self._my_map['assessmentIds'])", "def should_render(\n self,\n *,\n context: Context,\n ) -> bool:\n review_request = context.get('review_request')\n\n return (super().should_render(context=context) and\n review_request is not None and\n review_request.public and\n not is_site_read_only_for(context['request'].user))", "def should_be_included(self):\n return True", "def test_guess_and_set_use_collection_no_configuration(self) -> None:\n\n self.checker.guess_and_set_use_collection()\n actual = self.checker.use_collection\n expected = False\n\n self.assertEqual(expected, actual)", "def test_sources_not_ok_on_config_error(self):\n 
measurement = self.measurement(\n self.metric(metric_type=\"sentiment\"),\n sources=[\n {\"source_uuid\": SOURCE_ID, \"value\": \"5\", \"total\": \"100\", \"parse_error\": None, \"connection_error\": None},\n {\n \"source_uuid\": SOURCE_ID2,\n \"value\": \"7\",\n \"total\": \"100\",\n \"parse_error\": None,\n \"connection_error\": None,\n },\n ],\n )\n self.assertFalse(measurement.sources_ok())", "def _is_supplied_by_config(group: argparse._MutuallyExclusiveGroup, conf: Dict[str, Any]) -> bool:\n group_args = []\n for arg in group._group_actions:\n group_args.append(arg.dest)\n\n count = 0\n for val in group_args:\n if val in conf:\n count += 1\n return count == len(group_args) or count == 0", "def Whitelisted(path):\n return os.path.basename(path) == 'OWNERS'", "def is_blacklisted(self):\r\n \r\n in_blacklist = False \r\n if self.chrompos in parser.blacklist:\r\n in_blacklist = True\r\n \r\n return in_blacklist", "def should_run(self, opt: dict, blacklist_status=['done', 'started']):\n if self._has_run(opt, blacklist_status):\n return False\n\n results = copy.deepcopy(opt)\n results['status'] = 'started'\n for k in self._ignore_keys:\n if k in results:\n del results[k]\n self._collect.insert_one(results)\n return True", "def test_itar_restrict_software_asset(self):\n pass", "def should_render(\n self,\n context: Context,\n ) -> bool:\n request = context['request']\n user = request.user\n\n return (super().should_render(context=context) and\n user.is_authenticated and\n not is_site_read_only_for(user) and\n unified_banner_feature.is_enabled(request=request))", "def test_otoroshi_controllers_adminapi_analytics_controller_filterable_stats(self):\n pass", "def ignore_listings(name_key):\n # for blacklist_str in models_blacklist:\n # if blacklist_str in name_key:\n # return True\n return False", "def check_config(self):\n # Check if tool is at all included in workflow\n if \"external\" not in self.config[\"tools\"][\"dna\"]:\n return # External not run, don't check configuration # pragma: no cover", "def test_itar_restrict_asset(self):\n pass", "def is_ignored(self):", "def should_render(\n self,\n *,\n context: Context,\n ) -> bool:\n request = context['request']\n user = request.user\n\n return (super().should_render(context=context) and\n user.is_authenticated and\n not is_site_read_only_for(user) and\n not unified_banner_feature.is_enabled(request=request))", "def should_render(\n self,\n *,\n context: Context,\n ) -> bool:\n request = context['request']\n user = request.user\n\n return (super().should_render(context=context) and\n user.is_authenticated and\n not is_site_read_only_for(user) and\n not unified_banner_feature.is_enabled(request=request))", "def item_filter(item):\n\tcch_geoserver_services = get_only_cch_geoserver_services(item['services'])\n\thas_cch_geoserver_services = 0 != len(cch_geoserver_services)\n\tis_data = 'data' == item['itemType']\n\treturn is_data and has_cch_geoserver_services;", "def import_only(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"import_only\")", "def __contains__(self, item):\n return self.settings.has(item)", "def test_should_render_with_user_in_read_only(self) -> None:\n self.request.user = User.objects.get(username='doc')\n\n # Turning on read-only mode prevents creation of some objects so call\n # _create_request_context first.\n request_context = self._create_request_context(user=self.request.user)\n\n settings = {\n 'site_read_only': True,\n }\n\n with override_feature_check(unified_banner_feature.feature_id, False):\n 
with self.siteconfig_settings(settings):\n if getattr(self, 'read_only_always_show', False):\n self.assertTrue(\n self.action.should_render(context=request_context))\n else:\n self.assertFalse(\n self.action.should_render(context=request_context))", "def _check_config(self):", "def _should_profile(self) -> bool:\n if \"profile\" in self._allowed_plugins:\n if not self._one_shot:\n raise ValueError(\n \"Profile plugin currently only supported for one shot.\"\n )\n logger.info(\"Profile plugin is enalbed.\")\n return True\n return False", "def test_app_is_production(self):\n self.assertFalse(app.config['DEBUG'])\n self.assertFalse(app.config['TESTING'])", "def remote_publishing():\n return hasattr(settings, 'NEWS_REMOTE_PUBLISHING')", "def import_only(self) -> pulumi.Output[bool]:\n return pulumi.get(self, \"import_only\")", "def _target_filter(self, obj):\r\n return type(obj).__name__ in ['Cube'] and not obj.is_grasped # List because may be extended to other objects.\r", "def _filter(self, entry):\n host = entry.get('@source_host', '')\n\n # errors will most likely come from job-s1\n if not is_from_production_host(host):\n return False\n\n return True", "def must_skip(self, item):\n user = c.user if c.user_is_loggedin else None\n\n if hasattr(item, \"promoted\") and item.promoted is not None:\n return False\n\n # can_view_slow only exists for Messages, but checking was_comment\n # is also necessary because items may also be comments that are being\n # viewed from the inbox page where their render class is overridden.\n # This check needs to be done before looking at whether they can view\n # the subverbify, or modmail to/from private subverbifys that the user\n # doesn't have access to will be skipped.\n if hasattr(item, 'can_view_slow') and not item.was_comment:\n return not item.can_view_slow()\n\n if hasattr(item, 'subverbify') and not item.subverbify.can_view(user):\n return True", "def filterVarForWizard(self, v):\n return v.isMeasurement()", "def app_config_has(self, field) -> bool:\n if not self.app_config():\n return False\n return field in self.app_config()", "def _task_filter(self, task):\n name = task.Name()\n if name.startswith('PS_CallAccountReports') and name.endswith('SERVER'):\n return True\n return False", "def __contains__(self, attr):\n return attr in self._config", "def is_to_filter(self):\n if not self.app.args.filter is None:\n # Check the flag value to evite problem in search process\n ok = self.validate_value_flag()\n\n if ok is False:\n fatal([\n 'Invalid value for \"value\" flag',\n 'The value flag is required to filter',\n 'Use instead:',\n '$ tasks-app show --filter/-f={} --value/-v=VALUE'.format(self.app.args.filter),\n ])\n else:\n return True\n else:\n return False", "def check_settings(self):\r\n pass", "def test_itar_restrict_test_asset(self):\n pass", "def is_production_mode(self):\n return getattr(self.env, 'mode', None) == 'production'", "def _validate_mostly_config(configuration: ExpectationConfiguration) -> None:\n if \"mostly\" in configuration.kwargs:\n mostly = configuration.kwargs[\"mostly\"]\n assert isinstance(\n mostly, (int, float)\n ), \"'mostly' parameter must be an integer or float\"\n assert 0 <= mostly <= 1, \"'mostly' parameter must be between 0 and 1\"" ]
[ "0.6463544", "0.5860176", "0.58197325", "0.5681499", "0.56378657", "0.5598911", "0.54860127", "0.5456772", "0.5450269", "0.5384088", "0.5370349", "0.5365668", "0.53368825", "0.533252", "0.5327629", "0.5327629", "0.531436", "0.52009016", "0.5182213", "0.5179538", "0.51759523", "0.5131257", "0.5122531", "0.5103744", "0.510184", "0.50826114", "0.50709385", "0.50681", "0.5043766", "0.5042357", "0.50258386", "0.50211906", "0.49972102", "0.49945116", "0.49818656", "0.49720478", "0.49672773", "0.49533704", "0.49480543", "0.49427122", "0.49370524", "0.49164292", "0.49143595", "0.49133328", "0.4902605", "0.49022087", "0.48907372", "0.48786712", "0.48731187", "0.48688906", "0.4866774", "0.486183", "0.48608962", "0.48554784", "0.4854092", "0.4835523", "0.48213938", "0.48136765", "0.48108286", "0.48074418", "0.48043576", "0.48023808", "0.4800863", "0.4787959", "0.47858888", "0.47796538", "0.4779586", "0.47718343", "0.47704253", "0.4767788", "0.4763986", "0.47603273", "0.47424123", "0.47417784", "0.47385713", "0.4736368", "0.4732005", "0.47294623", "0.4728843", "0.4728843", "0.4720738", "0.4720468", "0.47153178", "0.47126883", "0.46978432", "0.46893123", "0.4685996", "0.46825668", "0.46786085", "0.46785164", "0.46759233", "0.46690106", "0.46608996", "0.4652349", "0.46453464", "0.46426335", "0.46383184", "0.46381858", "0.46377677", "0.46335188", "0.46319234" ]
0.0
-1
Test whether the auditor respects the alert schedule
def test_alert_schedule(cinq_test_service):
    setup_info = setup_test_aws(cinq_test_service)
    account = setup_info['account']

    prep_s3_testing(cinq_test_service)

    # Add resources
    client = aws_get_client('s3')
    bucket_name = dbconfig.get('test_bucket_name', NS_CINQ_TEST, default='testbucket')
    client.create_bucket(Bucket=bucket_name)

    # Collect resources
    collect_resources(account=account, resource_types=['s3'])

    # Initialize auditor
    auditor = MockRequiredTagsAuditor()

    # Test 1 --- The auditor should not alert again as we are not at the next scheduled alert time
    auditor.run()
    assert auditor._cinq_test_notices

    auditor.run()
    assert not auditor._cinq_test_notices
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_for_alerts(self, cr, uid, context=None):\n\n dept_obj = self.pool.get('hr.department')\n detail_obj = self.pool.get('hr.schedule.detail')\n attendance_obj = self.pool.get('hr.attendance')\n rule_obj = self.pool.get('hr.schedule.alert.rule')\n\n # TODO - Someone who cares about DST should fix ths\n #\n data = self.pool.get('res.users').read(\n cr, uid, uid, ['tz'], context=context)\n dtToday = datetime.strptime(\n datetime.now().strftime('%Y-%m-%d') + ' 00:00:00',\n '%Y-%m-%d %H:%M:%S')\n lcldtToday = timezone(data['tz'] and data['tz'] or 'UTC').localize(\n dtToday, is_dst=False)\n utcdtToday = lcldtToday.astimezone(utc)\n utcdtYesterday = utcdtToday + relativedelta(days=-1)\n strToday = utcdtToday.strftime('%Y-%m-%d %H:%M:%S')\n strYesterday = utcdtYesterday.strftime('%Y-%m-%d %H:%M:%S')\n\n dept_ids = dept_obj.search(cr, uid, [], context=context)\n for dept in dept_obj.browse(cr, uid, dept_ids, context=context):\n for employee in dept.member_ids:\n\n # Get schedule and attendance records for the employee for the\n # day\n #\n sched_detail_ids = detail_obj.search(\n cr, uid, [\n ('schedule_id.employee_id', '=', employee.id),\n '&',\n ('date_start', '>=', strYesterday),\n ('date_start', '<', strToday),\n ],\n order='date_start',\n context=context\n )\n attendance_ids = attendance_obj.search(\n cr, uid, [\n ('employee_id', '=', employee.id),\n '&',\n ('name', '>=', strYesterday),\n ('name', '<', strToday),\n ],\n order='name',\n context=context\n )\n\n # Run the schedule and attendance records against each active\n # rule, and create alerts for each result returned.\n #\n rule_ids = rule_obj.search(\n cr, uid, [('active', '=', True)], context=context)\n for rule in rule_obj.browse(\n cr, uid, rule_ids, context=context):\n res = rule_obj.check_rule(\n cr, uid, rule, detail_obj.browse(\n cr, uid, sched_detail_ids, context=context),\n attendance_obj.browse(\n cr, uid, attendance_ids, context=context),\n context=context\n )\n\n for strdt, attendance_id in res['punches']:\n # skip if it has already been triggered\n ids = self.search(\n cr, uid, [\n ('punch_id', '=', attendance_id),\n ('rule_id', '=', rule.id),\n ('name', '=', strdt),\n ], context=context)\n if len(ids) > 0:\n continue\n\n self.create(\n cr, uid, {\n 'name': strdt,\n 'rule_id': rule.id,\n 'punch_id': attendance_id,\n }, context=context\n )\n\n for strdt, detail_id in res['schedule_details']:\n # skip if it has already been triggered\n ids = self.search(\n cr, uid, [\n ('sched_detail_id', '=', detail_id),\n ('rule_id', '=', rule.id),\n ('name', '=', strdt),\n ], context=context)\n if len(ids) > 0:\n continue\n\n self.create(\n cr, uid, {\n 'name': strdt,\n 'rule_id': rule.id,\n 'sched_detail_id': detail_id,\n }, context=context\n )", "def checkUpstreamScheduler():", "def _isalarm(self):\n return self.dp.state()==PyTango.DevState.ALARM", "def is_triggered_police(self):\n return self == ArmingState.ALARMING", "def test_appointment_date(self):\n # Default for email\n appt_date = datetime.date.today() + datetime.timedelta(days=7) \n self.create_confirmed_notification(self.test_patient, appt_date)\n self.create_unconfirmed_notification(self.other_patient, appt_date)\n\n # run email job\n from aremind.apps.reminders.app import daily_email_callback\n daily_email_callback(self.router)\n\n self.assertEqual(len(mail.outbox), 1)\n message = mail.outbox[0]\n self.assertPatientInMessage(message, self.test_patient)\n self.assertPatientInMessage(message, self.other_patient)\n self.assertPatientNotInMessage(message, 
self.unrelated_patient)", "def is_alarm():\n return _alarm", "def should_fire(self, time_domain, timestamp, window, context):\n pass", "def is_incident_to_alert(self, marketplace: MarketplaceVersions) -> bool:\n return False", "def test_alert_low(fd, formatter, event):\n ao = AlertObserver(3, fd=fd, formatter=formatter, event=event)\n ao.start()\n event.run_until_wait()\n for _ in range(360):\n ao.update(DummyLogRecord())\n event.run_until_wait() # 'alarm triggered'\n for _ in range(359):\n ao.update(DummyLogRecord())\n event.run_until_wait() # 'alarm recovered'\n event.stop()\n assert fd.getvalue() == 'alarm triggered\\nalarm recovered\\n'", "def exec_cond(message, session):\n if message[\"text\"] == buttons[\"schedule\"]:\n return True\n elif message[\"text\"] in get_days():\n session[\"state\"] = states[\"schedule\"]\n return True\n else:\n return False", "async def _check_schedule(self, now, last):\n\n if self._schedule is None:\n return\n\n for event in self._schedule.events:\n if event.begin <= now:\n if event.begin > last:\n await self._announce_event(event)", "def alerted(self) -> bool:\n\t\treturn self._raw_result['data']['alerted']", "def test_appointment_date(self):\n appt_date = datetime.date.today() + datetime.timedelta(days=7) # Default for email\n reminders.Patient.objects.filter(\n pk__in=[self.test_patient.pk, self.other_patient.pk]\n ).update(next_visit=appt_date)\n confirmed = self.create_confirmed_notification(self.test_patient, appt_date)\n unconfirmed = self.create_unconfirmed_notification(self.other_patient, appt_date)\n\n self.startRouter()\n self.router.logger.setLevel(logging.DEBUG)\n\n # run email job\n from afrims.apps.reminders.app import daily_email_callback\n daily_email_callback(self.router)\n\n self.assertEqual(len(mail.outbox), 1)\n message = mail.outbox[0]\n self.assertPatientInMessage(message, self.test_patient)\n self.assertPatientInMessage(message, self.other_patient)\n self.assertPatientNotInMessage(message, self.unrelated_patient)\n self.stopRouter()", "def test_get_alert(self):\n dweepy.set_alert(\n self.my_thing_id,\n ['[email protected]', '[email protected]'],\n test_alert_condition,\n test_key,\n )\n alert = dweepy.get_alert(self.my_thing_id, test_key)\n self.assertEqual(alert['condition'], test_alert_condition)", "def is_in_advent() -> bool:\n # Run the code from the 1st to the 24th\n return datetime.now(EST).day in range(1, 25) and datetime.now(EST).month == 12", "def test_meeting_status(self):\n pass", "def check_alert(self, event):\n \n # Get board logger\n board_logger = self.get_board_logger()\n\n # Loop for an hour and continue to alert every ten minutes \n current_time = datetime.now()\n end_time = current_time + timedelta(0, 60)\n # end_time = current_time + timedelta(hours=1)\n\n alarm_counter = 0\n while current_time < end_time:\n # Sleep for 10 minutes\n sleep(10);\n #sleep(600);\n\n # Prevent race condition between Board input_status and check_alert \n if GPIO.input(self.__pin) == 1:\n\n # Log alarm cycle\n alarm_counter += 1\n board_logger.info(\"Alarm Cycle #%s: Initiating event \" \n + \"alert.\", str(alarm_counter))\n\n # Call Event object's alert method\n event.alert(self.__ip, board_logger)\n\n # Get current time\n current_time = datetime.now()\n \n else:\n # Input status is 0 indicating recovery; Break out of loop and \n # return to main thread \n board_logger.info(\"Alarm state recovery.\") \n break\n \n # End of alert cycle; Return to main thread\n status = \"ALARM\" if self.get_input_status() else \"RECOVERY\"\n 
board_logger.info(\"End check alarm cycle. Current pin input \"\n + \"status is %s.\", status)", "def test_otoroshi_controllers_adminapi_events_controller_alert_events(self):\n pass", "def test_alerting_works():\n prometheus = ocs_ci.utility.prometheus.PrometheusAPI()\n alerts_response = prometheus.get(\n \"alerts\", payload={\"silenced\": False, \"inhibited\": False}\n )\n assert alerts_response.ok is True\n alerts = alerts_response.json()[\"data\"][\"alerts\"]\n log.info(f\"Prometheus Alerts: {alerts}\")\n assert len(alerts) > 0", "def test_autoscaling_schedules_unset(self) -> None:\n if self.prod_env:\n schedules = self.autoscaling.describe_scheduled_actions(AutoScalingGroupName='saints-xctf-server-prod-asg')\n self.assertTrue(len(schedules.get('ScheduledUpdateGroupActions')) == 0)\n else:\n self.assertTrue(all([\n self.validate_autoscaling_schedule('saints-xctf-server-online-weekday-morning',\n recurrence='30 11 * * 1-5', max_size=1, min_size=1, desired_size=1),\n self.validate_autoscaling_schedule('saints-xctf-server-offline-weekday-morning',\n recurrence='30 13 * * 1-5', max_size=0, min_size=0, desired_size=0),\n self.validate_autoscaling_schedule('saints-xctf-server-online-weekday-afternoon',\n recurrence='30 22 * * 1-5', max_size=1, min_size=1, desired_size=1),\n self.validate_autoscaling_schedule('saints-xctf-server-offline-weekday-night',\n recurrence='30 3 * * 2-6', max_size=0, min_size=0, desired_size=0),\n self.validate_autoscaling_schedule('saints-xctf-server-online-weekend', recurrence='30 11 * * 0,6',\n max_size=1, min_size=1, desired_size=1),\n self.validate_autoscaling_schedule('saints-xctf-server-offline-weekend', recurrence='30 3 * * 0,1',\n max_size=0, min_size=0, desired_size=0)\n ]))", "def test_success_edit(event_member):\n _, member, event_id = event_member\n current = date.today() + timedelta(days=1)\n start = datetime.combine(current, time(19, 30))\n end = start + timedelta(hours=2, minutes=30)\n edit(member.username, event_id, True, start, end)\n\n # Check that the user's availability was updated\n schedule = data.events[event_id].availabilities[member.username].times\n days_from_creation = 1\n start_index = 2 * start.hour + start.minute // 30\n end_index = 2 * end.hour + end.minute // 30\n\n for d in range(MAX_DAYS):\n if any(schedule[d]):\n print(d, schedule[d])\n for t in range(INTERVALS):\n if d == days_from_creation and start_index <= t < end_index:\n assert schedule[d][t]\n else:\n assert not schedule[d][t]", "def alert(self):\n\n # Get board logger\n board_logger = self.get_board_logger()\n\n # Create new Event object to handle event communication\n event = Event(datetime.now(), self.get_input_status())\n \n event.alert(self.__ip, board_logger)\n\n if (self.get_input_status() == 1):\n \n board_logger.info(\"Alarm state active; starting check alert \" \n + \"cycle for 6 cycles.\")\n \n self.check_alert(event)", "def test_notification_schedule(self):\n\n response = self.client.get(self.dashboard_url)\n self.assertEqual(response.status_code, 200)", "def test_notification_schedule(self):\n\n response = self.client.get(self.dashboard_url)\n self.assertEqual(response.status_code, 200)", "def test_alert_high(fd, formatter, event):\n ao = AlertObserver(3, fd=fd, formatter=formatter, event=event)\n ao.start()\n event.run_until_wait()\n for _ in range(360):\n ao.update(DummyLogRecord())\n event.run_until_wait() # 'alarm triggered'\n event.stop()\n assert fd.getvalue() == 'alarm triggered\\n'", "def sleep_in(weekday, vacation):\r\n if not weekday or vacation:\r\n 
return True\r\n return False", "def sleep_in(weekday, vacation):\r\n if not weekday or vacation:\r\n return True\r\n else:\r\n return False", "def test_alert_low_doesnt_fire_twice(fd, formatter, event):\n ao = AlertObserver(3, fd=fd, formatter=formatter, event=event)\n ao.start()\n event.run_until_wait()\n for _ in range(360):\n ao.update(DummyLogRecord())\n event.run_until_wait() # 'alarm triggered'\n for _ in range(359):\n ao.update(DummyLogRecord())\n event.run_until_wait() # 'alarm recovered'\n for _ in range(359):\n ao.update(DummyLogRecord())\n event.run_until_wait() # no print\n event.stop()\n assert fd.getvalue() == 'alarm triggered\\nalarm recovered\\n'", "def alert(self):\n\n self.loadData()\n\n for path, data in self.behavData:\n self.parseFilePath(path)\n\n # Only call the alarms on animals that we care about.\n if(self.subjectName in self.subjects):\n if(self.log):\n logging.debug(\"Using data from \" + self.subjectName)\n\n self.subjects.remove(self.subjectName)\n\n self.belowThresholdAlarm(data)\n self.aboveThresholdAlarm(data)\n\n self.isMissingDataAlarm()", "def isOn(self):\r\n return len(self.__agenda)>2", "def test_execute_monitoring_schedule_vendor_v3(self):\n pass", "def test_alert_high_doesnt_fire_twice(fd, formatter, event):\n ao = AlertObserver(3, fd=fd, formatter=formatter, event=event)\n ao.start()\n event.run_until_wait()\n for _ in range(360):\n ao.update(DummyLogRecord())\n event.run_until_wait() # 'alarm triggered'\n for _ in range(360):\n ao.update(DummyLogRecord())\n event.run_until_wait() # no print\n event.stop()\n assert fd.getvalue() == 'alarm triggered\\n'", "def test_eval_alarm(self):\n def get_state_update_value(h):\n \"\"\"\n \n \"\"\"\n oldstate = h.data['oldState']['stateValue']\n newstate = h.data['newState']['stateValue']\n querydate = h.data['newState']['stateReasonData']['queryDate']\n querydate = utils.parse_strtime(querydate)\n return oldstate, newstate, querydate \n \n test_uuid = str(uuid.uuid4())\n alarmname = \"TestEvalAlarm_\" + test_uuid\n metricname = \"TestEvalMetric_\" + test_uuid\n namespace = \"unittest\"\n unit = \"Percent\"\n dimensions = {\"test_id\":test_uuid}\n threshold = 2.0\n \n # create metric alarm\n alarm = MetricAlarm(name=alarmname, metric=metricname,\n namespace=namespace, statistic=\"Average\",\n comparison=\">\", threshold=threshold,\n period=60, evaluation_periods=1, unit=unit,\n dimensions=dimensions)\n self.synaps.put_metric_alarm(alarm)\n \n # due to put_metric_alarm is asynchronous\n time.sleep(ASYNC_WAIT)\n \n alarm_time = datetime.datetime.utcnow().replace(second=0,\n microsecond=0)\n self.synaps.put_metric_data(namespace=namespace, name=metricname,\n value=threshold + 1, timestamp=alarm_time,\n unit=unit, dimensions=dimensions)\n\n time.sleep(60 * 5)\n\n ok_time = datetime.datetime.utcnow().replace(second=0,\n microsecond=0) \n self.synaps.put_metric_data(namespace=namespace, name=metricname,\n value=threshold - 2, timestamp=ok_time,\n unit=unit, dimensions=dimensions)\n\n time.sleep(60 * 5)\n \n histories = self.synaps.describe_alarm_history(alarm_name=alarmname,\n history_item_type=\"StateUpdate\")\n histories.sort(cmp=lambda a, b: cmp(a.timestamp, b.timestamp))\n\n result = map(get_state_update_value, histories)\n \n expected = (('INSUFFICIENT_DATA', 'ALARM', alarm_time),\n ('ALARM', 'INSUFFICIENT_DATA', None),\n ('INSUFFICIENT_DATA', 'OK', ok_time),\n ('OK', 'INSUFFICIENT_DATA', None))\n \n failmsg = \"expected: %s real: %s\" % (expected, result)\n \n self.assertEqual(len(result), len(expected), 
msg=failmsg)\n \n for ((r_new, r_old, r_time), (e_new, e_old, e_time)) in zip(result,\n expected):\n self.assertEqual(r_new, e_new, msg=failmsg)\n self.assertEqual(r_old, e_old, msg=failmsg)\n if e_time:\n self.assertTrue((r_time - e_time) < timedelta(seconds=300),\n msg=failmsg)\n \n self.synaps.delete_alarms(alarms=[alarmname])", "def test_alert_create(self):\n pass", "def test_monitorclient_alert_email_rbac(self, monitor_commercial_setup_no_client):\n # first let's get the OK and CRITICAL email alerts {{{\n mailbox_path = \"/var/spool/mail/local\"\n wait_for_alert_interval_s = 8\n expected_from = \"[email protected]\"\n service_name = \"crond\"\n user_name = \"[email protected]\"\n devid, _, auth, mender_device = self.prepare_env(\n monitor_commercial_setup_no_client, user_name\n )\n logger.info(\"test_monitorclient_alert_email_rbac: env ready.\")\n\n prepare_service_monitoring(mender_device, service_name)\n time.sleep(2 * wait_for_alert_interval_s)\n\n mender_device.run(\"systemctl stop %s\" % service_name)\n logger.info(\n \"Stopped %s, sleeping %ds.\" % (service_name, wait_for_alert_interval_s)\n )\n time.sleep(wait_for_alert_interval_s)\n\n mail = monitor_commercial_setup_no_client.get_file(\"local-smtp\", mailbox_path)\n messages = parse_email(mail)\n assert len(messages) > 0\n\n m = messages[0]\n assert \"To\" in m\n assert \"From\" in m\n assert \"Subject\" in m\n assert m[\"To\"] == user_name\n assert m[\"From\"] == expected_from\n assert (\n m[\"Subject\"]\n == \"[CRITICAL] \" + service_name + \" on \" + devid + \" status: not-running\"\n )\n logger.info(\"test_monitorclient_alert_email_rbac: got CRITICAL alert email.\")\n\n mender_device.run(\"systemctl start %s\" % service_name)\n logger.info(\n \"Started %s, sleeping %ds\" % (service_name, wait_for_alert_interval_s)\n )\n time.sleep(wait_for_alert_interval_s)\n mail = monitor_commercial_setup_no_client.get_file(\"local-smtp\", mailbox_path)\n logger.debug(\"got mail: '%s'\", mail)\n messages = parse_email(mail)\n for m in messages:\n logger.debug(\"got message:\")\n logger.debug(\" body: %s\", m.get_body().get_content())\n logger.debug(\" To: %s\", m[\"To\"])\n logger.debug(\" From: %s\", m[\"From\"])\n logger.debug(\" Subject: %s\", m[\"Subject\"])\n\n messages_count = len(messages)\n assert messages_count > 1\n m = messages[1]\n assert \"To\" in m\n assert \"From\" in m\n assert \"Subject\" in m\n assert m[\"To\"] == user_name\n assert m[\"From\"] == expected_from\n assert (\n m[\"Subject\"] == \"[OK] \" + service_name + \" on \" + devid + \" status: running\"\n )\n logger.info(\"test_monitorclient_alert_email_rbac: got OK alert email.\")\n # }}} we got the CRITICAL and OK emails\n\n # let's add a role, that will allow user to view only devices of given group {{{\n uadm = ApiClient(\n host=get_container_manager().get_mender_gateway(),\n base_url=useradm.URL_MGMT,\n )\n\n role = {\n \"name\": \"deviceaccess\",\n \"permissions\": [\n {\n \"action\": \"VIEW_DEVICE\",\n \"object\": {\"type\": \"DEVICE_GROUP\", \"value\": \"fullTestDevices\"},\n }\n ],\n }\n res = uadm.call(\n \"POST\", useradm.URL_ROLES, headers=auth.get_auth_token(), body=role\n )\n assert res.status_code == 201\n logger.info(\n \"test_monitorclient_alert_email_rbac: added role: restrict access to a group.\"\n )\n # }}} role added\n\n # let's set the role for the user {{{\n res = uadm.call(\"GET\", useradm.URL_USERS, headers=auth.get_auth_token())\n assert res.status_code == 200\n logger.info(\n \"test_monitorclient_alert_email_rbac: \"\n \"get users: http 
rc: %d; response body: '%s'; \"\n % (res.status_code, res.json())\n )\n users = res.json()\n res = uadm.call(\n \"PUT\",\n useradm.URL_USERS_ID.format(id=users[0][\"id\"]),\n headers=auth.get_auth_token(),\n body={\"roles\": [\"deviceaccess\"]},\n )\n assert res.status_code == 204\n logger.info(\"test_monitorclient_alert_email_rbac: role assigned to user.\")\n # }}} user has access only to fullTestDevices group\n\n # let's stop the service by name=service_name\n mender_device.run(\"systemctl stop %s\" % service_name)\n logger.info(\n \"Stopped %s, sleeping %ds.\" % (service_name, wait_for_alert_interval_s)\n )\n time.sleep(wait_for_alert_interval_s)\n\n mail = monitor_commercial_setup_no_client.get_file(\"local-smtp\", mailbox_path)\n messages = parse_email(mail)\n assert len(messages) == messages_count\n # we did not receive any email -- user has no access to the device\n logger.info(\n \"test_monitorclient_alert_email_rbac: did not receive CRITICAL email alert.\"\n )\n\n mender_device.run(\"systemctl start %s\" % service_name)\n logger.info(\n \"Started %s, sleeping %ds\" % (service_name, wait_for_alert_interval_s)\n )\n time.sleep(wait_for_alert_interval_s)\n\n mail = monitor_commercial_setup_no_client.get_file(\"local-smtp\", mailbox_path)\n messages = parse_email(mail)\n assert len(messages) == messages_count\n # we did not receive any email -- user has no access to the device\n logger.info(\n \"test_monitorclient_alert_email_rbac: did not receive OK email alert.\"\n )", "def test_set_alert(self):\n alert = dweepy.set_alert(\n self.my_thing_id,\n ['[email protected]', '[email protected]'],\n test_alert_condition,\n test_key,\n )\n self.assertEqual(alert['condition'], test_alert_condition)", "def test_manage_report_schedule_enums(\n self, api_instance: Reports, report_type, schedule\n ):\n params = api_instance.manage_report_schedule(\n report_type=report_type,\n schedule=schedule,\n )\n self.assert_common_params(params, action=\"ManageReportSchedule\")\n assert params[\"ReportType\"] == \"_GET_STRANDED_INVENTORY_UI_DATA_\"\n assert params[\"Schedule\"] == \"_30_MINUTES_\"", "def check_wrong_time(self, cr, uid, att, context=None):\n # check have overtime yet?\n att_name = datetime.strptime(att.name, DEFAULT_SERVER_DATETIME_FORMAT)\n param_obj = self.pool.get('ir.config_parameter') \n max_early = param_obj.get_param(cr, uid, 'maximum_early_minutes', default=60)\n max_late = param_obj.get_param(cr, uid, 'maximum_late_minutes', default=60)\n try:\n max_early = int (max_early)\n max_late = int (max_late)\n except:\n raise except_osv(_(\"Warning !\"),_(\"maximum_early_minutes or maximum_late_minutes in config parameter is incorrect\"))\n \n time_early = att_name + timedelta(minutes = max_early)\n time_late = att_name - timedelta(minutes = max_late)\n \n overtime_obj = self.pool.get('hr.overtime')\n overtime_confirmed_ids = overtime_obj.search(cr, uid, [('employee_id', '=', att.employee_id.id),\n ('mode', '=', 'by_employee'),\n ('name', '=', att.day_tz),\n ('datetime_start', '<=', time_early.strftime(DEFAULT_SERVER_DATETIME_FORMAT)),\n ('datetime_stop', '>=', time_late.strftime(DEFAULT_SERVER_DATETIME_FORMAT)),\n ('state', 'in', ['confirmed'])\n ])\n if overtime_confirmed_ids:\n return False\n working_hour_obj = self.pool.get('hr.payroll.working.hour')\n \n \n \n \n working_hour_ids = working_hour_obj.search(cr, uid, [('employee_id', '=', att.employee_id.id),\n ('expected_start', '<=', time_early.strftime(DEFAULT_SERVER_DATETIME_FORMAT)),\n ('expected_end', '>=', 
time_late.strftime(DEFAULT_SERVER_DATETIME_FORMAT)),\n ], context=context)\n if not working_hour_ids:\n return True\n return False", "def test_add_recurring_schedule(self):\n pass", "def test_check_alarm(self):\n\n fail_msg = \"Creation instance failed\"\n\n create_kwargs = {}\n if 'neutron' in self.config.network.network_provider:\n network = [net.id for net in\n self.compute_client.networks.list()\n if net.label == self.private_net]\n\n create_kwargs = {'nics': [{'net-id': network[0]}]}\n\n image = nmanager.get_image_from_name()\n name = rand_name('ost1_test-instance-alarm_actions')\n self.instance = self.verify(600, self.compute_client.servers.create, 1,\n fail_msg,\n \"server creation\",\n name=name,\n flavor=self.flavor,\n image=image,\n **create_kwargs)\n self.set_resource(self.instance.id, self.instance)\n\n self.verify(200, self._wait_for_instance_metrics, 2,\n \"instance is not available\",\n \"instance becoming 'available'\",\n self.instance, 'ACTIVE')\n\n fail_msg = \"Creation metrics failed.\"\n\n statistic_meter_resp = self.verify(600, self.wait_for_instance_metrics, 3,\n fail_msg,\n \"metrics created\",\n self.meter_name)\n\n fail_msg = \"Creation alarm failed.\"\n threshold = statistic_meter_resp[0].avg - 1\n create_alarm_resp = self.verify(5, self.create_alarm,\n 4, fail_msg, \"alarm_create\",\n meter_name=self.meter_name,\n threshold=threshold,\n name=self.name,\n period=self.period,\n statistic=self.statistic,\n comparison_operator=self.comparison_operator)\n\n fail_msg = \"Alarm verify state failed.\"\n\n self.verify(1000, self.wait_for_alarm_status, 5,\n fail_msg,\n \"alarm status becoming 'alarm'\",\n create_alarm_resp.alarm_id)", "def check_event_status(self):\n pass", "def test_overlapping_events(sample_events, woodshop, caplog):\n caplog.set_level(logging.INFO)\n event1, event2 = sample_events.make_overlapping_events()\n overlap_events(event1, event2, woodshop, woodshop, [woodshop])\n assert len(caplog.messages) == 1\n message = caplog.messages[0]\n assert \"Schedule conflict: place='Woodshop'\" in message\n expected_conflict_times = \"Conflict(start_time='{}', end_time='{}',\".format(\n event2.start_time, event1.end_time)\n assert expected_conflict_times in message\n assert event1.meetup_id in message\n assert event2.meetup_id in message", "def test_escalate(client):\n g.test_authorized_for = []\n res = client.get(\"/v0/escalate\" + get_request_args)\n assert \"Thanks! 
This alert has been escalated\" in res.data.decode(\"utf-8\")", "def alert(self, alert_code: Alert) -> bool:\n assert isinstance(alert_code, Alert), f\"alert_code {alert_code} is no Alert\"\n return alert_code.value in self.state.alerts", "def test_list_schedules(self):\n pass", "def test_execute_monitoring_schedule_manufacturer_v3(self):\n pass", "def test_check_user_cals(self):\n instmap = FakeOpenPulse2Q().defaults().instruction_schedule_map\n\n test_u1 = Schedule()\n test_u1 += ShiftPhase(Parameter(\"P0\"), DriveChannel(0))\n\n instmap.add(\"u1\", (0,), test_u1, arguments=[\"P0\"])\n publisher = instmap.get(\"u1\", (0,), P0=0).metadata[\"publisher\"]\n\n self.assertEqual(publisher, CalibrationPublisher.QISKIT)", "def test_alert_should_trigger(self, mock_print_alert):\n\n w = Worker(mock.Mock(), mock.Mock(), WORKER_CONFIG)\n\n w.tracking = deque([20, 20])\n\n w._alert()\n\n assert w.alert\n mock_print_alert.assert_called_with(\"TRIGGERED\")", "def test_get_monitoring_schedules_vendor_v3(self):\n pass", "def alert(self):\n now = datetime.now()\n for period, attempts in conf.LOGIN_GUARD_FREQUENCY_ALERT:\n start_time = now - timedelta(seconds=period)\n nb_events = LoginEvent.objects.\\\n filter(who=self.who, when__gt=start_time).\\\n count()\n if nb_events >= attempts:\n subject = u\"%s: %s\" % (_('alert login attempts'), self.who)\n message = u\"%s %s %s\" % (\n nb_events,\n _('attempts in'),\n timedelta(seconds=period))\n mail_admins(subject, message)", "def test_negative_is_active_of_homework():\n assert not expired_hw.is_active()", "def test_past_meeting_details(self):\n pass", "def test_calendar_query_todo_alarm(self):\n raise SkipTest(\"test unimplemented\")", "def audio_event_detection(self):\n # Test if trials already exist\n if 'TimeIntervals_speaker' not in self.model.nwb.intervals:\n # Test if file contains audio signals\n if any(name in self.model.nwb.stimulus for name in ['speaker1', 'speaker2']):\n AudioEventDetection(parent=self)\n else:\n NoAudioDialog()\n else:\n ExistIntervalsDialog()", "def test_booked_event_has_true_flag(self):\r\n user = ViewAfishaTests.mentor\r\n event = EventFactory(city=user.profile.city)\r\n EventParticipantFactory(\r\n event=event,\r\n user=user,\r\n )\r\n\r\n client = self.return_authorized_user_client(user)\r\n response_data = client.get(path=EVENTS_URL, format=\"json\").data\r\n\r\n results = response_data.get(\"results\")\r\n record_dict = results[0]\r\n\r\n self.assertEqual(\r\n \"True\",\r\n str(record_dict.get(\"booked\")),\r\n msg=(\r\n \"Проверьте, что у мероприятий на которые \"\r\n \"пользователь подписан возвращается флаг \"\r\n \"booked': True.\"\r\n ),\r\n )", "def test_has_ended(self):\r\n self.assertTrue(self.past_show_certs.has_ended())\r\n self.assertTrue(self.past_noshow_certs.has_ended())\r\n self.assertFalse(self.future_show_certs.has_ended())\r\n self.assertFalse(self.future_noshow_certs.has_ended())", "def alarm(bot, job):\n message = MESSAGES[job.context]\n if len(message) <= 0:\n message = \"Alert set for right now\"\n bot.sendMessage(job.context, text=message)", "def alarm_in_setup_change():\n setup_write(\"!M1 meas interval\", \"00:01:00\")\n setup_write(\"!M2 meas interval\", \"00:01:00\")\n setup_write(\"!TX3 scheduled interval\", \"00:05:00\")", "def test_careers_invalid_student(self):\n student_id = '1234567890'\n result = self.ucuenca.schedule(student_id)\n self.assertFalse(result)", "def check_event_available(self,eid,new_attend_num):\n event_info = self.get_event_info(eid)\n gacceptend = 
self.get_game_info(event_info['gid'],['gacceptend','gattend'])['gacceptend']\n if int(time.time()) > gacceptend: return 1 # attend end \n return 2 if int(event_info['eattend']) + int(new_attend_num) > int(event_info['emaxattend']) and int(event_info['emaxattend']) else True", "def check(self, critical_threshold):\n\n events = self._get_instances_pending_events()\n\n if not events:\n print 'OK: no pending events'\n return OK\n\n critical_events = []\n warning_events = []\n\n for event in events:\n event_time = datetime.strptime(event[3], '%Y-%m-%dT%H:%M:%S.000Z')\n # Are we close enough to the instance event that we should alert?\n if datetime.utcnow() > (event_time - timedelta(days=critical_threshold)):\n critical_events.append(event)\n else:\n warning_events.append(event)\n\n if critical_events:\n print 'CRITICAL: instances with events in %d days - %s' % (critical_threshold, \", \".join([\"%s(%s)\" % (event[0], event[1]) for event in critical_events]))\n return CRITICAL\n\n print 'WARNING: instances with scheduled events %s' % (\", \".join([\"%s(%s)\" % (event[0], event[1]) for event in warning_events]))\n return WARNING", "def test_meeting_polls(self):\n pass", "def test_alert_pop_up(self):\n\n # locators\n alert_button = 'alertbtn'\n\n # steps\n locate_alert_button = WebDriverWait(self.driver, 10).until(\n ec.visibility_of_element_located((By.ID, alert_button))\n )\n locate_alert_button.click()\n alert = self.driver.switch_to.alert\n print(alert.text)\n alert.accept()", "def test_alert_should_trigger_and_resolve(self, mock_print_alert):\n\n w = Worker(mock.Mock(), mock.Mock(), WORKER_CONFIG)\n\n w.tracking = deque([20, 20])\n\n # expects an alert here\n w._alert()\n\n assert w.alert\n assert not w.alert_history\n mock_print_alert.assert_called_with(\"TRIGGERED\")\n\n # simulate empty polls to lower average to 5\n for i in range(4):\n w.tracking.append(0)\n\n w._alert()\n\n assert not w.alert\n assert w.alert_history\n mock_print_alert.assert_called_with(\"REMOVED\")", "def test_same_start_events(sample_events, woodshop, caplog):\n caplog.set_level(logging.INFO)\n event1, event2 = sample_events.make_same_start_events()\n woodshop.start_event(event1)\n woodshop.start_event(event2)\n woodshop.log_conflicts(event2.start_time)\n woodshop.end_event(event1)\n woodshop.log_conflicts(event1.end_time)\n woodshop.end_event(event2)\n woodshop.log_conflicts(event2.end_time)\n assert len(caplog.messages) == 1\n message = caplog.messages[0]\n assert \"Schedule conflict: place='Woodshop'\" in message\n expected_conflict_times = \"Conflict(start_time='{}', end_time='{}',\".format(\n event1.start_time, event1.end_time)\n assert expected_conflict_times in message\n assert event1.meetup_id in message\n assert event2.meetup_id in message", "def test_ensure_not_ts_pass(self):\n self.assertEqual(ensure_not_ts(self.jobset1), 'completed')", "def test_alert_should_not_trigger(self, mock_print_alert):\n\n w = Worker(mock.Mock(), mock.Mock(), WORKER_CONFIG)\n\n w.tracking = deque([5, 5])\n\n w._alert()\n\n assert not w.alert\n mock_print_alert.assert_not_called()", "def testTrialEndedEarly(self):\n stats = self.default_statistics()\n trial_count = stats[str(0)][\"n\"] + 3\n sched, mock_runner = self.schedulerSetup(trial_count)\n\n t1, t2, t3 = sched._state[\"bracket\"].current_trials()\n for t in [t1, t2, t3]:\n mock_runner._launch_trial(t)\n\n sched.on_trial_complete(mock_runner, t3, result(1, 12))\n self.assertEqual(\n TrialScheduler.PAUSE,\n sched.on_trial_result(\n mock_runner, t1, result(stats[str(1)][\"r\"], 
10)))\n self.assertEqual(\n TrialScheduler.CONTINUE,\n sched.on_trial_result(\n mock_runner, t2, result(stats[str(1)][\"r\"], 10)))", "def test_meeting_registrant_status(self):\n pass", "def check_overtime(self, cr, uid, att, context=None):\n if att:\n overtime_obj = self.pool.get('hr.overtime')\n orertime_ids = overtime_obj.search(cr, uid, [('employee_id', '=', att.employee_id.id),\n ('mode', '=', 'by_employee'),\n ('name', '=', att.day_tz),\n ('datetime_start', '<=', att.name),\n ('datetime_stop', '>=', att.name),\n ('state', 'not in', ['cancel', 'confirmed', 'done'])\n ])\n if orertime_ids:\n return True\n return False", "def test_admin_alarm_admin_list(self):\n response = self.client.get(\"/admin/appointment/alarm/\")\n self.assertEqual(response.status_code, 200)", "def test_same_end_events(sample_events, woodshop, caplog):\n caplog.set_level(logging.INFO)\n event1, event2 = sample_events.make_same_end_events()\n woodshop.start_event(event1)\n woodshop.log_conflicts(event1.start_time)\n woodshop.start_event(event2)\n woodshop.log_conflicts(event2.start_time)\n woodshop.end_event(event1)\n woodshop.end_event(event2)\n woodshop.log_conflicts(event2.end_time)\n assert len(caplog.messages) == 1\n message = caplog.messages[0]\n assert \"Schedule conflict: place='Woodshop'\" in message\n expected_conflict_times = \"Conflict(start_time='{}', end_time='{}',\".format(\n event2.start_time, event2.end_time)\n assert expected_conflict_times in message\n assert event1.meetup_id in message\n assert event2.meetup_id in message", "def alarm_t(self, **kwargs):\n if self.verbose:\n print(\"\\t{} |{}| Initialization begins.\".format(Timer.OK, self.tinfo['name']))\n time_asleep = 1\n if self.testmode is False:\n while self.tinfo['alarm_time'] >= datetime.now():\n if time_asleep % 60 == 0:\n if self.verbose:\n print(\"|{}| +1 minute.\".format(datetime.now().strftime(\"%H:%M:%S\"))) \n time_asleep += 1\n sleep(1)\n self.execute_target(self.tinfo)\n return True\n elif self.testmode is True:\n print(\"\\t{} **** TESTMODE.Forcing immediate exec!\".format(Timer.OK))\n self.execute_target()\n return True\n else:\n print(\"\\t testmode must be True or False!\")\n return False", "def test_escalate_questions_cron(self, submit_ticket):\n\n questions_to_escalate = [\n # Questions over 24 hours old without an answer.\n question(\n created=datetime.now() - timedelta(hours=24, minutes=10),\n save=True),\n question(\n created=datetime.now() - timedelta(hours=24, minutes=50),\n save=True),\n ]\n\n # Question about Firefox OS\n fxos = product(slug='firefox-os', save=True)\n q = question(\n created=datetime.now() - timedelta(hours=24, minutes=10),\n product=fxos,\n save=True)\n questions_to_escalate.append(q)\n\n questions_not_to_escalate = [\n # Questions newer than 24 hours without an answer.\n question(save=True),\n question(created=datetime.now() - timedelta(hours=11), save=True),\n question(created=datetime.now() - timedelta(hours=21), save=True),\n ]\n\n # Question older than 24 hours with a recent answer.\n q = question(\n created=datetime.now() - timedelta(hours=24, minutes=10),\n save=True)\n answer(created=datetime.now() - timedelta(hours=10), question=q,\n save=True)\n answer(created=datetime.now() - timedelta(hours=1), creator=q.creator,\n question=q, save=True)\n questions_not_to_escalate.append(q)\n\n # Question older than 24 hours with a recent answer by the asker.\n q = question(\n created=datetime.now() - timedelta(hours=24, minutes=10),\n save=True)\n answer(\n created=datetime.now() - timedelta(hours=15), 
creator=q.creator,\n question=q, save=True)\n questions_not_to_escalate.append(q)\n\n # Question older than 24 hours without an answer already escalated.\n q = question(\n created=datetime.now() - timedelta(hours=24, minutes=10),\n save=True)\n q.tags.add(config.ESCALATE_TAG_NAME)\n questions_not_to_escalate.append(q)\n\n # Question with an inactive user.\n q = question(\n created=datetime.now() - timedelta(hours=24, minutes=10),\n save=True)\n q.creator.is_active = False\n q.creator.save()\n questions_not_to_escalate.append(q)\n\n # Question about Thunderbird, which is one of the products we exclude.\n tb = product(slug='thunderbird', save=True)\n q = question(\n created=datetime.now() - timedelta(hours=24, minutes=10),\n product=tb,\n save=True)\n questions_not_to_escalate.append(q)\n\n # Run the cron job and verify only 3 questions were escalated.\n eq_(len(questions_to_escalate), escalate_questions())", "def test_calendar(self):\n response = self.app.get(\"/schedule\")\n self.assertTrue(response.status_code, 200)", "def test_list_alerts(self):\n pass", "def test_current_hour_equal_to_sleep_hour(self):\n self.mock_clock.now.return_value = datetime.datetime(2016, 5, 24, 2)\n sleep_windows = [(2, 8)]\n pump_sched = pump_scheduler.PumpScheduler(self.mock_clock,\n sleep_windows)\n self.assertFalse(pump_sched.is_running_pump_allowed())", "def schedule_monitor(schedule):\n if schedule[\"state\"] == EC2State.STOPPED:\n if (date.today() - schedule[\"lastStateChange\"]).days >= 7 - schedule[\n \"schedule\"\n ]:\n schedule[\"state\"] = EC2State.STARTED\n elif schedule[\"state\"] == EC2State.STARTED:\n if (date.today() - schedule[\"lastStateChange\"]).days >= schedule:\n schedule[\"state\"] = EC2State.STOPPED\n else:\n return schedule, False\n\n return schedule, True", "def test_straddling_events(sample_events, woodshop, caplog):\n caplog.set_level(logging.INFO)\n event1, event2 = sample_events.make_straddling_events()\n woodshop.start_event(event1)\n woodshop.log_conflicts(event1.start_time)\n woodshop.start_event(event2)\n woodshop.log_conflicts(event2.start_time)\n woodshop.end_event(event2)\n woodshop.log_conflicts(event2.end_time)\n woodshop.end_event(event1)\n woodshop.log_conflicts(event1.end_time)\n assert len(caplog.messages) == 1\n message = caplog.messages[0]\n assert \"Schedule conflict: place='Woodshop'\" in message\n expected_conflict_times = \"Conflict(start_time='{}', end_time='{}',\".format(\n event2.start_time, event2.end_time)\n assert expected_conflict_times in message\n assert event1.meetup_id in message\n assert event2.meetup_id in message", "def test_changing_date(self):\n days = 2\n appt_date = datetime.date.today() + datetime.timedelta(days=days)\n confirmed = self.create_confirmed_notification(self.test_patient,\n appt_date)\n unconfirmed = self.create_unconfirmed_notification(self.other_patient,\n appt_date)\n\n # run email job\n from aremind.apps.reminders.app import daily_email_callback\n daily_email_callback(self.router, days=days)\n\n self.assertEqual(len(mail.outbox), 1)\n message = mail.outbox[0]\n self.assertPatientInMessage(message, self.test_patient)\n self.assertPatientInMessage(message, self.other_patient)\n self.assertPatientNotInMessage(message, self.unrelated_patient)", "def test_skip_if_no_patients(self):\n\n appt_date = datetime.date.today() + datetime.timedelta(days=5)\n confirmed = self.create_confirmed_notification(self.test_patient, appt_date)\n\n # run email job\n from aremind.apps.reminders.app import daily_email_callback\n 
daily_email_callback(self.router)\n\n self.assertEqual(len(mail.outbox), 0)", "def test_01_verify_eventing_plugin(self):\n self.fc.flow_load_home_screen()\n self.home.select_menu()\n self.home.select_eventing_plugin()\n if not self.eventing.verify_eventing_plugin_test():\n self.eventing.select_eventing_dispatch_open()\n self.eventing.select_eventing_plugin_test()\n assert self.eventing.eventing_test_result() == \"Event Sent!\"\n self.eventing.select_eventing_dispatch_close()", "def test_set_power_schedule_for_deployment_run(self):\n pass", "def test_past_event(self):\n pass", "def check_all_alert():\n warning = []\n \n all_alerts = db.get_table_content(\"Alert\")\n for alert in all_alerts:\n ticker = Ticker.Ticker(alert[0], True)\n \n if ticker.is_valid and ticker.last_price > 0:\n if alert[1] == \"up\":\n if ticker.last_price > alert[2]:\n tmp_alert = Alert(ticker, alert[1], alert[2], True)\n warning.append(tmp_alert)\n elif alert[1] == 'down':\n if ticker.last_price < alert[2]:\n tmp_alert = Alert(ticker, alert[1], alert[2], True)\n warning.append(tmp_alert)\n \n return warning", "def test_meeting_poll_update(self):\n pass", "def test_admin_alarm_admin_add(self):\n response = self.client.get(\"/admin/appointment/alarm/add/\")\n self.assertEqual(response.status_code, 200)", "def test_post_monitoring_schedule_vendor_v3(self):\n pass", "def test_accepted(self):\n actions = signoff_actions(appversions={\"code\": \"fx1.0\"},\n locales={\"code\": \"de\"})\n actions = list(actions)\n eq_(len(actions), 1)\n so = Signoff.objects.get(action=actions[0][0])\n eq_(so.push.tip.shortrev, \"l10n de 0002\")\n eq_(so.locale.code, \"de\")\n eq_(so.action_set.count(), 2)", "def test_current_hour_equal_to_wake_hour(self):\n self.mock_clock.now.return_value = datetime.datetime(2016, 5, 24, 8)\n sleep_windows = [(2, 8)]\n pump_sched = pump_scheduler.PumpScheduler(self.mock_clock,\n sleep_windows)\n self.assertTrue(pump_sched.is_running_pump_allowed())", "def verify_event_timing(self, event, item):\n return True", "def test_course_beta_period(self):\r\n self.assertFalse(self.course.has_started())\r\n\r\n # student user shouldn't see it\r\n self.assertFalse(has_access(self.normal_student, 'load', self.course))\r\n\r\n # now the student should see it\r\n self.assertTrue(has_access(self.beta_tester, 'load', self.course))", "def _anomaly_check(self, line):\n logger.info(self._get_inner_time() + ' < %s', line.strip())\n if not line or not line.strip().isdigit():\n # format error\n logger.error(self._get_inner_time() + ' ! `%s` can\\'t be parsed, int is required', line)\n return\n\n num = int(line.strip())\n if num in self.event_timestamps:\n # check if not already reported\n if self.alarms[num]:\n logger.error(self._get_inner_time() + ' ! you have already reported event n. %i', num)\n else:\n # check age of event\n last_allowed_timestamp = self.event_timestamps[num] + datetime.timedelta(hours=1)\n if self.last_two_timestamps[0] is None or self.last_two_timestamps[0] <= last_allowed_timestamp:\n self.alarms[num] = 1\n else:\n logger.error(\n self._get_inner_time() +\n ' ! late event %i reporting (event: %s, you already read events: %s and %s)',\n num, self.event_timestamps[num], self.last_two_timestamps[0], self.last_two_timestamps[1])\n else:\n logger.error(\n self._get_inner_time() + ' ! 
you are forbidden to predict event %i that you haven\\'t seen yet', num)", "def test_monitorclient_alert_email(self, monitor_commercial_setup_no_client):\n mailbox_path = \"/var/spool/mail/local\"\n wait_for_alert_interval_s = 8\n expected_from = \"[email protected]\"\n service_name = \"crond\"\n user_name = \"[email protected]\"\n devid, _, _, mender_device = self.prepare_env(\n monitor_commercial_setup_no_client, user_name\n )\n logger.info(\"test_monitorclient_alert_email: env ready.\")\n\n logger.info(\n \"test_monitorclient_alert_email: email alert on systemd service not running scenario.\"\n )\n prepare_service_monitoring(mender_device, service_name)\n time.sleep(2 * wait_for_alert_interval_s)\n\n mender_device.run(\"systemctl stop %s\" % service_name)\n logger.info(\n \"Stopped %s, sleeping %ds.\" % (service_name, wait_for_alert_interval_s)\n )\n time.sleep(wait_for_alert_interval_s)\n\n mail = monitor_commercial_setup_no_client.get_file(\"local-smtp\", mailbox_path)\n messages = parse_email(mail)\n\n assert len(messages) > 0\n m = messages[0]\n assert \"To\" in m\n assert \"From\" in m\n assert \"Subject\" in m\n assert m[\"To\"] == user_name\n assert m[\"From\"] == expected_from\n assert (\n m[\"Subject\"]\n == \"[CRITICAL] \" + service_name + \" on \" + devid + \" status: not-running\"\n )\n logger.info(\"test_monitorclient_alert_email: got CRITICAL alert email.\")\n\n mender_device.run(\"systemctl start %s\" % service_name)\n logger.info(\n \"Started %s, sleeping %ds\" % (service_name, wait_for_alert_interval_s)\n )\n time.sleep(wait_for_alert_interval_s)\n mail = monitor_commercial_setup_no_client.get_file(\"local-smtp\", mailbox_path)\n logger.debug(\"got mail: '%s'\", mail)\n messages = parse_email(mail)\n for m in messages:\n logger.debug(\"got message:\")\n logger.debug(\" body: %s\", m.get_body().get_content())\n logger.debug(\" To: %s\", m[\"To\"])\n logger.debug(\" From: %s\", m[\"From\"])\n logger.debug(\" Subject: %s\", m[\"Subject\"])\n\n messages_count = len(messages)\n assert messages_count > 1\n m = messages[1]\n assert \"To\" in m\n assert \"From\" in m\n assert \"Subject\" in m\n assert m[\"To\"] == user_name\n assert m[\"From\"] == expected_from\n assert (\n m[\"Subject\"] == \"[OK] \" + service_name + \" on \" + devid + \" status: running\"\n )\n logger.info(\"test_monitorclient_alert_email: got OK alert email.\")\n\n logger.info(\n \"test_monitorclient_alert_email: email alert on log file containing a pattern scenario.\"\n )\n log_file = \"/tmp/mylog.log\"\n mender_device.run(\"echo 'some line' >> \" + log_file)\n prepare_log_monitoring(\n mender_device, service_name, log_file, \"session opened for user [a-z]*\",\n )\n time.sleep(2 * wait_for_alert_interval_s)\n mender_device.run(\"echo 'some line' >> \" + log_file)\n time.sleep(wait_for_alert_interval_s)\n mail = monitor_commercial_setup_no_client.get_file(\"local-smtp\", mailbox_path)\n messages = parse_email(mail)\n assert messages_count == len(messages)\n\n mender_device.run(\n \"echo 'a new session opened for user root now' >> \" + log_file\n )\n time.sleep(wait_for_alert_interval_s)\n mender_device.run(\"echo 'some line' \" + log_file)\n time.sleep(2 * wait_for_alert_interval_s)\n mail = monitor_commercial_setup_no_client.get_file(\"local-smtp\", mailbox_path)\n logger.debug(\"got mail: '%s'\", mail)\n messages = parse_email(mail)\n m = messages[-1]\n logger.debug(\"got message:\")\n logger.debug(\" body: %s\", m.get_body().get_content())\n logger.debug(\" To: %s\", m[\"To\"])\n logger.debug(\" From: 
%s\", m[\"From\"])\n logger.debug(\" Subject: %s\", m[\"Subject\"])\n assert \"To\" in m\n assert \"From\" in m\n assert \"Subject\" in m\n assert m[\"To\"] == user_name\n assert m[\"From\"] == expected_from\n assert m[\"Subject\"].startswith(\n \"[LOGCONTAINS] \" + service_name + \" on \" + devid + \" status: log-contains\"\n )\n\n logger.info(\n \"test_monitorclient_alert_email: email alert a pattern found in the journalctl output scenario.\"\n )\n service_name = \"mender-client\"\n prepare_log_monitoring(\n mender_device,\n service_name,\n \"@journalctl -u \" + service_name,\n \"State transition: .*\",\n )\n mender_device.run(\"systemctl restart mender-monitor\")\n time.sleep(wait_for_alert_interval_s)\n mail = monitor_commercial_setup_no_client.get_file(\"local-smtp\", mailbox_path)\n logger.debug(\"got mail: '%s'\", mail)\n messages = parse_email(mail)\n m = messages[-1]\n logger.debug(\"got message:\")\n logger.debug(\" body: %s\", m.get_body().get_content())\n logger.debug(\" To: %s\", m[\"To\"])\n logger.debug(\" From: %s\", m[\"From\"])\n logger.debug(\" Subject: %s\", m[\"Subject\"])\n assert \"To\" in m\n assert \"From\" in m\n assert \"Subject\" in m\n assert m[\"To\"] == user_name\n assert m[\"From\"] == expected_from\n assert m[\"Subject\"].startswith(\n \"[LOGCONTAINS] \"\n + service_name\n + \" on \"\n + devid\n + \" status: log-contains State transition:\"\n )", "def periodCheck(data):", "def test_api_livesession_read_attendances_admin(self):\n video = VideoFactory(\n live_state=RUNNING,\n live_info={\n \"started_at\": \"1533686400\",\n },\n live_type=JITSI,\n )\n video2 = VideoFactory(\n live_state=RUNNING,\n live_info={\n \"started_at\": \"1533686400\",\n },\n live_type=JITSI,\n )\n # livesession no display_name email defined\n live_session_email = LiveSessionFactory(\n consumer_site=video.playlist.consumer_site,\n email=\"[email protected]\",\n is_registered=True,\n lti_id=str(video.playlist.lti_id),\n lti_user_id=\"56255f3807599c377bf0e5bf072359fd\",\n video=video,\n )\n\n # other liveregistration no display_name username defined\n live_session_display_name = LiveSessionFactory(\n anonymous_id=uuid.uuid4(),\n consumer_site=video.playlist.consumer_site,\n display_name=\"Aurélie\",\n email=\"[email protected]\",\n live_attendance={\"1533686400\": {\"wonderful\": True}},\n lti_id=str(video.playlist.lti_id),\n lti_user_id=\"77255f3807599c377bf0e5bf072359fd\",\n username=\"Ignored\",\n video=video,\n )\n # other liveregistration no display_name username defined\n live_session_username = LiveSessionFactory(\n consumer_site=video.playlist.consumer_site,\n email=None,\n live_attendance={\"1533686400\": {\"wonderful\": True}},\n lti_id=str(video.playlist.lti_id),\n lti_user_id=\"99255f3807599c377bf0e5bf072359fd\",\n username=\"Sophie\",\n video=video,\n )\n\n # will be ignored live_attendance is empty and is_registered is\n # False\n AnonymousLiveSessionFactory(\n email=None,\n live_attendance={},\n video=video,\n )\n # will be ignored other video\n AnonymousLiveSessionFactory(\n email=None,\n live_attendance={\"1533686400\": {\"wonderful\": True}},\n video=video2,\n )\n # token with right context_id and lti_user_id\n jwt_token = InstructorOrAdminLtiTokenFactory(\n playlist=video.playlist,\n consumer_site=str(video.playlist.consumer_site.id),\n context_id=str(video.playlist.lti_id),\n )\n\n response = self.client.get(\n self._get_url(video),\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 200)\n live_session_email.refresh_from_db()\n 
live_session_display_name.refresh_from_db()\n live_session_username.refresh_from_db()\n\n self.assertEqual(\n response.json(),\n {\n \"count\": 3,\n \"next\": None,\n \"previous\": None,\n \"results\": [\n {\n \"id\": str(live_session_email.id),\n \"display_name\": \"[email protected]\",\n \"is_registered\": True,\n \"live_attendance\": video.get_list_timestamps_attendances(),\n },\n {\n \"id\": str(live_session_display_name.id),\n \"display_name\": \"Aurélie\",\n \"is_registered\": False,\n # the aim of the test is not to test live_attendance's value\n \"live_attendance\": response.json()[\"results\"][1][\n \"live_attendance\"\n ],\n },\n {\n \"id\": str(live_session_username.id),\n \"display_name\": \"Sophie\",\n \"is_registered\": False,\n # the aim of the test is not to test live_attendance's value\n \"live_attendance\": response.json()[\"results\"][2][\n \"live_attendance\"\n ],\n },\n ],\n },\n )", "async def test_notifier_private_sends_alert(self):\n test_cases = (900, 1800, 2700)\n for current_loop in test_cases:\n with self.subTest(current_loop=current_loop):\n with mock.patch.object(self.notifier, \"_current_loop\", new=current_loop):\n await self.notifier._notifier()\n self.alert_channel.send.assert_called_once_with(\n f\"<@&{Roles.moderators}> currently silenced channels: \"\n )\n self.alert_channel.send.reset_mock()", "def test_wake_hour_less_than_sleep_hour(self):\n self.mock_clock.now.return_value = datetime.datetime(2016, 5, 24, 0)\n sleep_windows = [(22, 8)]\n pump_sched = pump_scheduler.PumpScheduler(self.mock_clock,\n sleep_windows)\n self.assertFalse(pump_sched.is_running_pump_allowed())", "def test_overlap(self):\r\n t = Expense(name = \"fake lunch\",\r\n amount = 1.,\r\n on = (WeeklyRecurring(FR,\r\n fromdt = self.fromdt,\r\n todt = self.todt),\r\n DailyRecurring(fromdt = self.fromdt, \r\n todt = self.todt)))\r\n\r\n self.m.addTransaction(t)\r\n self.assertEqual(self.m.totalSaved(self.fromdt, self.todt), -365.)", "def important_event(time: int) -> bool:\n last_event = get_events(True)[0]\n try:\n time_event = int(last_event.split('\\n')[0].strip(\"'\"))\n except ValueError:\n time_event = int(last_event.split('\\n')[-1].strip(\"'\"))\n if time - time_event < 60:\n return 'gol' in last_event or 'cartão' in last_event\n return False" ]
[ "0.65938187", "0.6176482", "0.61750144", "0.61458766", "0.604789", "0.60455877", "0.60449404", "0.59740597", "0.59729266", "0.5958995", "0.5948456", "0.5939794", "0.5884633", "0.58724236", "0.5820209", "0.58177555", "0.57953286", "0.578536", "0.5773353", "0.57711244", "0.5767238", "0.5741776", "0.57362425", "0.57362425", "0.5733171", "0.5727527", "0.57252604", "0.5710773", "0.5692727", "0.5690956", "0.5668741", "0.5663546", "0.56511", "0.5638774", "0.56111234", "0.5594194", "0.557944", "0.55636626", "0.5562639", "0.55615646", "0.55581594", "0.55495125", "0.5541782", "0.55123246", "0.5510597", "0.5487504", "0.54748434", "0.5469147", "0.5468715", "0.5467244", "0.54651", "0.5455736", "0.545043", "0.5448096", "0.5447251", "0.5446075", "0.54405946", "0.54383785", "0.5434015", "0.54285145", "0.54159856", "0.53953934", "0.5393576", "0.5389575", "0.5385375", "0.53787804", "0.5376861", "0.53757274", "0.5368975", "0.5364844", "0.5359848", "0.53587085", "0.5358456", "0.53518593", "0.535183", "0.53499895", "0.53498566", "0.53486985", "0.533231", "0.5330912", "0.5329893", "0.53279054", "0.5315271", "0.5312766", "0.53122693", "0.5311094", "0.53074086", "0.530621", "0.5289177", "0.52882296", "0.52869916", "0.5284699", "0.52737105", "0.52730083", "0.52634317", "0.5258126", "0.525671", "0.525634", "0.52562386", "0.52521896" ]
0.6369556
1
Counting sort arr in place.
def counting_sort(arr):
    # No need to sort
    if arr is None:
        return arr
    n = len(arr)
    if n <= 1:
        return arr
    # find the counting scope, i.e., the max value
    max_value = arr[0]
    for i in range(1, n):
        if arr[i] > max_value:
            max_value = arr[i]
    # init the counting array via list comprehension
    count_arr = [0 for _ in range(max_value + 1)]
    # update the counting number
    for i in arr:
        count_arr[i] += 1
    # update the total counting number
    for i in range(1, max_value + 1):
        count_arr[i] += count_arr[i - 1]
    # store sorted result in a temp array, why scan inversely?
    # note reverse-scanning can guarantee the sort result is stable
    tmp_arr = [0 for _ in range(n)]
    for i in range(n - 1, -1, -1):
        idx = count_arr[arr[i]] - 1
        tmp_arr[idx] = arr[i]
        count_arr[arr[i]] -= 1
    # copy result back to original array
    for i in range(n):
        arr[i] = tmp_arr[i]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def countingSort2(arr):\n freq = [0] * 100\n for el in arr:\n freq[el] += 1\n\n sorted_arr = []\n for i in range(len(freq)):\n sorted_arr = sorted_arr + [i] * freq[i]\n return sorted_arr", "def counting_sort(arr):\n # Given that numbers are integers in range 0 <= x < 100\n counts = [0 for _ in xrange(100)]\n for num in arr:\n counts[num] += 1\n sorted_arr = []\n for i in xrange(100):\n sorted_arr.extend([i] * counts[i])\n print \" \".join(map(str, sorted_arr))", "def countingSort(arr):\n freq = [0] * 100\n for el in arr:\n freq[el] += 1\n return freq", "def counting_sort_o(arr: List[int]) -> List[int]:\n ar_min = min(arr)\n ar_max = max(arr)\n count_arr = [0] * (ar_max - ar_min + 1)\n res = [0] * len(arr)\n for el in arr:\n count_arr[el - ar_min] += 1\n for i in range(1, ar_max - ar_min + 1):\n count_arr[i] += count_arr[i - 1]\n for i in range(len(arr) - 1, -1, -1):\n res[count_arr[arr[i] - ar_min] - 1] = arr[i]\n count_arr[arr[i] - ar_min] -= 1\n return res", "def countSort(array):\n output = [0 for i in range(256)]\n count = [0 for i in range(256)]\n result = [\"\" for element in array]\n for i in array:\n result[ord(i)] += 1\n for i in range(256):\n result[i] += result[i - 1]\n for i in range(len(array)):\n output[result[ord(array[i])] - 1] = array[i]\n count[ord(array[i])] -= 1\n for i in range(len(array)):\n result[i] = output[i]\n return result", "def sort(self, nums: List[int]) -> None:\n size = len(nums)\n\n # Determine the max number in the array\n max_num = 0\n for num in nums:\n if num > max_num:\n max_num = num\n\n # Initialize the count array\n count = [0] * (max_num+1)\n\n # Store the number of occurances in the array into the count array (O(n))\n for i in range(size):\n count[nums[i]] += 1\n\n # Calculate the accumulative sum in the count array (O(k))\n for j in range(1, len(count)):\n count[j] += count[j-1]\n\n # Sort the numbers in the array based on the count position in the count array (O(n))\n output = [0] * size\n h = size-1\n while h >= 0:\n key = nums[h]\n index = count[key]-1\n output[index] = key\n count[key] -= 1\n h -= 1\n\n # Copy the output into the array\n for l in range(size):\n nums[l] = output[l]", "def count_sort(arr, k=1000):\n # Pre-allocate memory for k bins to store occurrences\n store = [[] for i in range(k)]\n # Iterate through arr and store each occurence in resp. 
bin\n for v in arr:\n store[v].append(v)\n # Initialize output\n output = []\n # Iterate through bins and add each list to output\n for i in range(k):\n if store[i]:\n output += store[i]\n return output", "def __digit_counting_sort(self, arr: list, exp: int) -> list: \n n = len(arr)\n work_arr = [None]*n\n\n # initialize counting array (assuming base 10)\n count_arr = [0]*10\n\n # count occurrences of each element digit in array\n for x in arr:\n # find digit index\n digit_index = int((x / exp) % 10)\n count_arr[digit_index] += 1\n\n # do an accumulative sun of the elements digit occurences in the counting array\n count_arr = list(accumulate(count_arr))\n\n # iterate array elements in reverse and place in sorted array based on occurences in counting array\n # reverse iteration ensures that in the case of equality, the elements occuring first in the unsorted array will occur first in the sorted array\n for x in arr[::-1]:\n digit_index = int((x / exp) % 10)\n work_arr[count_arr[digit_index]-1] = x\n count_arr[digit_index] -= 1\n return work_arr", "def count_sort_ascending(arr: StaticArray) -> StaticArray:\n # finds the maximum element\n maximum = arr[0]\n for index in range(arr.size()):\n if abs(arr[index]) > maximum:\n maximum = abs(arr[index])\n\n # creates max+1 arrays for positives and negatives\n maximum += 1\n count_pos = StaticArray(maximum)\n\n # records the number of iterations of an array element\n # by setting the corresponding index position of the count array to the number of iterations\n for index in range(arr.size()):\n current = arr[index]\n if abs(current) > 0:\n if count_pos[abs(current)] is None:\n count_pos.set(abs(current), 1)\n else:\n count_pos[abs(current)] += 1\n\n # zero\n elif current == 0:\n if count_pos[0] is None:\n count_pos[0] = 1\n else:\n count_pos[0] += 1\n\n # sums non-empty spaces and sets empty spaces equal to zero\n length = 0\n # iterate through positive array\n for index in range(count_pos.size()):\n if count_pos[index] is None:\n count_pos[index] = 0\n else:\n length += count_pos[index]\n\n # create array for the results\n result_array = StaticArray(length)\n\n result_array_index = 0\n\n # adds elements in positive array to results array from largest to smallest\n for index in range(count_pos.size()):\n while count_pos[index] > 0:\n result_array.set(result_array_index, index)\n result_array_index += 1\n count_pos[index] -= 1\n\n return result_array\n # end count_sort_ascending function declaration", "def count_sorting(array):\n count = Counter(array)\n alphabet = sorted(count.keys())\n\n for letter, next_letter in zip(alphabet, alphabet[1:]):\n count[next_letter] += count[letter]\n\n previous = 0\n for letter in alphabet:\n previous, count[letter] = count[letter], previous\n\n aux = array[:]\n\n for current_letter in aux:\n position = count[current_letter]\n count[current_letter] += 1\n array[position] = current_letter", "def counting_sort3(arr, key=ord, vrange = 256):\n # count array to store count of individual\n # characters and initialize count array as 0\n count = [0 for _ in range(vrange)]\n\n # Output array that will have sorted arr\n output = [None for _ in arr]\n\n # Store count of each character\n for c in arr:\n count[key(c)] += 1\n\n # Change count[i] so that count[i] now contains actual\n # position of this character in output array\n for i in range(1, vrange):\n count[i] += count[i-1]\n\n # Build the output char array\n for c in arr:\n count[key(c)] -= 1\n output[count[key(c)]] = c\n\n return output", "def countingSort(self, array: 
List[int], exp: int, render: bool = True) -> None:\n\n count = [0 for _ in range(max(array) + 1)]\n\n for i in range(len(array)):\n count[RadixSort.digit(array[i], exp)] += 1\n if render:\n self.render(array, cur=i)\n sleep(0.01)\n\n for i in range(1, len(count)):\n count[i] += count[i - 1]\n if render:\n self.render(array, cur=i)\n\n output = [0 for _ in range(len(array))]\n\n for i in range(len(array) - 1, -1, -1):\n output[count[RadixSort.digit(array[i], exp)] - 1] = array[i]\n count[RadixSort.digit(array[i], exp)] -= 1\n if render:\n self.render(\n output,\n cur=(\n count[RadixSort.digit(array[i], exp)] + 1,\n count[RadixSort.digit(array[i], exp)],\n ),\n )\n sleep(0.01)\n\n array[:] = output\n self.render(array)\n sleep(0.01)", "def count_sort(arr: StaticArray) -> StaticArray:\n # finds the maximum element\n maximum = arr[0]\n for index in range(arr.size()):\n if abs(arr[index]) > maximum:\n maximum = abs(arr[index])\n\n # creates max+1 arrays for positives and negatives\n maximum += 1\n count_pos = StaticArray(maximum)\n count_neg = StaticArray(maximum)\n\n # records the number of iterations of an array element\n # by setting the corresponding index position of the count array to the number of iterations\n for index in range(arr.size()):\n current = arr[index]\n\n # positive numbers\n if current > 0:\n if count_pos[current] is None:\n count_pos.set(current, 1)\n else:\n count_pos[current] += 1\n\n # zero\n elif current == 0:\n if count_pos[0] is None:\n count_pos[0] = 1\n else:\n count_pos[0] += 1\n\n # negative numbers\n else:\n if count_neg[abs(current)] is None:\n count_neg.set(abs(current), 1)\n else:\n count_neg[abs(current)] += 1\n\n # sums non-empty spaces and sets empty spaces equal to zero\n length = 0\n # iterate through positive array\n for index in range(count_pos.size()):\n if count_pos[index] is None:\n count_pos[index] = 0\n else:\n length += count_pos[index]\n\n # iterate through negative array\n for index in range(count_neg.size()):\n if count_neg[index] is None:\n count_neg[index] = 0\n else:\n length += count_neg[index]\n\n # create array for the results\n result_array = StaticArray(length)\n\n # adds elements in positive array to results array from largest to smallest\n result_array_index = 0\n last = count_pos.size() - 1\n for index in range(count_pos.size()):\n while count_pos[last] > 0:\n result_array.set(result_array_index, last)\n result_array_index += 1\n count_pos[last] -= 1\n last -= 1\n\n # adds elements in negative array to results array from largest to smallest\n for index in range(count_neg.size()):\n while count_neg[index] > 0:\n result_array.set(result_array_index, -index)\n result_array_index += 1\n count_neg[index] -= 1\n\n return result_array", "def fullcountSort(arr):\n sorted = [[] for i in range(100)]\n for i in range(int(len(arr) / 2)):\n sorted[int(arr[i][0])].append('-')\n for i in range(int(len(arr) / 2), len(arr)):\n sorted[int(arr[i][0])].append(arr[i][1])\n\n output = ''\n for item in sorted:\n if item == []:\n pass\n else:\n output = output + ' '.join(item) + ' '\n print(output)", "def mysort(arr):\n arr.sort(key=int)\n\n return arr", "def maxChunksToSorted(self, arr: List[int]) -> int:\n count = 0\n sorted_array = sorted(arr)\n s1 = s2 = 0\n for num1, num2 in zip(arr, sorted_array):\n s1 += num1\n s2 += num2\n if s1 == s2:\n count += 1\n return count", "def custom_sort(arr):\n pass", "def countingsort(values):\n\n output = []\n \n n = len(values)\n\n # O(n) to find maximum value in input to map range.\n k = max(values)\n\n # we're 
could leverage a hashtable for this instead of an array, the C\n # version will use an array.\n counts = []\n\n for i in range(k+1):\n counts.append(0)\n\n # there are a few ways to implement this; i've chosen one, but there is \n # another with which I am familiar but is super pythonic and I wanted \n # something more general\n # the other variation just loads the entire thing into the counts as a\n # list, and then dumps out.\n\n # get the counts. \n for v in values:\n counts[v] += 1\n output.append(0) # to make it an array of the same size as input\n\n # get the totals so you have counts[i] is total <= i instead of == i.\n # prefix sums.\n total = 0\n for i in range(k+1):\n total += counts[i]\n counts[i] = total\n\n # start, stop, step\n for i in range(n-1, -1, -1):\n l = values[i] # the value\n output[counts[l]-1] = l\n counts[l] -= 1\n\n return output", "def counting_sort(num_lst):\n n = len(num_lst)\n # initialise counter list with base\n counter = [0 for i in range(26)]\n # for each value in input list, store count of each element at their respective indexes\n for element in num_lst:\n counter[element] += 1\n\n # calculate the cumulative sum\n for j in range(1,len(counter)):\n counter[j] += counter[j-1]\n\n result = [ 0 for i in range(n)]\n\n # construct output\n for k in range(n-1,-1,-1):\n # calculate key to point to position in counter list\n # set output[position[key] - 1] to the val from input\n key = num_lst[k]\n result[counter[key] -1] = num_lst[k]\n # decrement counter\n counter[key] -=1\n\n for i in range(n):\n num_lst[i] = result[i]\n return num_lst", "def groupcounter(arr):\n\treturn np.stack(np.unique(arr, return_counts=True), axis=1)", "def sort_012(input_list):\n cnt0=0 # number of 0's in the array\n cnt1=0 # number of 1's in the array\n cnt2=0 # number of 2's in the array\n for num in input_list:\n if num==0:\n cnt0+=1\n elif num==1:\n cnt1+=1\n else:\n cnt2+=1\n\n ans=[0]*cnt0+[1]*cnt1+[2]*cnt2 # form a list with the individual counts\n return ans", "def Shell_sort(arr):\n\n sub_count = len(arr)//2\n while sub_count > 0:\n for start in range(sub_count):\n gap_insertion_sort(arr, start, sub_count)\n sub_count = sub_count//2", "def sort(self, nums: List[int]) -> None:\n\n # Start at the 2nd element in the array\n for i in range(1, len(nums)):\n\n # Set the key to the beginning of the unsorted subarray\n key = nums[i]\n\n # Set a pointer to the end of the sorted subarray\n j = i-1\n\n # Shift elements in the sorted array to the right as needed\n # until the correct position for the key is found\n while j >= 0 and key < nums[j]:\n nums[j+1] = nums[j]\n j -= 1\n\n # Set the key to the found position\n nums[j+1] = key", "def _rank_array(a):\n a = np.array(a)\n b = a.argsort()\n c = np.empty_like(b)\n c[b] = np.arange(1, len(a) + 1)\n return c", "def sort(arr, times):\n swaped = False\n \n for i in range(len(arr)):\n if i+1 != arr[i]:\n first_to_swap = arr[i]\n first_index = i\n swaped = True\n break\n \n for i in range(len(arr)):\n if swaped:\n if first_to_swap == i+1:\n second_to_swap = arr[i]\n second_index = i\n break\n \n if swaped:\n arr[second_index] = first_to_swap\n arr[first_index] = second_to_swap\n times += 1\n times = sort(arr, times)\n \n return times", "def insertion_sort(arr):\n pass", "def countingSort(sequence, k):\n count = [0 for _ in range(0, k)]\n bucket = [0] * len(sequence)\n # for i in range(0, k):\n # C[i] = 0\n\n for ele in sequence:\n count[ele] += 1\n\n for i in range(1, k):\n count[i] += count[i - 1]\n\n for j in range(len(sequence) - 1, -1, 
-1):\n item = sequence[j]\n bucket[count[item] - 1] = sequence[j]\n count[item] -= 1\n return bucket", "def counting_sort(numbers):\n if len(numbers) <= 1:\n return numbers\n\n # Find range of given numbers (minimum and maximum integer values)\n minimum, maximum = min(numbers), max(numbers)\n # Create list of counts with a slot for each number in input range\n count_ls = [0] * (maximum - minimum + 1)\n # Loop over given numbers and increment each number's count\n for num in numbers:\n count_ls[num - minimum] += 1\n # Loop over counts and append that many numbers into output list\n k = 0\n for index, num in enumerate(count_ls):\n for i in range(num):\n # nums.append(index+minimum)\n numbers[k] = index+minimum\n k += 1\n\n\n return numbers", "def counting_sort(numbers):\n # TODO: Find range of given numbers (minimum and maximum integer values)\n min_num = min(numbers)\n max_num = max(numbers) + 1\n\n # TODO: Create list of counts with a slot for each number in input range\n # create list with a length of the value of max_num\n count_list = [0] * max_num\n\n # TODO: Loop over given numbers and increment each number's count\n # make the counted list by incrementing each instance of a number count\n for i in numbers:\n count_list[i] += 1\n\n # TODO: Loop over counts and append that many numbers into output list\n comp = 0\n for i in range(0, max_num):\n # make a temporary copy of the count_list index value\n temp = count_list[i]\n # set current loop instance index value to the comp value\n count_list[i] = comp\n # set comp value to temp value\n comp += temp\n\n # create result list with len of original list with no value\n result = [0] * len(numbers)\n for i in numbers:\n # for each number in list, set the corrisponding\n # result list index to the value of the instance\n result[count_list[i]] = i\n # move to the next index in the count_list for next instance comparison\n count_list[i] += 1\n # print(result)\n return result\n # FIXME: Improve this to mutate input instead of creating new output list", "def count_sort(v):\n \n biggest = v[0]\n\n\n for i in range (0, len(v)): #finds the biggest number in the list\n if v[i] > biggest:\n biggest = v[i]\n \n\n elements = [0] * (biggest+1) #A list where the entry at index i is the number of times i occurs in the list v\n \n\n for i in range(0,len(v)): \n\n elements[v[i]] += 1\n\n\n sorted_list = [0]*len(v) #Create the sorted list that will be returned\n \n counter = 0\n for i in range(0,len(elements)):\n for j in range(0,elements[i]):\n sorted_list[counter] = i\n counter += 1\n\n return sorted_list", "def sort_012(a):\n # lo keeps track of the running index coming from the beginning of the list\n # hi keeps track of the running index coming from the end of the list\n # m1 and m2 keep track where the subarray of 1's is located \n # (keeps track of the first and last index of the 1's subarray)\n assert(type(a) == list), \"Array has to be a list\"\n lo, m1 = 0, 0\n hi, m2 = len(a)-1, len(a)-1\n runtime = 0\n while lo <= hi:\n runtime += 1\n if a[lo] == 0:\n if m1 < lo:\n a[m1] = 0\n a[lo] = 1\n m1 += 1\n lo += 1\n elif a[hi] == 2:\n if m2 > hi:\n a[m2] = 2\n a[hi] = 1\n m2 -= 1\n hi -= 1\n elif a[lo] == 1:\n lo += 1\n elif a[hi] == 1:\n hi -= 1\n elif a[lo] == 2 and a[hi] == 0:\n if lo == m1:\n a[lo] = 0\n else:\n a[m1] = 0\n a[lo] = 1\n lo += 1\n m1 += 1\n if hi == m2:\n a[hi] = 2\n else:\n a[m2] = 2\n a[hi] = 1\n m2 -= 1\n hi -= 1\n else:\n print(\"Warning: Logic problem\") \n return a, runtime", "def sorted_distinct(arr: list):\n arr.sort()\n 
sorted_map = {}\n for i in arr:\n if i in sorted_map:\n sorted_map[i] += 1\n else:\n sorted_map[i] = 1\n return [i for i in sorted_map.keys()]", "def wiggleSort(self, arr: List[int]) -> None:\n n = len(arr)\n if n <= 1:\n return\n\n #up = False # start with False so that first pair will be non-dec\n\n for i in range(1, n):\n #if up == (arr[i] >= arr[i-1]):\n if (i & 1 == 0) == (arr[i] >= arr[i-1]):\n arr[i], arr[i-1] = arr[i-1], arr[i]\n \n #up = not up", "def sort(self, arr: list, in_place=False) -> list: \n if in_place:\n work_arr = arr\n else:\n work_arr = arr.copy()\n \n max_elmt = max(work_arr) # max element is used for stopping criteria\n\n # sort every digit until the largest element is reduced to a decimal (assuming array consists of integers only)\n exp = 1 \n while max_elmt/exp >= 1:\n # counting sort is used for every digit\n work_arr = self.__digit_counting_sort(work_arr, exp)\n\n # digits are represented using exp = 10^i, where i is the i'th digit\n exp *= 10\n\n return work_arr", "def count(self, A: [int]) -> [int]:\n\n index_array = []\n rc_arr = []\n for ind in range(0, len(A)):\n index_array.append(ind)\n rc_arr.append(0)\n self.sort(A, index_array, 0, len(index_array) - 1, rc_arr)\n return rc_arr", "def stooge_sort(arr):\r\n stooge(arr, 0, len(arr) - 1)", "def equalizeArray(arr):\n from collections import Counter\n counts = Counter(arr)\n most_common = counts.most_common(1)[0][1]\n return len(arr) - most_common", "def radix_sort_rot(self, labels):\n n = len(labels)\n result = 0\n if n == 0:\n return result\n\n for b in range(self.bits):\n # The output array elements that will have sorted arr\n output = [0]*n\n\n # initialize count array as 0\n count = [0, 0]\n\n # Store count of occurrences in count[]\n for i in range(n):\n count[(labels[i] >> b) % 2] += 1\n\n # Change count[i] so that count[i] now contains actual\n # position of this digit in output array\n count[1] += count[0]\n\n # Build the output array\n for i in range(n-1, -1, -1):\n index = (labels[i] >> b)\n output[count[index % 2] - 1] = labels[i]\n count[index % 2] -= 1\n\n # Copying the output array to arr[],\n # so that arr now contains sorted numbers\n labels = output\n\n previous, occ = labels[0], 1\n for i in range(1, len(labels)):\n label = labels[i]\n if label == previous:\n occ += 1\n else:\n result ^= self.ROT(previous ^ occ, occ)\n occ = 1\n previous = label\n if occ > 0:\n result ^= self.ROT(previous ^ occ, occ)\n return result", "def cocktailsort(arr):\n left, right = 0, len(arr) - 1\n while left < right:\n for i in range(left, right):\n if arr[i] > arr[i + 1]:\n swap(arr, i, i + 1)\n right -= 1\n for i in range(right, left, -1):\n if arr[i] < arr[i - 1]:\n swap(arr, i, i - 1)\n left += 1", "def is_sorted(arr: StaticArray) -> int:\r\n num = 0 # Num variable is what helps determine if the array is in either ascending or descending order\r\n for index in range(arr.size()-1):\r\n if arr[index] < arr[index + 1]:\r\n num += 1\r\n elif arr[index] > arr[index + 1]:\r\n num -= 1\r\n if num == arr.size()-1: # If the array is strictly ascending, num would add 1 each time making it equal to\r\n # the array size -1.\r\n return 1\r\n elif num == -arr.size()+1: # If the array is strictly descending, num would be subtracted by 1 each time, making it\r\n # equal to array size +1\r\n return 2\r\n else:\r\n return 0", "def sort(numbers):\n # This will loop n times according to the size of the array starting at the\n # Last index and ending at 0\n for i in range(len(numbers) - 1, 0, -1):\n # Sets the biggest index to 
0\n biggest_index = 0\n\n # This loops i times, so only unsorted indexes are looped through.\n for j in range(i):\n \n # Compares the next index value to the current value at biggest index\n if numbers[biggest_index] < numbers[j + 1]:\n\n # Sets a new biggest index\n biggest_index = (j + 1)\n\n # Swaps the current last index (i) with the biggest value's index\n numbers[i], numbers[biggest_index] = numbers[biggest_index], numbers[i]", "def insertion_sort(arr):\r\n global shifts\r\n res = arr.copy()\r\n n = len(res)\r\n for i in range(n):\r\n j = i\r\n while j > 0 and res[j] < res[j - 1]:\r\n res[j], res[j - 1] = res[j - 1], res[j]\r\n j -= 1\r\n shifts += 1\r\n return res", "def counting_sort(to_sort_list, max_value):\n\n # list of 0's at indecies 0 to max_value\n # this will be a 'histogram' of values from original list to sort\n histogram = [0] * (max_value + 1)\n\n # populate histogram so each index will have a count of times that\n # numer occurs in the original list. Ex: [1, 2, 3, 3] -> [0, 1, 1, 2]\n for item in to_sort_list:\n histogram[item] += 1\n\n # instantiate an output list that will be sorted\n sorted_output = []\n\n # for each item in histogram\n for item, count in enumerate(histogram):\n # for each occurance of the item (will skip the 0s'):\n for time in range(count):\n # add the item to sorted list\n sorted_output.append(item)\n\n return sorted_output", "def sort(self, nums: List[int]) -> None:\n n = len(nums)\n for i in range(n):\n\n # Set the lowest to the beginning of the unsorted subarray\n low = i\n for j in range(i+1,n):\n\n # Find the lowest in the unsorted array\n if nums[j] < nums[low]:\n low = j\n \n # Swap the beginning of the unsorted subarray and the lowest.\n # The beginning of the unsorted subarray now becomes the end of the sorted subarray\n nums[i], nums[low] = nums[low], nums[i]", "def countArrary(input_a):\n if len(input_a) == 1:\n return 0\n else:\n # split the input array\n split_a = [input_a]\n while len(split_a) != len(input_a):\n new_split_a = []\n for sub_a in split_a:\n if len(sub_a) > 1:\n b, c = split_array(sub_a)\n new_split_a.append(b)\n new_split_a.append(c)\n else:\n new_split_a.append(sub_a)\n split_a = deepcopy(new_split_a)\n\n # merge and count\n merge_a = deque(split_a)\n count = 0\n while len(merge_a[0]) < len(input_a):\n new_merge_a = []\n while merge_a:\n a = merge_a.popleft()\n if merge_a:\n b = merge_a.popleft()\n c, c_inv = merge_and_count(a, b)\n count += c_inv\n new_merge_a.append(c)\n else:\n new_merge_a.append(a)\n\n merge_a = deque(deepcopy(new_merge_a))\n\n # print(merge_a)\n return count", "def rearrangeMovieArray():\n # using lambda to sort by values of dict and return list \n new_ranked= sorted(movieViewCounts, key=lambda v:movieViewCounts[v], reverse=True)\n moviesRanked = new_ranked", "def relativeSortArray(self, arr1, arr2):\n\n dict_sort = {}\n list_total = []\n list_diffs = []\n for i in arr2:\n dict_sort[i] = 0\n\n for i in arr1:\n if i in dict_sort:\n dict_sort[i] +=1\n else:\n list_diffs.append(i)\n list_diffs.sort()\n\n for i in arr2:\n list_total.extend([i] * dict_sort[i])\n\n list_total.extend(list_diffs)\n\n return list_total", "def sort():\n return -1", "def countingSort(my_list, target):\r\n count = [0 for i in range(26)]\r\n position = [0 for i in range(26)]\r\n output = [0 for i in range(len(my_list))]\r\n if len(my_list) > 1:\r\n for i in range(len(my_list)):\r\n count[ord(my_list[i][0][target]) - 97] += 1\r\n position[0] = 0\r\n for i in range(1, len(position)):\r\n position[i] = position[i - 1] + count[i 
- 1]\r\n for i in range(len(my_list)):\r\n key = my_list[i][0]\r\n pos=0\r\n index = ord(my_list[i][0][target]) - 97\r\n if count[index] != 0:\r\n pos = position[index]\r\n position[index] += 1\r\n output[pos] = my_list[i]\r\n\r\n return output", "def insertion_sort(arr):\n\n n = len(arr)\n i = 1\n while i < n:\n unsorted_val = arr[i]\n\n m = i + 1\n j = 1\n while unsorted_val < arr[m - j - 1] and (j < m):\n arr[m - j] = arr[m - j - 1]\n j += 1\n arr[m - j] = unsorted_val\n i += 1", "def sort(self,arr):\n\t\tself.heapify(arr)\n\t\tfor i in range(len(arr)-1,0,-1):\n\t\t\t#swap the first and last elements of the heap\n\t\t\tarr[i],arr[0] = arr[0],arr[i]\n\t\t\tself.bubbleDown(arr,0,i)", "def counting_sort(numbers):\n # Find range of given numbers (minimum and maximum integer values)\n minimum = min(numbers)\n maximum = max(numbers)\n\n # Create list of counts with a slot for each number in input range\n counts_list = [0 for _ in range(minimum, maximum + 1)]\n print(counts_list)\n # Loop over given numbers and increment each number's count\n for number in numbers:\n counts_list[number - minimum] += 1 # account for offset\n \n\n # Loop over counts and append that many numbers into output list\n output_list = []\n for index, count in enumerate(counts_list):\n if count == 0:\n continue\n\n number = index + minimum\n output_list.extend([number] * count)\n \n return output_list\n # FIXME: Improve this to mutate input instead of creating new output list", "def counting_sort(numbers):\n # TODO: Find range of given numbers (minimum and maximum integer values)\n minimum = min(numbers)\n maximum = max(numbers)\n\n # TODO: Create list of counts with a slot for each number in input range\n counts = [[x, 0] for x in set(numbers)]\n\n # TODO: Loop over given numbers and increment each number's count\n for num in numbers:\n for i in range(0, len(counts)):\n if num == counts[i][0]:\n counts[i][1] += 1\n\n # TODO: Loop over counts and append that many numbers into output list\n output = []\n for count in counts:\n for _ in range(count[1]):\n output.append(count[0])\n \n return output\n # FIXME: Improve this to mutate input instead of creating new output list", "def merge_and_count(array1, array2):\n out_array = []\n num_inversions = 0\n i = j = 0\n while i < len(array1) and j < len(array2):\n if array1[i] <= array2[j]:\n out_array.append(array1[i])\n i += 1\n else:\n num_inversions += len(array1[i:])\n out_array.append(array2[j])\n j += 1\n\n out_array.extend(array1[i:])\n out_array.extend(array2[j:])\n\n return out_array, num_inversions", "def shell_sort(A):\r\n inc = len(A) // 2\r\n while inc:\r\n for i in range(len(A)):\r\n j = i\r\n temp = A[i]\r\n while j >= inc and A[j-inc] > temp:\r\n A[j] = A[j - inc]\r\n j -= inc\r\n A[j] = temp\r\n inc = inc//2 if inc//2 else (0 if inc == 1 else 1)\r\n return A", "def insertionSort2(n, arr):\n i = 1\n while i < n:\n unsorted_val = arr[i]\n\n m = i + 1\n j = 1\n while unsorted_val < arr[m - j - 1] and (j < m):\n arr[m - j] = arr[m - j - 1]\n j += 1\n arr[m - j] = unsorted_val\n print(' '.join(map(str, arr)))\n\n i += 1", "def radix_sort(arr):\n if len(arr) < 2:\n return arr\n\n for number in range(len(str(max(arr)))):\n # for the length of the biggest number\n buckets = [[] for i in range(10)]\n for item in arr:\n single_num = item % (10 ** (number + 1))\n \n index = single_num // (10 ** number)\n # print(single_num)\n # print(index)\n buckets[index].append(item)\n result = []\n for bucket in buckets:\n for item in bucket:\n result.append(item)\n \n return result", 
"def radixSort(arr):\r\n\r\n maxElement = max(arr)\r\n exp = 1\r\n\r\n while maxElement // exp > 0: # jitne digits hai max element me utne baar chalega ga yeh while loop\r\n \"\"\"\r\n yeha par floor div issliye kiya h because maan lo num = 981 and exp 1\r\n 981/1 = 981.0\r\n 981/10 = 98.1\r\n 981/100 = 9.81\r\n 981/1000 = 0.981 # this is useless so we did floor division and when the floor is zero its means we dont have any digit place left\r\n \"\"\"\r\n countingSort(arr,exp)\r\n exp *= 10", "def _rankdata(a):\n a = ravel(a)\n n = len(a)\n ivec = argsort(a)\n svec = take(a, ivec)\n sumranks = dupcount = 0\n newarray = zeros(n,'d')\n for i in range(n):\n sumranks = sumranks + i\n dupcount = dupcount + 1\n if i==n-1 or svec[i] <> svec[i+1]:\n averank = sumranks / float(dupcount) + 1\n for j in range(i-dupcount+1,i+1):\n newarray[ivec[j]] = averank\n sumranks = dupcount = 0\n return newarray", "def sort_counts(combined_counts):\n sorted_counts = sorted(combined_counts, key=operator.itemgetter(1))\n return (sorted_counts)", "def sortColors2(self, nums: List[int]) -> None:\n # Accepted\n # 87/87 cases passed (32 ms)\n # Your runtime beats 86.65 % of python3 submissions\n # Your memory usage beats 46.36 % of python3 submissions (13.1 MB)\n self.sortArray(nums)", "def sort(self, A, index, p, r, rc_arr):\n\n if p < r:\n q = (p + r) // 2\n\n self.sort(A, index, p, q, rc_arr)\n self.sort(A, index, q + 1, r, rc_arr)\n self.merge(A, index, p, q, r, rc_arr)", "def wiggleSort(self, nums: List[int]) -> None:\n temp = sorted(nums)\n s, t = (len(nums) + 1) >> 1, len(nums)\n for i in range(len(nums)):\n if i & 1 == 0:\n s -= 1\n nums[i] = temp[s]\n else:\n t -= 1\n nums[i] = temp[t]", "def counting_sort(the_list, max_value):\n \n # Creates an array that checks num of instances of any int up through max_value\n num_counts = [0] * (max_value + 1)\n \n # Increments count on first pass through input list\n for item in the_list:\n num_counts[item] += 1 \n\n sorted_list = []\n\n for item, count in enumerate(num_counts):\n print \"this is the status of num_counts:\", num_counts\n print \"enumerating item:\", item, \"enumerating count:\", count\n\n for _ in xrange(count):\n sorted_list.append(item)\n\n return sorted_list", "def scipy_rankdata(a):\n a = np.ravel(a)\n n = len(a)\n svec, ivec = fastsort(a)\n sumranks = 0\n dupcount = 0\n newarray = np.zeros(n, float)\n for i in range(n):\n sumranks += i\n dupcount += 1\n if i==n-1 or svec[i] != svec[i+1]:\n averank = sumranks / float(dupcount) + 1\n for j in range(i-dupcount+1,i+1):\n newarray[ivec[j]] = averank\n sumranks = 0\n dupcount = 0\n return newarray", "def check_through_arr(arr, k):\n i = 0\n j = 1\n count = 0\n while j < len(arr):\n if arr[i] + k > arr[j]:\n j += 1\n elif arr[i] + k == arr[j]:\n count += 1\n i += 1\n j += 1\n else:\n i += 1\n\n return count", "def sortColors(self, nums) -> None:\n my_list = [0, 0, 0]\n for digit in nums:\n my_list[digit] += 1\n k = 0 # k指向第一个不为0的数\n while k < 3 and my_list[k] == 0:\n k += 1\n for i in range(len(nums)):\n nums[i] = k\n my_list[k] -= 1\n while k < 3 and my_list[k] == 0:\n k += 1", "def group_anagrams_sort(self, arr):\n arr = sorted(arr, key=self.key_func)\n return arr", "def insertion_sort(arr):\n for j in range(len(arr)):\n for i in range(j, 0, -1):\n if arr[i] >= arr[i-1]:\n continue\n arr[i], arr[i-1] = arr[i-1], arr[i]", "def sort(array):\n\tn = len(array)\n\theap = heapify(array)\n\tres = [pop(heap) for i in range(n)]\n\treturn res", "def get_rank(array: Union[np.ndarray, List]) -> int:\n return 
len(array) - np.argsort(array).argsort()", "def wiggle_sort(nums):\n\n for i in range(len(nums)):\n if (i % 2 == 1) == (nums[i - 1] > nums[i]):\n nums[i - 1], nums[i] = nums[i], nums[i - 1]", "def sort(\n self, array: List[int], lsd: bool = False, msd: bool = False, **kwargs\n ) -> None:\n\n if lsd: # LSD radix sort\n for i in range(len(str(max(array)))):\n self.countingSort(array, i)\n self.render(array)\n sleep(0.01)\n\n elif msd: # MSD radix sort\n if \"digit\" not in kwargs:\n kwargs[\"digit\"] = None\n\n digit = (\n len(str(max(array))) - 1 if kwargs[\"digit\"] == None else kwargs[\"digit\"]\n )\n output = []\n\n if digit >= 0:\n self.countingSort(array, digit, render=True)\n self.seperate(array, digit)\n\n for i in array:\n self.sort(i, msd=True, digit=digit - 1)\n output += i\n\n else:\n output = array\n\n array[:] = output\n\n else:\n self.sort(array, lsd=True) # LSD by default", "def sort_and_count(list):\n\t\n\tnum_inversions = 0\n \n\t# Checks for base case conditions\n\tif len(list) == 1:\n\t\treturn 0\n\tif len(list) == 2:\n\t\tif(list[0] > list[1]):\n\t\t\ttmp = list[0]\n\t\t\tlist[0] = list[1]\n\t\t\tlist[1] = tmp\n\t\t\treturn 1\n\t\telse:\n\t\t\treturn 0\n \n\t# Split list into two halves\n\tlhs = []\n\trhs = []\n\tfor i in range(len(list)):\n\t\tif i < (len(list)/2):\n\t\t\tlhs.append(list[i])\n\t\telse:\n\t\t\trhs.append(list[i])\n\t\t\t\n\t# Recursive calls on each half of original list\n\tlhs_inv = sort_and_count(lhs)\n\trhs_inv = sort_and_count(rhs)\n\tnum_inversions += lhs_inv + rhs_inv\n\t\n\t# Merge, and add inversions counted in merge step\n\tsplits = merge_and_count(lhs, rhs, list)\n\tnum_inversions += splits\n \n\treturn num_inversions", "def sort(self, nums: List[int]) -> None:\n\n # Seperates negative and positive integers\n neg, pos = [], []\n for num in nums:\n if num < 0:\n neg.append(-num)\n else:\n pos.append(num)\n\n # Sorts the negative numbers\n self._sort(neg)\n neg.reverse()\n\n # Sorts the positiv numbers\n self._sort(pos)\n \n # Remerges the sorted subarrays back into the original array.\n i = j = k = 0\n while j < len(neg):\n nums[i] = -neg[j]\n j += 1\n i += 1\n\n while k < len(pos):\n nums[i] = pos[k]\n k += 1\n i += 1", "def sortColors(nums):\r\n #Dutch flag algorithm; one-pass and O(n) time\r\n #other feasible solutions: bubble sort O(n^2), insertion/selection sort O(2n)\r\n \r\n low,mid,high = 0,0,len(nums)-1\r\n \r\n while mid <= high:\r\n \r\n if nums[mid] == 0:\r\n nums[mid],nums[low] = nums[low],nums[mid]\r\n low += 1\r\n mid += 1\r\n \r\n elif nums[mid] == 1:\r\n mid += 1\r\n \r\n else:\r\n nums[mid],nums[high] = nums[high],nums[mid]\r\n high -= 1", "def sort(self, nums: List[int]) -> None:\n if len(nums) <= 1: return\n\n # Find the middle of the array\n mid = len(nums) // 2\n\n # Split the array elements into two halves\n left = nums[:mid]\n right = nums[mid:]\n\n # Sort the left half\n self.sort(left)\n\n # Sort the right half\n self.sort(right)\n\n # Merge the left and right arrays into the original array\n i = j = k = 0\n while i < len(left) and j < len(right):\n if left[i] < right[j]:\n nums[k] = left[i]\n i += 1\n else:\n nums[k] = right[j]\n j += 1\n k += 1\n\n # Add the remaining elements of the left array if any\n while i < len(left):\n nums[k] = left[i]\n i += 1\n k += 1\n\n # Add the remaining elements of the right array if any\n while j < len(right):\n nums[k] = right[j]\n j += 1\n k += 1", "def sort_k_messed_array(arr, k):\n\n if k == 0:\n return arr\n\n for i in range(len(arr)):\n min_index = find_min_index(arr, i, i + k)\n 
arr[i], arr[min_index] = arr[min_index], arr[i]\n\n return arr", "def wiggleSort(self, nums: List[int]) -> None:\n nums.sort()\n for i in range(len(nums) // 2):\n nums.insert(i*2+1, nums.pop())", "def Insertion_sort(arr):\n\n for outer in range(1, len(arr)):\n current_value = arr[outer]\n current_position = outer\n while current_position > 0 and arr[current_position-1] > current_value:\n arr[current_position] = arr[current_position-1]\n current_position = current_position - 1\n arr[current_position] = current_value", "def bubble_sort(arr: List[int]) -> List[int]:\n sorted = False\n while not sorted:\n sorted = True\n for i in range(1, len(arr)):\n if arr[i-1] > arr[i]:\n arr[i], arr[i-1] = arr[i-1], arr[i]\n sorted = False\n return arr", "def bubble_sort(array):\n\tupdated = 1\n\tcounter = 0\n\n\twhile(updated == 1):\n\t\tupdated = 0\n\t\t\n\t\tfor i in range(len(array)-1-counter):\n\t\t\tif array[i] > array[i+1]:\n\t\t\t\tarray[i],array[i+1] = array[i+1],array[i]\n\t\t\t\tupdated = 1\n\t\t\n\t\tcounter += 1\n\n\treturn array", "def bubble_sort_smart(array: list):\n size = len(array)\n\n for i in range(size):\n for j in range(size - i - 1):\n if array[j] > array[j + 1]:\n aux = array[j]\n array[j] = array[j + 1]\n array[j + 1] = aux\n #array[j], array[j + 1] = array[j + 1], array[j]", "def arankdata(inarray):\r\n n = len(inarray)\r\n svec, ivec = ashellsort(inarray)\r\n sumranks = 0\r\n dupcount = 0\r\n newarray = N.zeros(n,N.float_)\r\n for i in range(n):\r\n sumranks = sumranks + i\r\n dupcount = dupcount + 1\r\n if i==n-1 or svec[i] <> svec[i+1]:\r\n averank = sumranks / float(dupcount) + 1\r\n for j in range(i-dupcount+1,i+1):\r\n newarray[ivec[j]] = averank\r\n sumranks = 0\r\n dupcount = 0\r\n return newarray", "def wiggleSort(self, nums: List[int]) -> None:\n\t\tnums.sort()\n\t\tmed = (len(nums) - 1) // 2\n\t\tnums[::2], nums[1::2] = nums[med::-1], nums[:med:-1]", "def bubble_sort_smarter(array: list):\n size = len(array)\n\n while size > 0:\n idx = 0\n for j in range(size):\n if array[j] > array[j + 1]:\n aux = array[j]\n array[j] = array[j + 1]\n array[j + 1] = aux\n #array[j], array[j + 1] = array[j + 1], array[j]\n idx = j + 1\n\n size = idx", "def oldsortslice(self):\n ...", "def sort(arr, filename):\n if len(arr) > 1:\n mid = len(arr) // 2 # Finding the mid of the array\n L = arr[:mid] # Dividing the array elements\n R = arr[mid:] # into 2 halves\n sort(L, filename) # Sorting the first half\n sort(R, filename) # Sorting the second half\n\n i = j = k = 0\n\n # Copy data to temp arrays L[] and R[]\n while i < len(L) and j < len(R):\n if L[i] < R[j]:\n arr[k] = L[i]\n i += 1\n else:\n arr[k] = R[j]\n j += 1\n k += 1\n\n # Checking if any element was left\n while i < len(L):\n arr[k] = L[i]\n i += 1\n k += 1\n\n while j < len(R):\n arr[k] = R[j]\n j += 1\n k += 1\n with open(\"output/temp/\" + filename, \"w\") as file:\n for item in arr:\n file.write('%s\\n' % item)", "def counting_sort(mylist, position):\n length = len(mylist)\n final = [0] * length # Final sorted list\n temp = [0] * 10 # Current sorted list\n\n for i in range(length): # Add last digit of element to temp\n index = mylist[i]\n temp[index % 10] += 1\n\n for i in range(1, 10): # Shift over elements\n temp[i] += temp[i - 1]\n\n i = length - 1 # Index to last element of list\n \n while i >= 0: # Traverse from right to left\n index = mylist[i]\n final[temp[index % 10] - 1] = mylist[i] # Add elements to final list by sorted position order\n temp[index % 10] -= 1\n i -= 1\n\n for i in range(length): # Save final list 
back to mylist\n mylist[i] = final[i]", "def radixSortNumbers(array):\r\n maxLen = -1\r\n for number in array:\r\n numLen = int(math.log10(int(number[1])+1)) + 1\r\n if numLen > maxLen:\r\n maxLen = numLen\r\n buckets = [[] for i in range(0, 10)]\r\n for digit in range(0, maxLen):\r\n for number in array:\r\n x=int(number[1])+1\r\n buckets[int(x/ 10**digit % 10)].append(number)\r\n del array[:]\r\n for bucket in buckets:\r\n array.extend(bucket)\r\n del bucket[:]\r\n return array", "def count(arr, k):\n dp = [[None]*(k+1) for _ in range(len(arr)+1)]\n for i in range(len(dp)):\n dp[i][0] = 1\n for i in range(1, len(dp[0])):\n dp[0][i] = 0\n for a in dp:\n print(a)\n for i in range(1, len(dp)):\n for j in range(1, len(dp[0])):\n if arr[i-1] <= j:\n dp[i][j] = dp[i-1][j-arr[i-1]] + dp[i-1][j]\n else:\n dp[i][j] = dp[i-1][j]\n for a in dp:\n print(a)\n return dp[-1][-1]", "def sorted_squares(arr: StaticArray) -> StaticArray:\n\n def count_sort_ascending(arr: StaticArray) -> StaticArray:\n \"\"\"\n counts the number of instances that an element appears, then creates a sorted array\n \"\"\"\n # finds the maximum element\n maximum = arr[0]\n for index in range(arr.size()):\n if abs(arr[index]) > maximum:\n maximum = abs(arr[index])\n\n # creates max+1 arrays for positives and negatives\n maximum += 1\n count_pos = StaticArray(maximum)\n\n # records the number of iterations of an array element\n # by setting the corresponding index position of the count array to the number of iterations\n for index in range(arr.size()):\n current = arr[index]\n if abs(current) > 0:\n if count_pos[abs(current)] is None:\n count_pos.set(abs(current), 1)\n else:\n count_pos[abs(current)] += 1\n\n # zero\n elif current == 0:\n if count_pos[0] is None:\n count_pos[0] = 1\n else:\n count_pos[0] += 1\n\n # sums non-empty spaces and sets empty spaces equal to zero\n length = 0\n # iterate through positive array\n for index in range(count_pos.size()):\n if count_pos[index] is None:\n count_pos[index] = 0\n else:\n length += count_pos[index]\n\n # create array for the results\n result_array = StaticArray(length)\n\n result_array_index = 0\n\n # adds elements in positive array to results array from largest to smallest\n for index in range(count_pos.size()):\n while count_pos[index] > 0:\n result_array.set(result_array_index, index)\n result_array_index += 1\n count_pos[index] -= 1\n\n return result_array\n # end count_sort_ascending function declaration\n\n # creates a result array and runs count_sort_ascending on array to sort\n result_array = count_sort_ascending(arr)\n # squares the results\n for index in range(result_array.size()):\n result_array[index] **= 2\n\n return result_array", "def generate_order(arr, descending=True):\n sorted_indices = torch.argsort(arr, 0, descending=descending)\n return sorted_indices.reshape((len(arr), ))", "def group_count(counts, comp_ids):\n # binning\n for i in range(comp_ids.size):\n val = comp_ids[i]\n counts[val] += 1\n # inclusive scan\n total = 0\n for i in range(counts.size):\n ct = counts[i]\n counts[i] = ct + total\n total += ct", "def bubble_sort(array: list):\n size = len(array)\n\n for i in range(size):\n for j in range(size - 1):\n if array[j] > array[j + 1]:\n aux = array[j]\n array[j] = array[j + 1]\n array[j + 1] = aux\n #array[j], array[j + 1] = array[j + 1], array[j]", "def sort(array):\n\n for i in range(0, len(array)):\n for j in range(len(array)-1, i, -1):\n if array[j] < array[j-1]:\n ## Move the lighter bubble up\n _temp = array[j]\n array[j] = array[j-1]\n 
array[j-1] = _temp\n \n return array", "def _merge(arr, temp, left, mid, right):\n i = left\n j = mid\n k = left\n inv_count = 0\n while i < mid and j <= right:\n if arr[i] < arr[j]:\n temp[k] = arr[i]\n k += 1\n i += 1\n else:\n temp[k] = arr[j]\n k += 1\n j += 1\n inv_count += (mid -i)\n while i < mid:\n temp[k] = arr[i]\n k += 1\n i += 1\n if j <= right:\n k += right - j + 1\n j += right - j + 1\n arr[left:k + 1] = temp[left:k + 1]\n else:\n arr[left:right + 1] = temp[left:right + 1]\n return inv_count", "def insertion_sort(array):\n n = len(array)\n result = array.copy()\n\n # Swap each value backwards until in correct position\n for i in range(1, n):\n j = i\n while j > 0 and compare(result[j], result[j - 1]) < 0:\n result[j], result[j - 1] = result[j - 1], result[j]\n j -= 1\n\n return result", "def test_sort_array(self):\r\n self.assertEqual(sort_array([6, 4, 9, 10]), [4, 6, 9, 10])", "def sum_array_ranking_to_borda_count(borda_count, corr_array):\n num_elem = borda_count.size\n\n # either assign (no duplicate case) or enumerate the correlation array\n if num_elem == (np.unique(corr_array)).size:\n borda_count[np.argsort(corr_array)] += np.int_(sorted(np.arange(0, corr_array.size) + 1))\n return borda_count\n\n # enumerate the borda vote\n borda_add = np.zeros(num_elem)\n enum_value = 1\n sort_order = np.argsort(corr_array)\n current_value = corr_array[sort_order[0]]\n for k in range(0, num_elem):\n if corr_array[sort_order[k]] != current_value:\n enum_value += 1\n current_value = corr_array[sort_order[k]]\n borda_add[sort_order[k]] = enum_value\n\n # scale to the number of elements in the array -- philosopical choice here --\n borda_add = borda_add + (num_elem - enum_value)\n\n return borda_count + borda_add" ]
[ "0.78273445", "0.77633786", "0.7481935", "0.7466987", "0.7410261", "0.7389358", "0.7366042", "0.7251443", "0.7188289", "0.71487", "0.7146158", "0.70710707", "0.6989624", "0.6783512", "0.6743108", "0.66933733", "0.66844094", "0.65865797", "0.648153", "0.646309", "0.6394911", "0.6391388", "0.61900467", "0.61776835", "0.61502886", "0.6135831", "0.61355036", "0.6079525", "0.6063541", "0.6033639", "0.6030928", "0.5964798", "0.5940977", "0.59240025", "0.5876702", "0.5861026", "0.58577526", "0.5855692", "0.58556193", "0.58443916", "0.58059424", "0.58055836", "0.57971215", "0.5784519", "0.57787013", "0.5768055", "0.57659394", "0.57639796", "0.575694", "0.5736689", "0.57358015", "0.57287437", "0.57076234", "0.57016206", "0.569852", "0.5693243", "0.56902283", "0.56900203", "0.567984", "0.5653213", "0.5650751", "0.56479776", "0.56347954", "0.56325907", "0.5630979", "0.562709", "0.5601476", "0.5592593", "0.5592361", "0.55902857", "0.5586459", "0.5577583", "0.55683124", "0.5552557", "0.5542617", "0.5525883", "0.5518006", "0.5508072", "0.5504687", "0.55039346", "0.55034024", "0.55029714", "0.5501991", "0.54904264", "0.5486042", "0.54836977", "0.5483108", "0.54830766", "0.54780716", "0.5476043", "0.5470027", "0.54696524", "0.54663324", "0.54655886", "0.5457743", "0.54524344", "0.54509914", "0.5444404", "0.5442886", "0.543665" ]
0.8207626
0
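A minimal usage sketch for the counting_sort document in the record above (illustrative only, not part of the dataset record); it assumes the input is a list of non-negative integers, which the count-array indexing requires:

# Illustrative usage of counting_sort from the record above (assumed sample data).
if __name__ == "__main__":
    data = [4, 2, 2, 8, 3, 3, 1]
    counting_sort(data)  # sorts the list in place; equal keys keep their relative order
    assert data == [1, 2, 2, 3, 3, 4, 8]
    print(data)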
This test case checks that a field value has the expected values.
def test_props_fields(
    self, splunk_search_util, splunk_searchtime_fields_positive, record_property
):
    # Search Query
    record_property("stanza_name", splunk_searchtime_fields_positive["stanza"])
    record_property("stanza_type", splunk_searchtime_fields_positive["stanza_type"])
    record_property("fields", splunk_searchtime_fields_positive["fields"])

    index_list = (
        "(index=" + " OR index=".join(splunk_search_util.search_index.split(',')) + ")"
    )
    search = (
        f"search {index_list}"
        f" {splunk_searchtime_fields_positive['stanza_type']}=\""
        f"{splunk_searchtime_fields_positive['stanza']}\""
    )

    for field_dict in splunk_searchtime_fields_positive["fields"]:
        field = Field(field_dict)
        expected_values = ", ".join([f'"{each}"' for each in field.expected_values])
        negative_values = ", ".join([f'"{each}"' for each in field.negative_values])
        search = (
            search + f" AND ({field} IN ({expected_values})"
            f" AND NOT {field} IN ({negative_values}))"
        )

    search += " | stats count by sourcetype"
    self.logger.info(f"Executing the search query: {search}")

    # run search
    result = splunk_search_util.checkQueryCountIsGreaterThanZero(
        search,
        interval=splunk_search_util.search_interval,
        retries=splunk_search_util.search_retry,
    )
    record_property("search", search)
    assert result, (
        f"No result found for the search.\nsearch={search}\n"
        f"interval={splunk_search_util.search_interval}, retries={splunk_search_util.search_retry}"
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_field_rules():", "def test_many_values(self):\n write this test!", "def _assert_fields_match(self, actual_field, expected_field):\n assert actual_field is not None, \"Could not find field {name}\".format(name=expected_field[\"name\"])\n\n for key in expected_field:\n assert actual_field[key] == expected_field[key], \\\n \"Expected {expected} for {key} but got {actual} instead\".format(\n key=key, actual=actual_field[key], expected=expected_field[key])", "def check_value(self, value):", "def _check_filter_value(self, cleaned_data, expected):\n self.assertEqual(cleaned_data, expected)", "def test_should_name_field(self):\n self.assertIn(\"name\", self.fields)", "def test_validate_field_data(self, **test_dict):\n test_data = TestData()\n test_data.weight = test_dict['weight']\n test_data.max_attempts = test_dict['max_attempts']\n validation = set()\n self.xblock.validate_field_data(validation, test_data)\n validation_list = list(validation)\n # Only one validation error should be in set\n self.assertEquals(1, len(validation_list))\n self.assertEquals(\n test_dict['result'],\n validation_list[0].text,\n )", "def test_Fieldform_has_fields(self):\n self.assertSequenceEqual(\n [\n \"date\",\n \"start_time\",\n \"end_time\",\n \"temperature\",\n \"humidity\",\n \"coordinator\",\n \"staff\",\n \"parcel_id\",\n ],\n list(self.Fieldform.fields),\n )", "def test_values(self):\n user = User()\n self.assertEqual(user.email, \"\")\n self.assertEqual(user.password, \"\")\n self.assertEqual(user.first_name, \"\")\n self.assertEqual(user.last_name, \"\")", "def test_build_field_query(field, value, expected):\n assert _build_field_query(field, value).to_dict() == expected", "def test_mutate_field(self):\n # Test adding a field\n with self.assertRaises(ValueError):\n self.email.add_field('', '')\n\n self.email.add_field(self.key, self.regex)\n\n found_key = False\n found_regex = r''\n for field in self.email.fields:\n if field['key'] == self.key:\n found_key = True\n found_regex = field['regex']\n\n self.assertTrue(found_key)\n self.assertEqual(found_regex, self.regex)\n\n # Test getting a field\n with self.assertRaises(LookupError):\n self.email.get_field('')\n\n field = self.email.get_field(self.key)\n self.assertEqual(\n field, {'key': self.key, 'regex': self.regex, 'value': []})\n\n # Test removing a field\n with self.assertRaises(LookupError):\n self.email.remove_field('')\n\n self.email.remove_field(self.key)\n\n found_key = False\n found_regex = r''\n for field in self.email.fields:\n if field['key'] == self.key:\n found_key = True\n found_regex = field['regex']\n\n self.assertFalse(found_key)\n self.assertNotEqual(found_regex, self.regex)", "def test_positive_value_exists(self):\n #######################################\n # Test for True\n value_to_test = 1\n self.assertEqual(positive_value_exists(value_to_test), True,\n \"Testing value: {value_to_test}, True expected\".format(value_to_test=value_to_test))\n\n value_to_test = 100\n self.assertEqual(positive_value_exists(value_to_test), True,\n \"Testing value: {value_to_test}, True expected\".format(value_to_test=value_to_test))\n\n value_to_test = 'hello'\n self.assertEqual(positive_value_exists(value_to_test), True,\n \"Testing value: {value_to_test}, True expected\".format(value_to_test=value_to_test))\n\n value_to_test = True\n self.assertEqual(positive_value_exists(value_to_test), True,\n \"Testing value: {value_to_test}, True expected\".format(value_to_test=value_to_test))\n\n value_to_test = {\n 'success': True\n }\n 
self.assertEqual(positive_value_exists(value_to_test), True,\n \"Testing value: {value_to_test}, True expected\".format(value_to_test=value_to_test))\n\n value_to_test = [\n 'success'\n ]\n self.assertEqual(positive_value_exists(value_to_test), True,\n \"Testing value: {value_to_test}, True expected\".format(value_to_test=value_to_test))\n\n #######################################\n # Test for False\n value_to_test = 0\n self.assertEqual(positive_value_exists(value_to_test), False,\n \"Testing value: {value_to_test}, False expected\".format(value_to_test=value_to_test))\n\n value_to_test = -1\n self.assertEqual(positive_value_exists(value_to_test), False,\n \"Testing value: {value_to_test}, False expected\".format(value_to_test=value_to_test))\n\n value_to_test = ''\n self.assertEqual(positive_value_exists(value_to_test), False,\n \"Testing value: {value_to_test}, False expected\".format(value_to_test=value_to_test))\n\n value_to_test = '0'\n self.assertEqual(positive_value_exists(value_to_test), False,\n \"Testing value: {value_to_test}, False expected\".format(value_to_test=value_to_test))\n\n value_to_test = False\n self.assertEqual(positive_value_exists(value_to_test), False,\n \"Testing value: {value_to_test}, False expected\".format(value_to_test=value_to_test))\n\n value_to_test = {}\n self.assertEqual(positive_value_exists(value_to_test), False,\n \"Testing value: {value_to_test}, False expected\".format(value_to_test=value_to_test))\n\n value_to_test = []\n self.assertEqual(positive_value_exists(value_to_test), False,\n \"Testing value: {value_to_test}, False expected\".format(value_to_test=value_to_test))", "def test_int_field():", "def test_SameNumberOfFields(self):\n pass", "def test_has_correct_value(self):\n self.assertEqual(self.node.value, 7)", "def test_fields_presence(self):\n form = DCEventRequestForm()\n fields_left = set(form.fields.keys())\n fields_right = set([\n 'name', 'email', 'affiliation', 'location', 'country',\n 'conference', 'preferred_date', 'language', 'workshop_type',\n 'approx_attendees', 'attendee_domains', 'attendee_domains_other',\n 'data_types', 'data_types_other', 'attendee_academic_levels',\n 'attendee_data_analysis_level', 'cover_travel_accomodation',\n 'understand_admin_fee', 'fee_waiver_request',\n 'travel_reimbursement', 'travel_reimbursement_other',\n 'comment', 'privacy_consent', 'captcha',\n ])\n self.assertEqual(fields_left, fields_right)", "def test__validate_owner__1():\n for field_value in (\n 12.6,\n ):\n with vampytest.assert_raises(TypeError):\n validate_owner(field_value)", "def test_values(self):\n self.assertEqual([self.expected_described_model], list(self.mapped_model.values()))", "def test_fields(self):\n form = self._get_form(data=None)\n self.assertEquals(len(form.fields), 4)\n self.assertTrue('tests_url' in form.fields)\n self.assertTrue('repo_url' in form.fields)\n self.assertTrue('pkg_type' in form.fields)\n self.assertTrue('tags' in form.fields)", "def test_bool_field():", "def check_expected_values(self, expected_values, scraped_values):\n\n\t\tfor key in expected_values:\n\t\t\tself.assertIn(key, scraped_values)\n\t\t\tself.assertEqual(expected_values[key], scraped_values[key])", "def testAddingPropertyFields(self):\n map_sheet = self.properties[PROPERTY_SHEET]\n for key, value in PROPS.items():\n self.failUnless(map_sheet.hasProperty(key) and list(map_sheet.getProperty(key)) == value)", "def testRequiredFields(self):\n required = Project.required_fields()\n\n self.assertEqual(type(required), tuple,\n \"required_fields() 
returns a tuple.\")\n\n self.assertTrue(len(required) > 0,\n \"required_field() did not return empty value.\")", "def test_model_keeps_value( self ):\r\n\t\tself.assertEqual( self.m_test_model.custom_field, custom_data )", "def test_positive_validation_decision(self, form_field_name, user_data):\n self.assertValidationDecision(\n {form_field_name: user_data},\n {form_field_name: ''}\n )", "def test_fields_presence(self):\n form = SWCEventRequestForm()\n fields_left = set(form.fields.keys())\n fields_right = set([\n 'name', 'email', 'affiliation', 'location', 'country',\n 'conference', 'preferred_date', 'language', 'workshop_type',\n 'approx_attendees', 'attendee_domains', 'attendee_domains_other',\n 'attendee_academic_levels', 'attendee_computing_levels',\n 'cover_travel_accomodation', 'understand_admin_fee',\n 'travel_reimbursement', 'travel_reimbursement_other',\n 'admin_fee_payment', 'comment', 'captcha', 'privacy_consent',\n ])\n self.assertEqual(fields_left, fields_right)", "def test_list_field():", "def test_check_data_fields(self):\r\n\r\n header =\\\r\n ['SampleID',\r\n 'BarcodeSequence',\r\n 'LinkerPrimerSequence',\r\n 'Description']\r\n mapping_data = [['s1', 'ACGT', 'AAAA', 's1_data'],\r\n ['s2', 'CGTA', 'AAAA', 's2_data']]\r\n errors = []\r\n warnings = []\r\n\r\n errors, warnings = check_data_fields(header,\r\n mapping_data, errors, warnings)\r\n\r\n expected_errors = []\r\n expected_warnings = []\r\n\r\n self.assertEqual(errors, expected_errors)\r\n self.assertEqual(warnings, expected_warnings)", "def test_fields(self):\n form = self._get_form(data=None)\n self.assertEquals(len(form.fields), 5)\n self.assertTrue('pkg_type' in form.fields)\n self.assertTrue('name' in form.fields)\n self.assertTrue('tests_url' in form.fields)\n self.assertTrue('repo_url' in form.fields)\n self.assertTrue('tags' in form.fields)", "def test_col_data_field(self):\n help_tag = 'span'\n help_text_br = False\n label_attrs = {}\n names = ('first', 'billing_address_1')\n expected = [self.form[name] for name in names]\n actual = []\n for name in names:\n field = self.form.fields[name]\n response = self.form.collect_col_data(name, field, help_tag, help_text_br, label_attrs)\n actual.append(response.get('field'))\n\n for expect, got in zip(expected, actual):\n self.assertEqual(expect, got)", "def test_is_valid_annotation_value_invalid_input():\n # test valid label values\n assert not is_valid_annotation_value(value=1)", "def test_favourite_fields(self):\n\n fav = Favourite.objects.get(id=1)\n\n # test the type of former_barcode field\n fav_type = fav._meta.get_field('former_barcode').get_internal_type()\n self.assertEqual(fav_type, 'CharField')\n # label former_barcode\n max_length = fav._meta.get_field('former_barcode').max_length\n self.assertEqual(max_length, 80)\n # test blank field in label former_barcode\n fav_blank = fav._meta.get_field('former_barcode').blank\n self.assertFalse(fav_blank)\n # test null field in label former_barcode\n fav_null = fav._meta.get_field('former_barcode').null\n self.assertFalse(fav_null)\n\n # test the type of favourite_barcode field\n fav_type = fav._meta.get_field('favourite_barcode').get_internal_type()\n self.assertEqual(fav_type, 'CharField')\n # label favourite_barcode\n max_length = fav._meta.get_field('favourite_barcode').max_length\n self.assertEqual(max_length, 80)\n # test blank field in label favourite_barcode\n fav_blank = fav._meta.get_field('favourite_barcode').blank\n self.assertFalse(fav_blank)\n # test null field in label favourite_barcode\n fav_null = 
fav._meta.get_field('favourite_barcode').null\n self.assertFalse(fav_null)\n\n # test the type of email_user field\n fav_type = fav._meta.get_field('email_user').get_internal_type()\n self.assertEqual(fav_type, 'CharField')\n # label email_user\n max_length = fav._meta.get_field('email_user').max_length\n self.assertEqual(max_length, 150)\n # test blank field in label email_user\n fav_blank = fav._meta.get_field('email_user').blank\n self.assertFalse(fav_blank)\n # test null field in label email_user\n fav_null = fav._meta.get_field('email_user').null\n self.assertFalse(fav_null)", "def test_fields(self):\n\n class Foo(Model):\n field1 = StringField()\n field2 = IntegralField()\n\n assert hasattr(Foo, \"_fields\")\n assert type(Foo._fields) is dict\n\n assert not hasattr(Foo, \"field1\")\n assert \"field1\" in Foo._fields\n assert type(Foo._fields[\"field1\"]) is StringField\n\n assert not hasattr(Foo, \"field2\")\n assert \"field2\" in Foo._fields\n assert type(Foo._fields[\"field2\"]) is IntegralField", "def test_construct_values_raises_for_missing_fields(self):\n message = \"There must me one or more field names to compute a value. \"\n with self.assertRaisesMessage(ImproperlyConfigured, message):\n self.form.construct_value_from_values()\n with self.assertRaisesMessage(ImproperlyConfigured, message):\n self.form.construct_value_from_values('')\n with self.assertRaisesMessage(ImproperlyConfigured, message):\n self.form.construct_value_from_values([])", "def test_is_valid_label_value_valid_input():\n # test valid label values\n assert is_valid_label_value(value=None)\n assert is_valid_label_value(value=\"\")\n assert is_valid_label_value(value=\"l0L\")\n assert is_valid_label_value(value=\"L-l\")\n assert is_valid_label_value(value=\"L.L\")\n assert is_valid_label_value(value=\"l_4\")\n assert is_valid_label_value(value=\"4-you\")\n assert is_valid_label_value(value=\"You.2\")", "def test_set_empty_field(self):\n self._p.fields = {}\n received = self._p.fields\n expected = {}\n msg = 'Setting field with empty list should not produce error.'\n self.assertDictEqual(received, expected, msg)", "def test_has_correct_number_of_keys_and_values(self):\n self.has_correct_number_of_keys_and_values(2, 1)", "def test_has_correct_number_of_keys_and_values(self):\n self.has_correct_number_of_keys_and_values(2, 1)", "def test_single_value(self, test_input, expected, sc):\n assert sc.add(test_input) == expected", "def test__VerificationFieldPlatform__value():\n for instance in VerificationFieldPlatform.INSTANCES.values():\n vampytest.assert_instance(instance.value, VerificationFieldPlatform.VALUE_TYPE)", "def test_entities__Entity__getField__2(entity):\n assert IDummy['dummy2'] == entity.getField('dummy2')", "def test_construct_values_as_expected(self):\n constructor_fields = ('first', 'second', 'last', )\n values = ['FirstValue', 'SecondValue', 'LastValue']\n expected = '_**_'.join(ea for ea in values if ea).casefold()\n cleaned_data = getattr(self.form, 'cleaned_data', {})\n cleaned_data.update(dict(zip(constructor_fields, values)))\n self.form.cleaned_data = cleaned_data\n actual = self.form.construct_value_from_values(constructor_fields, '_**_')\n simple = self.form.construct_value_from_values(constructor_fields)\n\n self.assertEqual(expected, actual)\n self.assertEqual('firstvalue_**_secondvalue_**_lastvalue', actual)\n self.assertEqual('_'.join(values).casefold(), simple)\n self.assertEqual('firstvalue_secondvalue_lastvalue', simple)", "def test_prep_fields(self):\n pass", "def 
test_text_field():", "def test_entities__Field__1():\n zope.interface.verify.verifyObject(IField, Field())", "def test_select_field():", "def test_get_error_data_when_no_errors(self):\n field_setup = None\n error_names = ['non-field_name', 'not_a_field']\n prepared_info = self.setup_error_data(field_setup, error_names)\n for row in prepared_info:\n self.assertEqual(row['expected'], row['actual'])", "def test_check(self):\n\t\tself.filter.set_operator('.equals')\n\t\tfor lim in [10, '10']:\n\t\t\tself.filter.set_limit(lim)\n\t\t\tfor val in [10, '10']:\n\t\t\t\tob = Object(field=val)\n\t\t\t\tself.assertTrue(self.filter.check(ob), \"Failed Filter comparison check - Fields should match!\")", "def test_has_value(self) -> None:\n self.assertTrue(LogLevels.has_value(0))\n self.assertTrue(LogLevels.has_value(10))\n self.assertTrue(LogLevels.has_value(15))\n self.assertTrue(LogLevels.has_value(20))\n self.assertTrue(LogLevels.has_value(25))\n self.assertTrue(LogLevels.has_value(30))\n self.assertTrue(LogLevels.has_value(35))\n self.assertTrue(LogLevels.has_value(40))\n self.assertTrue(LogLevels.has_value(50))", "def test_team_reg_good_values(self):\n for input_val, output_val in self.known_values:\n self.line._parse_team_reg(input_val)\n self.assertEqual(output_val, self.line.team_reg)", "def test_field_value_list(self):\n field = '3C273,M30'\n ref_idx = [2,3]\n self.res=self.run_task(infile=self.rawfile,field=field,calmode=self.calmode,outfile=self.outname,outform='ASAP')\n self.assertEqual(self.res,None,\n msg='Any error occurred during calibration')\n self._compare_with_analytic(self.outname, self.line, self.baseline, ref_idx)", "def testFields(self):\n requested_fields = [\"FormNumber\", \"Title\"]\n table = self.auth.table(self.dataset,\n self.table2,\n fields=requested_fields)\n table_columns = table[0].keys()\n for x in requested_fields:\n self.assertTrue(x in table_columns)\n # Account for the extra '__mmetadata' key\n self.assertEqual(len(requested_fields) + 1, len(table_columns))", "def test_get_error_data_table_some_col_errors(self):\n field_setup = None\n error_names = ['first', 'billing_address_1', 'billing_country_area']\n prepared_info = self.setup_error_data(field_setup, error_names, True)\n for row in prepared_info:\n self.assertEqual(row['expected'], row['actual'])\n pass", "def test_set_non_dictionary_based_field(self):\n self.assertRaises(TypeError, self._p.set_fields, '')", "def test_is_valid_annotation_value_valid_input():\n # test valid label values\n assert is_valid_annotation_value(value=None)\n assert is_valid_annotation_value(value=\"\")\n assert is_valid_annotation_value(value=\"l0L\")\n assert is_valid_annotation_value(value=\"L-l\")\n assert is_valid_annotation_value(value=\"L.L\")\n assert is_valid_annotation_value(value=\"l_4\")\n assert is_valid_annotation_value(value=\"4-you\")\n assert is_valid_annotation_value(value=\"You.2\")", "def test_registration_empty_Fields(self):\r\n print('========================================================================')\r\n print('Negative test for check the validation entering the strigs with spaces on registration fields')\r\n # Load Registrtion page\r\n self.reg_page.open_registration_page()\r\n driver = self.reg_page.driver\r\n\r\n # cheks if right title\r\n assert self.reg_page.is_title_matches(), \"Registration title page doesn't match\"\r\n\r\n str_with_spaces = ' '\r\n\r\n self.reg_page.fill_name(str_with_spaces)\r\n self.reg_page.fill_email(str_with_spaces)\r\n self.reg_page.fill_password(str_with_spaces)\r\n 
self.reg_page.fill_confirm_password(str_with_spaces)\r\n\r\n self.reg_page.click_sign_up_btn()\r\n\r\n #test that regiastartion page is opened\r\n assert self.reg_page.is_title_matches(), \"Registration title page doesn't match\"\r\n\r\n prifileObj = RegistrationProfile()\r\n\r\n time.sleep(3)\r\n #get count elements with error message\r\n cnt_error = self.reg_page.get_count_hasError_fields()\r\n print('cnt_error='+str(cnt_error))\r\n\r\n #check that we have right the error elements count\r\n\r\n assert cnt_error == prifileObj.count_registration_hasError_fields, \\\r\n \"Count requirements fields has Errors doesn't match\"\r\n\r\n # check that we have right the header about incorrect input dara\r\n assert self.reg_page.is_error_validation_header(), \"No error header\"\r\n\r\n #check that each required field has uder the right error validation text\r\n\r\n #check field Name\r\n assert self.reg_page.get_hasError_validation_text_for_field('Name') == prifileObj.valid_requirement_text_for_name, \\\r\n \"No validation message for Name field\"\r\n #check field Email Adress\r\n assert self.reg_page.get_hasError_validation_text_for_field('E-Mail Address') == prifileObj.valid_requirement_text_for_email, \\\r\n \"No validation message for Email field\"\r\n # check field Password\r\n assert self.reg_page.get_hasError_validation_text_for_field(\r\n 'Password') == prifileObj.valid_requirement_text_for_password, \\\r\n \"No validation message for Password field\"\r\n\r\n print('--------- SUCCESS test_registration_empty_Fields -----------')\r\n driver.quit()", "def test_validate(self):\n pass", "def test_single_field_success(self, client):\n field_dependency = client.single_field_dependency(9)\n assert field_dependency == self.test_field_dep", "def test_required_field_values_are_present():\n\n model_definition = {'language': {'type': 'fixed',\n 'required': True,\n 'persisted': True},\n 'source': {'type': 'list',\n 'required': False,\n 'persisted': True},\n 'resources.title': {'type': 'text',\n 'required': True,\n 'persisted': True}}\n product1 = {'language': 'english'}\n factory = ProductModelFactory(model_definition)\n factory.build('product1', product1)", "def test_get_error_data_some_col_errors(self):\n field_setup = None\n error_names = ['first', 'billing_address_1', 'billing_country_area']\n prepared_info = self.setup_error_data(field_setup, error_names)\n for row in prepared_info:\n self.assertEqual(row['expected'], row['actual'])\n pass", "def test_query_api_result_fields():\n # Pick the first result and test for all fields\n result = query_api(url, \"test\")[0]\n assert all(field in result.keys() for field in fields)", "def test_checkout_fields_prefilled(self):\n checkout_fields = self.build_checkout_form()\n for name, value in checkout_fields.items():\n # django fields ids are formatted \"id=id_{field_name}\"\n field_id = 'id_' + name\n field = self.browser.find_element_by_id(field_id)\n self.assertEqual(\n field.get_attribute('value'), str(value)\n )", "def test_fieldValueTypes(self):\n # tests for \"method\" and \"datetime\" values follow later on ...\n # booleans are not tested yet\n\n factory = self.root.manage_addProduct['Formulator']\n factory.manage_add('form', 'ValueTest')\n factory.manage_add('form2', 'ValueTest')\n form = self.root.form\n form.manage_addField('int_field', 'Test Integer Field', 'IntegerField')\n form.manage_addField('float_field', 'Test Float Field', 'FloatField')\n form.manage_addField('date_field', 'Test Date Field', 'DateTimeField')\n 
form.manage_addField('list_field', 'Test List Field', 'ListField')\n form.manage_addField(\n 'multi_field',\n 'Test Checkbox Field',\n 'MultiCheckBoxField')\n form.manage_addField('link_field', 'Test Link Field', 'LinkField')\n form.manage_addField('empty_field', 'Test Empty Field', 'StringField')\n int_field = form.int_field\n float_field = form.float_field\n date_field = form.date_field\n list_field = form.list_field\n multi_field = form.multi_field\n link_field = form.link_field\n empty_field = form.empty_field\n\n # XXX editing fields by messing with a fake request\n # -- any better way to do this?\n # (could assign to \"values\" directly ...)\n\n default_values = {'field_title': 'Test Title',\n 'field_display_width': '92',\n 'field_required': 'checked',\n 'field_enabled': 'checked',\n }\n try:\n form_values = default_values.copy()\n form_values.update({'field_default': 'None',\n 'field_required': '',\n })\n empty_field.manage_edit(REQUEST=TestRequest(form=form_values))\n\n form_values = default_values.copy()\n form_values.update({'field_default': '42',\n 'field_enabled': 'checked'})\n int_field.manage_edit(REQUEST=TestRequest(form=form_values))\n\n form_values = default_values.copy()\n form_values.update({'field_default': '1.7'})\n float_field.manage_edit(REQUEST=TestRequest(form=form_values))\n\n # XXX cannot test \"defaults to now\", as this may fail randomly\n form_values = default_values.copy()\n form_values.update({'field_input_style': 'list',\n 'field_input_order': 'mdy',\n 'field_date_only': '',\n 'field_css_class': 'test_css',\n 'field_time_separator': '$'})\n date_field.manage_edit(REQUEST=TestRequest(form=form_values))\n\n form_values = default_values.copy()\n form_values.update({'field_default': 'foo',\n 'field_size': '1',\n 'field_items': 'Foo | foo\\n Bar | bar'})\n list_field.manage_edit(REQUEST=TestRequest(form=form_values))\n\n form_values = default_values.copy()\n form_values.update(\n {'field_default': 'foo',\n 'field_size': '3',\n 'field_items': 'Foo | foo\\n Bar | bar\\nBaz | baz',\n 'field_orientation': 'horizontal',\n 'field_view_separator': '<br />\\n'})\n multi_field.manage_edit(REQUEST=TestRequest(form=form_values))\n\n form_values = default_values.copy()\n form_values.update({'field_default': 'http://www.absurd.org',\n 'field_required': '1',\n 'field_check_timeout': '5.0',\n 'field_link_type': 'external',\n })\n link_field.manage_edit(REQUEST=TestRequest(form=form_values))\n\n except ValidationError as e:\n self.fail('error when editing field %s; error message: %s' %\n (e.field_id, e.error_text))\n\n form2 = self.root.form2\n\n xml = formToXML(form)\n XMLToForm(xml, form2)\n\n self.assertEqualForms(form, form2)\n\n request = TestRequest()\n request.form['field_int_field'] = '42'\n request.form['field_float_field'] = '2.71828'\n request.form['subfield_date_field_month'] = '11'\n request.form['subfield_date_field_day'] = '11'\n # This field only allows ten years in the future, today 2023-03-14\n request.form['subfield_date_field_year'] = '2033'\n request.form['subfield_date_field_hour'] = '09'\n request.form['subfield_date_field_minute'] = '59'\n request.form['field_list_field'] = 'bar'\n request.form['field_multi_field'] = ['bar', 'baz']\n request.form['field_link_field'] = 'http://www.zope.org'\n try:\n result1 = form.validate_all(request)\n except FormValidationError as e:\n # XXX only render first error ...\n self.fail('error when editing form1, field %s; error message: %s' %\n (e.errors[0].field_id, e.errors[0].error_text))\n\n try:\n result2 = 
form2.validate_all(request)\n except FormValidationError as e:\n # XXX only render first error ...\n self.fail('error when editing form1, field %s; error message: %s' %\n (e.errors[0].field_id, e.errors[0].error_text))\n self.assertEqual(result1, result2)\n self.assertEqual(42, result2['int_field'])\n self.assertEqual(2.71828, result2['float_field'])\n\n # check link field timeout value\n self.assertEqual(link_field.get_value('check_timeout'),\n form2.link_field.get_value('check_timeout'))\n\n # XXX not tested: equal form validation failure on invalid input", "def check_validity(self):", "def test_defining_only_or_defer_on_nonexistant_fields_fails(self):", "def test_form_fields(self):\n\n response = self.client.get(reverse('edit-poi', kwargs={'id': '1'}))\n\n fields = {\n \"name\": \"Newport Lighthouse\",\n \"alt_name\": \"\",\n \"latitude\": 43.966874,\n \"longitude\": -124.10534,\n \"description\": \"A pretty nice lighthouse\",\n \"history\": \"It was built at some time in the past\",\n \"facts\": \"It's a lighthouse\",\n \"street\": \"123 Fake St\",\n \"city\": \"Newport\",\n \"state\": \"Oregon\",\n \"location_description\": \"out on the cape over there\",\n \"zip\": \"11234\",\n \"website\": \"\",\n \"email\": \"\",\n \"phone\": None,\n }\n\n form = response.context['poi_form']\n\n for field in fields:\n self.assertEqual(fields[field], form[field].value())", "def testValidate_Valid(self):\n values = {\n messages.IntegerField: 10,\n messages.FloatField: 1.5,\n messages.BooleanField: False,\n messages.BytesField: b'abc',\n messages.StringField: u'abc',\n }\n\n def action(field_class):\n # Optional.\n field = field_class(1)\n field.validate(values[field_class])\n\n # Required.\n field = field_class(1, required=True)\n field.validate(values[field_class])\n\n # Repeated.\n field = field_class(1, repeated=True)\n field.validate([])\n field.validate(())\n field.validate([values[field_class]])\n field.validate((values[field_class],))\n\n # Right value, but not repeated.\n self.assertRaises(messages.ValidationError,\n field.validate,\n values[field_class])\n self.assertRaises(messages.ValidationError,\n field.validate,\n values[field_class])\n\n self.ActionOnAllFieldClasses(action)", "def test_value_error(self):\n self._error_test(ValueError)", "def test_stub(self):\n self.assertEqual(self._value, True)", "def test_hidden_field():", "def check_unexpected_values(self, expected_values, scraped_values):\n\n\t\tfor key in scraped_values:\n\t\t\tself.assertIn(key, expected_values)", "def test_get_error_data_table_when_no_errors(self):\n field_setup = None\n error_names = ['non-field_name', 'not_a_field']\n prepared_info = self.setup_error_data(field_setup, error_names, True)\n for row in prepared_info:\n self.assertEqual(row['expected'], row['actual'])", "def test_make_form_field():", "def _check_fields(self, content: JsonDict) -> None:\n self.assertIn(\"id\", content)\n self.assertIn(\"received_ts\", content)\n self.assertIn(\"room_id\", content)\n self.assertIn(\"event_id\", content)\n self.assertIn(\"user_id\", content)\n self.assertIn(\"sender\", content)\n self.assertIn(\"canonical_alias\", content)\n self.assertIn(\"name\", content)\n self.assertIn(\"event_json\", content)\n self.assertIn(\"score\", content)\n self.assertIn(\"reason\", content)\n self.assertIn(\"auth_events\", content[\"event_json\"])\n self.assertIn(\"type\", content[\"event_json\"])\n self.assertIn(\"room_id\", content[\"event_json\"])\n self.assertIn(\"sender\", content[\"event_json\"])\n self.assertIn(\"content\", 
content[\"event_json\"])", "def test_date_field():", "def test_prep_field_properties(self):\n original_data = self.form.data\n test_data = original_data.copy()\n # modify values in data\n test_data._mutable = False\n self.form.data = test_data\n original_fields = self.form.fields\n test_fields = original_fields.copy()\n # modify fields\n self.form.fields = test_fields\n test_fields_info = {name: field.__dict__.copy() for name, field in test_fields.items()}\n original_get_overrides = self.form.get_overrides\n def skip_overrides(): return {}\n self.form.get_overrides = skip_overrides\n original_alt_field_info = getattr(self.form, 'alt_field_info', None)\n self.form.alt_field_info = self.alt_field_info\n self.form.test_condition_response = True\n expected_fields_info = test_fields_info.copy()\n result_fields = self.form.prep_fields()\n result_fields_info = {name: field.__dict__.copy() for name, field in result_fields.items()}\n modified_info = self.alt_field_info['alt_test_feature']\n first_label = modified_info['first']['label']\n first_initial = modified_info['first']['initial']\n last_initial = modified_info['last']['initial']\n for name, opts in modified_info.items():\n expected_fields_info[name].update(opts)\n\n self.assertEqual(first_label, result_fields['first'].label)\n self.assertEqual(first_initial, result_fields['first'].initial)\n self.assertEqual(last_initial, result_fields['last'].initial)\n for key, val in expected_fields_info.items():\n self.assertEqual(val, result_fields_info[key])\n self.assertDictEqual(expected_fields_info, result_fields_info)\n\n self.form.test_condition_response = False\n self.form.alt_field_info = original_alt_field_info\n if original_alt_field_info is None:\n del self.form.alt_field_info\n self.form.fields = original_fields\n self.form.data = original_data\n self.form.get_overrides = original_get_overrides", "def test_correct_result_for_no_value(self):\n form = UserRegistrationForm(\n {\n 'email': '', \n 'username': '', \n 'password1': '', \n 'password2': ''\n })\n self.assertFalse(form.is_valid())\n self.assertEqual(form.errors['email'], [u'This field is required.'])\n self.assertEqual(form.errors['username'], [u'This field is required.'])\n self.assertEqual(form.errors['password1'], [u'This field is required.'])\n self.assertEqual(form.errors['password2'], [u'This field is required.'])", "def test_data_in_param(self):", "def test_validation(self):\n self.validationFails()", "def test_birth_validation(self):", "def test_entities__Entity__getFieldValues__1(\n entity_with_field, schemaized_field):\n entity = entity_with_field\n entity.setFieldOrder(['dummy2', schemaized_field.__name__, 'dummy'])\n assert ([IDummy['dummy2'], schemaized_field, IDummy['dummy']] ==\n entity.getFieldValues())", "def test_attribute_types(self):\n self.assertIsInstance(self.user_1.email, str)\n self.assertIsInstance(self.user_1.password, str)\n self.assertIsInstance(self.user_1.first_name, str)\n self.assertIsInstance(self.user_1.last_name, str)", "def test_product_fields(self):\n\n prd = Product.objects.get(id=1)\n\n # test the type of name field\n prd_type = prd._meta.get_field('name').get_internal_type()\n self.assertEqual(prd_type, 'CharField')\n # label name\n max_length = prd._meta.get_field('name').max_length\n self.assertEqual(max_length, 255)\n # test blank field in label name\n prd_blank = prd._meta.get_field('name').blank\n self.assertTrue(prd_blank)\n # test null field in label name\n prd_null = prd._meta.get_field('name').null\n self.assertTrue(prd_null)\n\n # test 
the type of description field\n prd_type = prd._meta.get_field('description').get_internal_type()\n self.assertEqual(prd_type, 'CharField')\n # label description\n max_length = prd._meta.get_field('description').max_length\n self.assertEqual(max_length, 255)\n # test blank field in label description\n prd_blank = prd._meta.get_field('description').blank\n self.assertTrue(prd_blank)\n # test null field in label description\n prd_null = prd._meta.get_field('description').null\n self.assertTrue(prd_null)\n\n # test the type of nutrition_grade field\n prd_type = prd._meta.get_field('nutrition_grade').get_internal_type()\n self.assertEqual(prd_type, 'CharField')\n # label nutrition_grade\n max_length = prd._meta.get_field('nutrition_grade').max_length\n self.assertEqual(max_length, 1)\n # test blank field in label nutrition_grade\n prd_blank = prd._meta.get_field('nutrition_grade').blank\n self.assertTrue(prd_blank)\n # test null field in label nutrition_grade\n prd_null = prd._meta.get_field('nutrition_grade').null\n self.assertTrue(prd_null)\n\n # test the type of barcode field\n prd_type = prd._meta.get_field('barcode').get_internal_type()\n self.assertEqual(prd_type, 'CharField')\n # label barcode\n max_length = prd._meta.get_field('barcode').max_length\n self.assertEqual(max_length, 255)\n # test blank field in label barcode\n prd_blank = prd._meta.get_field('barcode').blank\n self.assertFalse(prd_blank)\n # test null field in label barcode\n prd_null = prd._meta.get_field('barcode').null\n self.assertFalse(prd_null)\n\n # test the type of url field\n prd_type = prd._meta.get_field('url').get_internal_type()\n self.assertEqual(prd_type, 'CharField')\n # label url\n max_length = prd._meta.get_field('url').max_length\n self.assertEqual(max_length, 255)\n # test blank field in label url\n prd_blank = prd._meta.get_field('url').blank\n self.assertTrue(prd_blank)\n # test null field in label url\n prd_null = prd._meta.get_field('url').null\n self.assertTrue(prd_null)\n\n # test the type of url_pic field\n prd_type = prd._meta.get_field('url_pic').get_internal_type()\n self.assertEqual(prd_type, 'CharField')\n # label url_pic\n max_length = prd._meta.get_field('url_pic').max_length\n self.assertEqual(max_length, 255)\n # test blank field in label url_pic\n prd_blank = prd._meta.get_field('url_pic').blank\n self.assertTrue(prd_blank)\n # test null field in label url_pic\n prd_null = prd._meta.get_field('url_pic').null\n self.assertTrue(prd_null)\n\n # test the type of store field\n prd_type = prd._meta.get_field('store').get_internal_type()\n self.assertEqual(prd_type, 'CharField')\n # label store\n max_length = prd._meta.get_field('store').max_length\n self.assertEqual(max_length, 255)\n # test blank field in label store\n prd_blank = prd._meta.get_field('store').blank\n self.assertTrue(prd_blank)\n # test null field in label store\n prd_null = prd._meta.get_field('store').null\n self.assertTrue(prd_null)\n\n # test the type of fat field\n prd_type = prd._meta.get_field('fat').get_internal_type()\n self.assertEqual(prd_type, 'DecimalField')\n # label fat max digits\n max_digits = prd._meta.get_field('fat').max_digits\n self.assertEqual(max_digits, 5)\n # label fat decimal places\n dec_places = prd._meta.get_field('fat').decimal_places\n self.assertEqual(dec_places, 2)\n # test blank field in label fat\n prd_blank = prd._meta.get_field('fat').blank\n self.assertTrue(prd_blank)\n # test null field in label fat\n prd_null = prd._meta.get_field('fat').null\n self.assertTrue(prd_null)\n\n # test the 
type of saturated_fat field\n prd_type = prd._meta.get_field('saturated_fat').get_internal_type()\n self.assertEqual(prd_type, 'DecimalField')\n # label saturated_fat max digits\n max_digits = prd._meta.get_field('saturated_fat').max_digits\n self.assertEqual(max_digits, 5)\n # label saturated_fat decimal places\n dec_places = prd._meta.get_field('saturated_fat').decimal_places\n self.assertEqual(dec_places, 2)\n # test blank field in label saturated_fat\n prd_blank = prd._meta.get_field('saturated_fat').blank\n self.assertTrue(prd_blank)\n # test null field in label saturated_fat\n prd_null = prd._meta.get_field('saturated_fat').null\n self.assertTrue(prd_null)\n\n # test the type of sugar field\n prd_type = prd._meta.get_field('sugar').get_internal_type()\n self.assertEqual(prd_type, 'DecimalField')\n # label sugar max digits\n max_digits = prd._meta.get_field('sugar').max_digits\n self.assertEqual(max_digits, 5)\n # label sugar decimal places\n dec_places = prd._meta.get_field('sugar').decimal_places\n self.assertEqual(dec_places, 2)\n # test blank field in label sugar\n prd_blank = prd._meta.get_field('sugar').blank\n self.assertTrue(prd_blank)\n # test null field in label sugar\n prd_null = prd._meta.get_field('sugar').null\n self.assertTrue(prd_null)\n\n # test the type of salt\n prd_type = prd._meta.get_field('salt').get_internal_type()\n self.assertEqual(prd_type, 'DecimalField')\n # label salt max digits\n max_digits = prd._meta.get_field('salt').max_digits\n self.assertEqual(max_digits, 5)\n # label salt decimal places\n dec_places = prd._meta.get_field('salt').decimal_places\n self.assertEqual(dec_places, 2)\n # test blank field in label salt\n prd_blank = prd._meta.get_field('salt').blank\n self.assertTrue(prd_blank)\n # test null field in label salt\n prd_null = prd._meta.get_field('salt').null\n self.assertTrue(prd_null)\n\n # test the type of prd_cat\n prd_type = prd._meta.get_field('prd_cat').get_internal_type()\n self.assertEqual(prd_type, 'ForeignKey')\n # label db_column\n fk = prd._meta.get_field('prd_cat').db_column\n self.assertEqual(fk, 'prd_cat')\n # test blank field in label prd_cat\n prd_blank = prd._meta.get_field('prd_cat').blank\n self.assertFalse(prd_blank)\n # test null field in label prd_cat\n prd_null = prd._meta.get_field('prd_cat').null\n self.assertFalse(prd_null)\n\n # Favourite table ----------------------------------------------------", "def test_amount_value(self):\n dict_with_value = self.info_list.get_value_info()\n self.assertEqual(dict_with_value['amount'], 26)", "def check_value(self, key: str, value: Any):\n # Check the value with a set of tests\n self._check_missing(key, value)\n self._check_allowed_values(key, value)\n self._check_data_type(key, value)\n self._check_value_range(key, value)", "def test_cleaned_data_for_compute_success(self):\n name = 'test_field'\n if isinstance(self.form.computed_fields, (list, tuple)):\n self.form.computed_fields = self.form.get_computed_fields([name])\n computed_names = list(self.form.computed_fields.keys())\n field_names = list(self.form.fields.keys())\n field_data = {f_name: f\"input_{f_name}_{i}\" for i, f_name in enumerate(field_names)}\n field_data.update({name: f\"value_{f_name}_{i}\" for i, f_name in enumerate(computed_names)})\n original_errors = deepcopy(self.form._errors)\n if self.form._errors is None:\n self.form._errors = ErrorDict() # mimic full_clean: _error is an ErrorDict\n original_cleaned_data = deepcopy(getattr(self.form, 'cleaned_data', None))\n populated_cleaned_data = 
deepcopy(original_cleaned_data or {})\n populated_cleaned_data.update(field_data)\n self.form.cleaned_data = populated_cleaned_data.copy() # ensure cleaned_data is present (mimic full_clean)\n final_cleaned_data = self.form.clean()\n\n self.assertIn(name, computed_names)\n self.assertNotIn(name, field_names)\n self.assertIn(name, populated_cleaned_data)\n self.assertIn(name, final_cleaned_data)\n self.assertNotEqual(original_cleaned_data, final_cleaned_data)\n\n if original_cleaned_data is None:\n del self.form.cleaned_data\n else:\n self.form.cleaned_data = original_cleaned_data\n self.form._errors = original_errors", "def test_contains_true(self):\n self.assertTrue('BarcodeSequence' in self.tester)\n self.assertTrue('barcodesequence' in self.tester)", "def test_getknowndata(self):\n result = recordparser.getfields(self.rawdata, self.fieldmap,\n self.sourcekeys)\n self.assertEqual(self.knownvalues, result)", "def test_age_good_values(self):\n for input_val, output_val in self.known_values:\n self.line._parse_age(input_val)\n self.assertEqual(output_val, self.line.age)", "def verifyData(self, expectedDict):\n pass", "def test_model_field_types(self):\n self.assertTrue(isinstance(self.UserInfo.have_siblings, str))\n self.assertTrue(isinstance(self.UserInfo.known_env_exposures, str))\n self.assertTrue(isinstance(self.UserInfo.known_genetic_mutations, str))\n self.assertTrue(isinstance(self.UserInfo.age, int))", "def input_has_value(step, field_name, value):\r\n with AssertContextManager(step):\r\n text_field = find_field(world.browser, 'text', field_name) or \\\r\n find_field(world.browser, 'textarea', field_name) or \\\r\n find_field(world.browser, 'password', field_name)\r\n assert_false(step, text_field is False,\r\n 'Can not find a field named \"%s\"' % field_name)\r\n assert_equals(text_field.get_attribute('value'), value)", "def test_get_field_state_comparisons_no_comp_states(self):\r\n self.assertRaises(ValueError, get_field_state_comparisons,\r\n self.dist_matrix_header, self.dist_matrix,\r\n self.mapping_header, self.mapping, self.field,\r\n [])", "def is_valid(self, value):\r\n pass", "def testGetAssignedValue(self):\n class SomeMessage(messages.Message):\n a_value = messages.StringField(1, default=u'a default')\n\n message = SomeMessage()\n self.assertEquals(None, message.get_assigned_value('a_value'))\n\n message.a_value = u'a string'\n self.assertEquals(u'a string', message.get_assigned_value('a_value'))\n\n message.a_value = u'a default'\n self.assertEquals(u'a default', message.get_assigned_value('a_value'))\n\n self.assertRaisesWithRegexpMatch(\n AttributeError,\n 'Message SomeMessage has no field no_such_field',\n message.get_assigned_value,\n 'no_such_field')", "def test_construct_values_raises_missing_cleaned_no_error(self):\n constructor_fields = ('first', 'second', 'last', )\n values = ['FirstValue', 'SecondValue', 'LastValue']\n cleaned_data = getattr(self.form, 'cleaned_data', {})\n cleaned_data.update(dict(zip(constructor_fields[:-1], values[:-1])))\n self.form.cleaned_data = cleaned_data\n err = \"This computed value can only be evaluated after fields it depends on have been cleaned. \"\n err += \"The field order must have the computed field after fields used for its value. 
\"\n with self.assertRaisesMessage(ImproperlyConfigured, err):\n self.form.construct_value_from_values(constructor_fields)", "def test_not_blank_validator_valid_value_should_return_true(self):\n for item in self.stdtype_fixtures:\n self.assertTrue(NotBlankValidator(TypeHint(item.get('type')), item.get('valid')))", "def test_generic_failed_code_value(self):\n value = 0\n\n for elem in self.test_generic_failed_code:\n self.assertEqual(value, elem)", "def test_value(mocker):\n transaction = Transaction(\n chain=0,\n nonce=0,\n fee=0,\n value=18_446_744_073_709_551_616,\n to_address=\"1H7NtUENrEbwSVm52fHePzBnu4W3bCqimP\",\n unlock_sig=Config.COINBASE_UNLOCK_SIGNATURE,\n )\n\n assert transaction.validate() == False\n with pytest.raises(\n TransactionNotValid, match=errors.TRANSACTION_FIELD_VALUE\n ):\n transaction.validate(raise_exception=True)\n\n transaction.value = 18_446_744_073_709_551_615\n assert transaction.validate() == True\n assert transaction.validate(raise_exception=True) == True\n\n transaction.value = 9_551_615\n assert transaction.validate() == True\n assert transaction.validate(raise_exception=True) == True\n\n transaction.value = 0\n assert transaction.validate() == False\n with pytest.raises(\n TransactionNotValid, match=errors.TRANSACTION_FIELD_VALUE\n ):\n transaction.validate(raise_exception=True)\n\n transaction.value = -1\n assert transaction.validate() == False\n with pytest.raises(\n TransactionNotValid, match=errors.TRANSACTION_FIELD_VALUE\n ):\n transaction.validate(raise_exception=True)", "def validate_test_value(cls, value):\n for validator in cls._meta.get_field_by_name('value')[0].validators:\n validator(value)", "def test_form_has_fields(self):\r\n self.form = SubscriptionForm()\r\n expect = ['name', 'cpf', 'email', 'phone']\r\n self.assertSequenceEqual(expect, list(self.form.fields))" ]
[ "0.72825843", "0.69574255", "0.69196886", "0.68443435", "0.6773343", "0.67436814", "0.67333835", "0.67238384", "0.67176306", "0.6717321", "0.6710159", "0.6699414", "0.6687402", "0.6654058", "0.6643046", "0.65910715", "0.658521", "0.6562141", "0.65581733", "0.65155", "0.6512393", "0.65089256", "0.64989835", "0.6498652", "0.649103", "0.64795256", "0.64751154", "0.6438498", "0.6437359", "0.6435006", "0.6406418", "0.63866246", "0.6357509", "0.6345379", "0.63388383", "0.63241524", "0.63240963", "0.63240963", "0.6321307", "0.6313786", "0.63033706", "0.63021827", "0.629923", "0.6296597", "0.6282748", "0.62825704", "0.626095", "0.62559795", "0.62321156", "0.6231093", "0.6226776", "0.6217403", "0.62171537", "0.62158227", "0.62063575", "0.62061834", "0.6202314", "0.6194265", "0.61781025", "0.617268", "0.6170952", "0.6167675", "0.6167651", "0.6166027", "0.61644435", "0.61637014", "0.6162284", "0.61583614", "0.61564535", "0.6152685", "0.61503935", "0.61455756", "0.6145439", "0.6139863", "0.6125767", "0.6123118", "0.61226505", "0.61188966", "0.6118659", "0.61027145", "0.6100215", "0.6098613", "0.6093756", "0.6090222", "0.60873896", "0.6083985", "0.60790676", "0.6077646", "0.605891", "0.60557765", "0.6051345", "0.6050286", "0.604989", "0.6049396", "0.6044783", "0.60433036", "0.60429287", "0.6040576", "0.6039241", "0.60329825", "0.6027274" ]
0.0
-1
This test case checks the negative scenario for the field value.
def test_props_fields_no_dash_not_empty( self, splunk_search_util, splunk_searchtime_fields_negative, record_property ): # Search Query record_property("stanza_name", splunk_searchtime_fields_negative["stanza"]) record_property("stanza_type", splunk_searchtime_fields_negative["stanza_type"]) record_property("fields", splunk_searchtime_fields_negative["fields"]) index_list = "(index=" + " OR index=".join(splunk_search_util.search_index.split(',')) + ")" search = ( f"search {index_list}" f" {splunk_searchtime_fields_negative['stanza_type']}=\"" f"{splunk_searchtime_fields_negative['stanza']}\"" ) fields_search = [] for field_dict in splunk_searchtime_fields_negative["fields"]: field = Field(field_dict) negative_values = ", ".join([f'"{each}"' for each in field.negative_values]) fields_search.append(f"({field} IN ({negative_values}))") search += " AND ({})".format(" OR ".join(fields_search)) search += " | stats count by sourcetype" self.logger.info(f"Executing the search query: {search}") # run search result, results = splunk_search_util.checkQueryCountIsZero(search) record_property("search", search) if not result: record_property("results", results.as_list) pp = pprint.PrettyPrinter(indent=4) result_str = pp.pformat(results.as_list[:10]) assert result, ( f"Query result greater than 0.\nsearch={search}\n" f"found result={result_str}" )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_negative(self):\n self.assertFalse(validate_measure_input('-1', self.measures))", "def test_neg():\n value = -42\n num_a = param.Integer(value=value)\n assert -num_a.value == -value", "def test_negative_validation_decision(self, form_field_name, user_data):\n self.assertNotValidationDecision(\n {form_field_name: user_data},\n {form_field_name: ''}\n )", "def test_negative_input(self):\n negative_data_down = np.full_like(\n self.cube_uv_down.data, dtype=np.float32, fill_value=-0.1\n )\n negative_uv_down = self.cube_uv_down.copy(data=negative_data_down)\n msg = (\n \"The radiation flux in UV downward contains data \"\n \"that is negative or NaN. Data should be >= 0.\"\n )\n with self.assertRaisesRegex(ValueError, msg):\n calculate_uv_index(negative_uv_down)", "def test_negative_values_not_allowed(self, test_input, expected, sc):\n expected_err_msg = f'negatives not allowed {expected}'\n with pytest.raises(ValueError):\n sc.add(test_input)\n\n try:\n sc.add(test_input)\n except ValueError as e:\n assert str(e) == expected_err_msg", "def test_negative_values(self):\n rain = self.rain_prob_cube\n high_prob = self.high_prob_cube\n msg = \"Negative values of sleet probability have been calculated.\"\n with self.assertRaisesRegex(ValueError, msg):\n calculate_sleet_probability(rain, high_prob)", "def test_ui_check_answer_negative(capsys, test):\n assert 'Invalid input' in hl.test_help_ui_check_answer_negative(capsys,\n test)", "def test_doubleNegative(self):\n result = self.parser.parse(\"--15\")\n\n # TODO\n # self.assertIsNone(result)", "def test_negativexvalue(self):\n Square.reset_objects()\n with self.assertRaises(ValueError) as e:\n s1 = Square(1, -2)\n self.assertEqual(str(e.exception), \"x must be >= 0\")", "def test_negativeyvalue(self):\n Square.reset_objects()\n with self.assertRaises(ValueError) as e:\n s1 = Square(1, 2, -2)\n self.assertEqual(str(e.exception), \"y must be >= 0\")", "def test_calculate_correct_negative_num(self):\n result = self.calcuate.calcuate('2-5')\n expected_result = \"-3\"\n self.assertEqual(expected_result, result)", "def test_positive_validation_decision(self, form_field_name, user_data):\n self.assertValidationDecision(\n {form_field_name: user_data},\n {form_field_name: ''}\n )", "def test_negativeQuantity(self):\n result = self.parser.parse(\"-1d6\")\n\n # TODO\n # self.assertIsNone(result)", "def test_if_input_is_negative(self):\n self.assertEquals(prime_numbers(-5), \"Numbers less than or equal to zero are not allowed!\")", "def test_neg():\n # Test for negation with scalar Rnode object\n x = Rnode(5.0)\n z = -x\n try:\n assert z.value == -1 * x.value\n except AssertionError as e:\n print(e)\n raise AssertionError", "def test_age_is_positive(self):\n nt.assert_greater_equal(self.herb.age, 0)", "def is_Negative(self):\n return self.signature() < 0", "def test_negative_value(self):\n\n idf = { 'a': 3, 'b': -1 }\n self.assertRaises(ValueError, TFIDFScorer, idf, 3)", "def test_negative(self):\n\n input_ = -1\n expected = ValueError\n with self.assertRaises(expected):\n math.factorial(input_)", "def test_duration_attribute_is_negative(self):\n d = DurationMixin(duration=10)\n\n with self.assertRaises(ValueError) as cm:\n d.duration = -10\n\n self.assertEqual(\n cm.exception.message,\n 'DurationMixin.duration should be an non-negative float'\n )", "def test_negative(self):\n lst = [-1, -5, -98]\n self.assertEqual(max_integer(lst), -1)", "def negative_test_value(self):\n self.assertRaises(ValueError, MyClass().my_func, None, [], \"a\")\n 
self.assertRaises(ValueError, MyClass().my_func, 1, None, \"a\")\n self.assertRaises(ValueError, MyClass().my_func, 1, [], None)\n self.assertRaises(ValueError, MyClass().my_func, a=None, b=[], c=\"a\")\n self.assertRaises(ValueError, MyClass().my_func, a=1, b=None, c=\"a\")\n self.assertRaises(ValueError, MyClass().my_func, a=1, b=[], c=None)", "def test_abs():\n value = -42\n num_a = param.Integer(value=value)\n assert abs(num_a.value) == abs(value)", "def test_is_valid_annotation_value_invalid_input():\n # test valid label values\n assert not is_valid_annotation_value(value=1)", "def test_subtract_all_args_less_zero(self):\n try:\n self.assertEqual(subtract(-18, -5), -13)\n except Exception as error:\n print(error)", "def negative(data):\n return _make.negative(data)", "def test_rschematic_negative(self):\n args_q = [\n # Invalid string as type\n (\"name\", \"str\", False),\n # Invalid string tuple for types\n (\"name\", (\"str\", int), False),\n # Invalid string tuple for types\n (\"name\", (\"str\", int), False),\n # Invalid string as nullable\n (\"name\", str, \"False\"),\n # Invalid int as nullable\n (\"name\", (str, int), 0),\n ]\n for args in args_q:\n self.rschematic_negative(args)", "def test_negatives(self):\n self.assertEqual(max_integer([-1, -2, -3, -4]), -1)\n self.assertEqual(max_integer([-4, -3, -2, 0]), 0)", "def test_hyphen(self):\n with self.assertRaises(ValidationError):\n field_name_validator('logstash-')", "def __neg__(self):\n return self.neg()", "def test_ui_menu_negative_1(test, capsys):\n assert 'Invalid input!' in hl.test_help_ui_menu_negative_1(test,\n capsys)", "def test_ui_menu_negative_2(test, capsys):\n assert 'Invalid input' in hl.test_help_ui_menu_negative_2(test,\n capsys)", "def test_subtract_zero_arg(self):\n try:\n self.assertEqual(subtract(0, -6), 7)\n except Exception as error:\n print(f'Got error in {inspect.stack()[0][3]}, {error}')", "def test_minus(self):\n self.assertEqual(1, minus(3, 2))", "def test_validation_negative(self):\n self.assertFalse(self.cut._validate_iban(\"FR14 2004 1010 0505 0001 3\"))\n self.assertFalse(self.cut._validate_iban(\"XX00 1234 5678 9012 3456 7890 1234 5678 90\"))\n self.assertFalse(self.cut._validate_iban(\"YY00123456789012345678901234567890\"))\n self.assertFalse(self.cut._validate_iban(\"XX22YYY1234567890123\"))\n self.assertFalse(self.cut._validate_iban(\"[email protected]\"))", "def test_does_not_have_value(self) -> None:\n self.assertFalse(LogLevels.has_value(1))", "def test_add_with_negative_amount(self):\n good = GoodInfo(\"яйцо 1 кат.\", \"30\", \"-40\", \"2020-12-30\", \n \"14\", \"2020-12-30\")\n check_product_data = self.database.add(good)\n\n self.assertFalse(check_product_data)", "def validate_value(self, value):\n if value < 0:\n raise serializers.ValidationError(\"The value must be above 0.\")\n return value", "def test_positive_value_exists(self):\n #######################################\n # Test for True\n value_to_test = 1\n self.assertEqual(positive_value_exists(value_to_test), True,\n \"Testing value: {value_to_test}, True expected\".format(value_to_test=value_to_test))\n\n value_to_test = 100\n self.assertEqual(positive_value_exists(value_to_test), True,\n \"Testing value: {value_to_test}, True expected\".format(value_to_test=value_to_test))\n\n value_to_test = 'hello'\n self.assertEqual(positive_value_exists(value_to_test), True,\n \"Testing value: {value_to_test}, True expected\".format(value_to_test=value_to_test))\n\n value_to_test = True\n 
self.assertEqual(positive_value_exists(value_to_test), True,\n \"Testing value: {value_to_test}, True expected\".format(value_to_test=value_to_test))\n\n value_to_test = {\n 'success': True\n }\n self.assertEqual(positive_value_exists(value_to_test), True,\n \"Testing value: {value_to_test}, True expected\".format(value_to_test=value_to_test))\n\n value_to_test = [\n 'success'\n ]\n self.assertEqual(positive_value_exists(value_to_test), True,\n \"Testing value: {value_to_test}, True expected\".format(value_to_test=value_to_test))\n\n #######################################\n # Test for False\n value_to_test = 0\n self.assertEqual(positive_value_exists(value_to_test), False,\n \"Testing value: {value_to_test}, False expected\".format(value_to_test=value_to_test))\n\n value_to_test = -1\n self.assertEqual(positive_value_exists(value_to_test), False,\n \"Testing value: {value_to_test}, False expected\".format(value_to_test=value_to_test))\n\n value_to_test = ''\n self.assertEqual(positive_value_exists(value_to_test), False,\n \"Testing value: {value_to_test}, False expected\".format(value_to_test=value_to_test))\n\n value_to_test = '0'\n self.assertEqual(positive_value_exists(value_to_test), False,\n \"Testing value: {value_to_test}, False expected\".format(value_to_test=value_to_test))\n\n value_to_test = False\n self.assertEqual(positive_value_exists(value_to_test), False,\n \"Testing value: {value_to_test}, False expected\".format(value_to_test=value_to_test))\n\n value_to_test = {}\n self.assertEqual(positive_value_exists(value_to_test), False,\n \"Testing value: {value_to_test}, False expected\".format(value_to_test=value_to_test))\n\n value_to_test = []\n self.assertEqual(positive_value_exists(value_to_test), False,\n \"Testing value: {value_to_test}, False expected\".format(value_to_test=value_to_test))", "def test_negative_pricing(self):\n with self.assertRaises(InvalidProductPriceException):\n Product(self.test_product_name, -1.00)\n with self.assertRaises(InvalidProductPriceException):\n Product(self.test_product_name, -0.01)\n with self.assertRaises(InvalidProductPriceException):\n Product(self.test_product_name, 0)\n with self.assertRaises(InvalidProductPriceException):\n Product(self.test_product_name, 0.00)\n try:\n Product(self.test_product_name, 1.00)\n Product(self.test_product_name, 0.01)\n except InvalidProductPriceException:\n self.fail(\"InvalidProductPriceException raised for positive value unexpectedly\")", "def is_negative(self, a):\n return a < 0", "def __neg__(self):\n return self.negated()", "def testSetWithNegativeString(self):\n def setSat():\n self.node.sat = '-20'\n\n cdl_convert.config.HALT_ON_ERROR = True\n\n self.assertRaises(\n ValueError,\n setSat\n )\n\n cdl_convert.config.HALT_ON_ERROR = False\n\n setSat()\n\n self.assertEqual(\n Decimal('0.0'),\n self.node.sat\n )", "def test_negative_case(self):\n self.assertRaises(ValueError, factorial, -3)", "def test_negative():\n assert is_leap_year(2010) is False", "def test_negativeSides(self):\n result = self.parser.parse(\"1d-6\")\n\n # TODO\n # self.assertIsNone(result)", "def test_details_id_neg(self):\n self.check_response(\n '/attributes/-1',\n ('Please enter a number that is 1 or greater for Attribute ID',))", "def test_minus(self):\n print('test_minus');\n self.assertEqual(90, minus(100, 10))", "def test_ui_menu_negative_3(capsys):\n assert 'Invalid input!' 
in hl.test_help_ui_menu_negative_3(capsys)", "def test_index_hostid_neg(self):\n self.check_response(\n '/attributes?h=-1',\n ('Please enter a number that is 1 or greater for Host ID'))", "def validate_positive(value: float):\n if value < 0:\n err = f\"{value} n`est pas positif\"\n raise ValidationError(err)", "def test_duration_argument_is_negative(self):\n with self.assertRaises(ValueError) as cm:\n DurationMixin(duration=-10)\n\n self.assertEqual(\n cm.exception.message,\n 'DurationMixin.duration should be an non-negative float'\n )", "def testSetPowerWithNegativeString(self):\n def setPower():\n self.node.power = '-20'\n\n cdl_convert.config.HALT_ON_ERROR = True\n\n self.assertRaises(\n ValueError,\n setPower\n )\n\n cdl_convert.config.HALT_ON_ERROR = False\n\n setPower()\n\n self.assertEqual(\n (Decimal('0.0'), Decimal('0.0'), Decimal('0.0')),\n self.node.power\n )", "def test_subtract_negative_result(self):\n\n a = random.randint(100, 1000)\n b = random.randint(10000, 100000)\n\n path = \"/subtract/{}/{}\".format(a, b)\n\n response = self.get_response(path)\n self.assertEqual(200, response.getcode())\n\n self.assertIn(str(a - b).encode(), response.read())", "def testSetWithNegativeInt(self):\n def setSat():\n self.node.sat = -20\n\n cdl_convert.config.HALT_ON_ERROR = True\n\n self.assertRaises(\n ValueError,\n setSat\n )\n\n cdl_convert.config.HALT_ON_ERROR = False\n\n setSat()\n\n self.assertEqual(\n Decimal('0.0'),\n self.node.sat\n )", "def test_subtract_positive_result(self):\n\n a = random.randint(10000, 100000)\n b = random.randint(100, 1000)\n\n path = \"/subtract/{}/{}\".format(a, b)\n\n response = self.get_response(path)\n self.assertEqual(200, response.getcode())\n\n self.assertIn(str(a - b).encode(), response.read())", "def text_max_negative(self):\n self.assertEqual(max_integer([-5, -3, -4, -8]), -3)", "def test_negation(self):\n\n a1 = tuples.Tuple([\"a\", \"b\", \"c\", \"d\"], 1, -2, 3, -4)\n\n a2 = -a1\n\n self.assertEqual(a2,\n tuples.Tuple([\"a\", \"b\", \"c\", \"d\"], -1, 2, -3, 4))", "def test_field_rules():", "def test_isolate_amount(self):\n self.assertIsNotNone(isolate_amount)", "def test_negatives(self):\n model = PoincareModel(self.data, negative=5)\n self.assertEqual(len(model._get_candidate_negatives()), 5)", "def test_y_is_less_than_0(self):\n with self.assertRaisesRegex(ValueError, \"y must be >= 0\"):\n Square(1, 0, -1)", "def test_minus(self):\n self.assertEqual(1, foo.minus(3, 2))", "def test_fail_balance_negative(self):\n self.bundle.transactions[3].value -= 1\n\n validator = BundleValidator(self.bundle)\n\n self.assertFalse(validator.is_valid())\n\n self.assertListEqual(\n validator.errors,\n\n [\n 'Bundle has invalid balance (expected 0, actual -1).',\n ],\n )", "def test_contains_false(self):\n self.assertFalse('Not_a_Sample' in self.tester)", "def test_contains_false(self):\n self.assertFalse('Not_a_Sample' in self.tester)", "def test_unsuccessful_rating_with_negative_rate_value(self):\n response = self.client.post(\n reverse('articles:rate', kwargs={'slug': self.slug}),\n {'rating': -4},\n format=\"json\",\n **self.headers)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(str(response.data['errors']['rating'][0]),\n self.violate_min_value_error_message)", "def test_one_negative_number(self):\r\n given_n = -85\r\n total_n = 200\r\n\r\n with self.assertRaises(NegativeNumberException):\r\n n_percent(given_n, total_n)", "def check_for_negative_flowamounts(df):\n # return a warning if there are negative 
flowamount values\n if (df['FlowAmount'].values < 0).any():\n vLog.warning('There are negative FlowAmounts')\n\n return df", "def test_int_field():", "def test_neg_ordered(self):\n no_list = [-1, -2, -3, -4, -5]\n self.assertEqual(max_integer(no_list), -1)", "def testSetSlopeWithNegativeString(self):\n def setSlope():\n self.node.slope = '-20'\n\n cdl_convert.config.HALT_ON_ERROR = True\n\n self.assertRaises(\n ValueError,\n setSlope\n )\n\n cdl_convert.config.HALT_ON_ERROR = False\n\n setSlope()\n\n self.assertEqual(\n (Decimal('0.0'), Decimal('0.0'), Decimal('0.0')),\n self.node.slope\n )", "def test_value_error(self):\n self._error_test(ValueError)", "def testNegativeInt(self):\n self.assertRaises(messages.EnumDefinitionError,\n messages.Enum.def_enum,\n {'Bad': -1},\n 'BadEnum')", "def __neg__(self):\n return 0 - self", "def test_non_integral_validation(self):", "def test_non_integral_validation(self):", "def test_negative_numbers(self):\n\t\tself.assertTrue(prime_generator(-5), \"Negative numbers not allowed.\")", "def testInvalidValue(self):\n exp_str = 'test: Invalid value for --timing_measurements'\n exp_regex = r'^%s$' % re.escape(exp_str)\n with self.assertRaisesRegexp(flags.ValidationError, exp_regex):\n timing_util.ValidateMeasurementsFlag(['test'])", "def test_NegativePriceCheck(self):\n # Basic price check\n self.log.info(\"Price checking Negative Item via speedkey\")\n pos.click(\"Price Check\")\n pos.click_speed_key(\"Negative Item\")\n \n # Confirm the right item, at the right price\n # NOTE: Price check returns negative prices as possitive. Legacy defect deemed 'Will Not Fix'\n self.read_price_check(\"Negative Item\", \"$5.00\")\n # Add the item\n pos.click(\"Sell Item\")\n \n # Confirm we are in a transaction\n if not self.in_transaction():\n self.tc_fail(\"POS did not start a transaction; can not confirm item was added\")\n else:\n self.log.info(\"Confirmed we are in a transaction\")\n \n # Confirm we added the item, and that it was negative\n ret = self.confirm_line(-1, \"Negative Item\", \"-$5.00\")\n if ret == True:\n self.log.info(\"Confirmed item added\")\n else:\n self.tc_fail(ret)\n \n # Setup for next test\n self.recover()", "def check_value(self, value):", "def test_negative_volume(self):\n with pytest.raises(StateError):\n State(substance=\"water\", T=Q_(300, \"K\"), v=Q_(-10.13, \"m**3/kg\"))", "def test_error_on_negative_rate(self):\n self.ocp_data[\"rates\"][0][\"tiered_rates\"][0][\"value\"] = float(round(Decimal(random.random()), 6) * -1)\n\n with tenant_context(self.tenant):\n serializer = CostModelSerializer(data=self.ocp_data, context=self.request_context)\n with self.assertRaises(serializers.ValidationError):\n if serializer.is_valid(raise_exception=True):\n serializer.save()", "def test_falsepositive(client):\n g.test_authorized_for = []\n res = client.get(\"/v0/falsepositive\" + get_request_args)\n assert \"Thanks! 
We’ve marked this as a false positive\" in res.data.decode(\"utf-8\")", "def check_not_negative(value):\n ivalue = int(value)\n if ivalue < 0:\n raise argparse.ArgumentTypeError(\"%s can't be less than 0\" % value)\n return ivalue", "def test_query_no_def_invalid(self):\n with self.assertRaises(ValueError) as context:\n query_yes_no(question=\"Is anyone wiser than Socrates?\", default=\"xxx\")", "def test_invalid_rating_value(self):\n url = reverse('rate-game')\n negative_rating = {\n 'igdb': self.game.igdb,\n 'name': self.game.name,\n 'slug': self.game.slug,\n 'cover_id': self.game.cover_id,\n 'backdrop_id': self.game.backdrop_id,\n 'rating': -1\n }\n big_rating = negative_rating\n big_rating['rating'] = 6\n\n negative = self.client.post(url, negative_rating, format='json')\n big = self.client.post(url, big_rating, format='json')\n\n self.assertEqual(negative.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(big.status_code, status.HTTP_400_BAD_REQUEST)", "def is_negative(self):\n return (self._num < 0)", "def testSetPowerWithNegativeInt(self):\n def setPower():\n self.node.power = -20\n\n cdl_convert.config.HALT_ON_ERROR = True\n\n self.assertRaises(\n ValueError,\n setPower\n )\n\n cdl_convert.config.HALT_ON_ERROR = False\n\n setPower()\n\n self.assertEqual(\n (Decimal('0.0'), Decimal('0.0'), Decimal('0.0')),\n self.node.power\n )", "def test_subtract(self):\n self.assertEqual(work_file.subtract(10, 5), 5)\n self.assertEqual(work_file.subtract(-1, 1), -2)\n self.assertEqual(work_file.subtract(-1, -1), 0)", "def test_withdraw_amount_view_with_negative_amount(self):\n self.account.current_balance = 100000\n self.account.save()\n\n client.force_authenticate(user=self.account.user, token=self.token)\n url = reverse('customer_withdraw')\n request = client.post(url, {'amount': -100}, format='json')\n self.assertEqual(400, request.status_code)", "def test_is_valid_label_value_valid_input():\n # test valid label values\n assert is_valid_label_value(value=None)\n assert is_valid_label_value(value=\"\")\n assert is_valid_label_value(value=\"l0L\")\n assert is_valid_label_value(value=\"L-l\")\n assert is_valid_label_value(value=\"L.L\")\n assert is_valid_label_value(value=\"l_4\")\n assert is_valid_label_value(value=\"4-you\")\n assert is_valid_label_value(value=\"You.2\")", "def test_subtract_all_args_greater_zero(self):\n try:\n self.assertEqual(subtract(30, 16), 15)\n except Exception as error:\n print(f'Got error in {inspect.stack()[0][3]}, {error}')", "def test_two_negative_numbers(self):\r\n given_n = -85\r\n total_n = -200\r\n\r\n with self.assertRaises(NegativeNumberException):\r\n n_percent(given_n, total_n)", "def negint_p(value):\n # check if the value has the expected type\n if type(value) is not int:\n raise Invalid(\"invalid value type {value}\".format(value=value))\n if value >= 0:\n raise Invalid(\"invalid value {value}, negative integer expected\".format(value=value))", "def __neg__(self):\n return Quantity(-(self._value), self.unit)", "def test_negativesize(self):\n Square.reset_objects()\n with self.assertRaises(ValueError) as e:\n s1 = Square(-1)\n self.assertEqual(str(e.exception), \"width must be > 0\")", "def test_positive_price_details(self):\n with self.client:\n response = self.add_meal(\"beef\", -15000)\n data = json.loads(response.data.decode())\n self.assertEqual(data.get('message'),\n \"Price must be a positive number\")\n self.assertEqual(response.status_code, 400)", "def test_negative_temperature(self):\n with pytest.raises(StateError):\n 
State(substance=\"water\", T=Q_(-100, \"K\"), p=Q_(101325, \"Pa\"))", "def test_allow_none():\n value = None\n num_a = param.Integer(value=value, allow_None=True)\n assert num_a.value == value", "def test_creation_notallow_none():\n with pytest.raises(ValueError) as __:\n value = None\n __ = param.Integer(value=value, allow_None=False)" ]
[ "0.77282876", "0.74175817", "0.7015174", "0.701334", "0.6887958", "0.6836617", "0.67478794", "0.66586304", "0.6655187", "0.66419053", "0.66048115", "0.65991557", "0.655143", "0.6464886", "0.64405125", "0.6340747", "0.6301072", "0.6296137", "0.6294943", "0.6275951", "0.62708426", "0.6263486", "0.62496716", "0.623192", "0.62198675", "0.61940694", "0.6188184", "0.6170291", "0.61702514", "0.6166713", "0.61649054", "0.61648077", "0.6142477", "0.61416787", "0.6132195", "0.61236596", "0.61179906", "0.6103278", "0.61021143", "0.60849494", "0.60832125", "0.60812247", "0.6074797", "0.60595423", "0.60515", "0.6049422", "0.60481244", "0.6037401", "0.6034012", "0.60237163", "0.60181904", "0.6015083", "0.6011919", "0.5998794", "0.59931815", "0.5988179", "0.5987997", "0.59718585", "0.5967815", "0.59650415", "0.5964574", "0.59527814", "0.59516346", "0.5949945", "0.5947632", "0.5947632", "0.5943944", "0.5943673", "0.594251", "0.5937894", "0.59377366", "0.59366727", "0.5935842", "0.59280515", "0.5927629", "0.591371", "0.591371", "0.59112406", "0.59090316", "0.59070235", "0.5904678", "0.5887454", "0.5884295", "0.5868653", "0.5862464", "0.58566266", "0.5854067", "0.5852927", "0.58506024", "0.58376235", "0.58344597", "0.58314973", "0.5826338", "0.58166885", "0.5807552", "0.58058923", "0.5797609", "0.5795476", "0.57902426", "0.5784294", "0.5782049" ]
0.0
-1
Test case to check tags mentioned in tags.conf. This test case checks that a tag is assigned to the event if enabled, and that the tag is not assigned to the event if disabled.
def test_tags(
    self, splunk_search_util, splunk_searchtime_fields_tags, record_property, caplog
):
    is_tag_enabled = splunk_searchtime_fields_tags.get("enabled", True)
    tag_query = splunk_searchtime_fields_tags["stanza"]
    tag = splunk_searchtime_fields_tags["tag"]
    self.logger.info(f"Testing for tag {tag} with tag_query {tag_query}")

    record_property("Event_with", tag_query)
    record_property("tag", tag)
    record_property("is_tag_enabled", is_tag_enabled)

    index_list = "(index=" + " OR index=".join(splunk_search_util.search_index.split(',')) + ")"
    search = f"search {index_list} {tag_query} AND tag={tag}"
    search += " | stats count by sourcetype"

    self.logger.info(f"Search: {search}")

    result = splunk_search_util.checkQueryCountIsGreaterThanZero(
        search,
        interval=splunk_search_util.search_interval,
        retries=splunk_search_util.search_retry
    )
    record_property("search", search)

    if is_tag_enabled:
        assert result, (
            f"No events found for the enabled Tag={tag}."
            f"\nsearch={search}"
            f"\ninterval={splunk_search_util.search_interval}, retries={splunk_search_util.search_retry}"
        )
    else:
        assert not result, (
            f"Events found for the disabled Tag={tag}."
            f"\nsearch={search}"
            f"\ninterval={splunk_search_util.search_interval}, retries={splunk_search_util.search_retry}"
        )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_tags(question):\n assert \"tags\" in question[\"instance\"]\n tags = set(question[\"instance\"][\"tags\"])\n # there should be at least one tag\n assert len(tags) >= 1\n # each tags should be in VALID_TAGS\n assert len(tags - VALID_TAGS) == 0\n # there should be exactly one category-defining tag\n assert len(tags.intersection(CATEGORY_TAGS)) == 1", "def check_for_tag(tags, tagged_events):\n found_tags = set()\n tags_set = set(tags)\n for tag in tags:\n for tag_event in tagged_events:\n if tag in tag_event[1][\"tag\"][\"labels\"]:\n found_tags.add(tag)\n not_found = tags_set - found_tags\n tag_status = {}\n for tag in found_tags:\n tag_status[tag] = True\n for tag in not_found:\n tag_status[tag] = False\n return tag_status", "def test_01_Tags(self):\n # print(PrettyFormatAny.form(self.m_xml, 'A1-01-A - Tags'))\n self.assertEqual(self.m_xml.root.tag, TESTING_PYHOUSE)", "def test_tags_listed(self):\n now_utc = datetime.now().utcnow()\n one_hour_before_utc = now_utc - timedelta(hours=1)\n two_hour_before_utc = now_utc - timedelta(hours=2)\n\n self.t(\"track {:%Y-%m-%dT%H:%M:%S}Z - {:%Y-%m-%dT%H:%M:%S}Z foo\".format(two_hour_before_utc, one_hour_before_utc))\n self.t(\"track {:%Y-%m-%dT%H:%M:%S}Z - {:%Y-%m-%dT%H:%M:%S}Z bar\".format(one_hour_before_utc, now_utc))\n\n code, out, err = self.t(\"tags\")\n\n self.assertIn('foo', out)\n self.assertIn('bar', out)", "def test_tags(self):\n actors = [Actor.remote(i, maybe_crash=False) for i in range(4)]\n manager = FaultTolerantActorManager(actors=actors)\n\n manager.foreach_actor_async(lambda w: w.ping(), tag=\"pingpong\")\n manager.foreach_actor_async(lambda w: w.call(), tag=\"call\")\n time.sleep(1)\n results_ping_pong = manager.fetch_ready_async_reqs(\n tags=\"pingpong\", timeout_seconds=5\n )\n results_call = manager.fetch_ready_async_reqs(tags=\"call\", timeout_seconds=5)\n self.assertEquals(len(list(results_ping_pong)), 4)\n self.assertEquals(len(list(results_call)), 4)\n for result in results_ping_pong:\n data = result.get()\n self.assertEqual(data, \"pong\")\n self.assertEqual(result.tag, \"pingpong\")\n for result in results_call:\n data = result.get()\n self.assertEqual(data, 1)\n self.assertEqual(result.tag, \"call\")\n\n # test with default tag\n manager.foreach_actor_async(lambda w: w.ping())\n manager.foreach_actor_async(lambda w: w.call())\n time.sleep(1)\n results = manager.fetch_ready_async_reqs(timeout_seconds=5)\n self.assertEquals(len(list(results)), 8)\n for result in results:\n data = result.get()\n self.assertEqual(result.tag, None)\n if isinstance(data, str):\n self.assertEqual(data, \"pong\")\n elif isinstance(data, int):\n self.assertEqual(data, 2)\n else:\n raise ValueError(\"data is not str or int\")\n\n # test with custom tags\n manager.foreach_actor_async(lambda w: w.ping(), tag=\"pingpong\")\n manager.foreach_actor_async(lambda w: w.call(), tag=\"call\")\n time.sleep(1)\n results = manager.fetch_ready_async_reqs(\n timeout_seconds=5, tags=[\"pingpong\", \"call\"]\n )\n self.assertEquals(len(list(results)), 8)\n for result in results:\n data = result.get()\n if isinstance(data, str):\n self.assertEqual(data, \"pong\")\n self.assertEqual(result.tag, \"pingpong\")\n elif isinstance(data, int):\n self.assertEqual(data, 3)\n self.assertEqual(result.tag, \"call\")\n else:\n raise ValueError(\"data is not str or int\")\n\n # test with incorrect tags\n manager.foreach_actor_async(lambda w: w.ping(), tag=\"pingpong\")\n manager.foreach_actor_async(lambda w: w.call(), tag=\"call\")\n time.sleep(1)\n results = 
manager.fetch_ready_async_reqs(timeout_seconds=5, tags=[\"incorrect\"])\n self.assertEquals(len(list(results)), 0)\n\n # now test that passing no tags still gives back all of the results\n results = manager.fetch_ready_async_reqs(timeout_seconds=5)\n self.assertEquals(len(list(results)), 8)\n for result in results:\n data = result.get()\n if isinstance(data, str):\n self.assertEqual(data, \"pong\")\n self.assertEqual(result.tag, \"pingpong\")\n elif isinstance(data, int):\n self.assertEqual(data, 4)\n self.assertEqual(result.tag, \"call\")\n else:\n raise ValueError(\"result is not str or int\")", "def checktags(testcase, tagexp):\n return eval(tagexp, None, getvar(testcase))", "def test_add_or_update_tags(self):\n p = self.load_policy({\n 'name': 'test-azure-tag',\n 'resource': 'azure.resourcegroup',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value_type': 'normalize',\n 'value': 'test_vm'}\n ],\n 'actions': [\n {'type': 'tag',\n 'tags': {'pre-existing-1': 'unmodified', 'pre-existing-2': 'unmodified'}},\n ],\n })\n p.run()\n\n # verify initial tag set\n s = Session()\n client = s.client('azure.mgmt.resource.ResourceManagementClient')\n rg = [rg for rg in client.resource_groups.list() if rg.name == 'test_vm'][0]\n self.assertEqual(rg.tags,\n {'pre-existing-1': 'unmodified', 'pre-existing-2': 'unmodified'})\n\n p = self.load_policy({\n 'name': 'test-azure-tag',\n 'resource': 'azure.resourcegroup',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value_type': 'normalize',\n 'value': 'test_vm'}\n ],\n 'actions': [\n {'type': 'tag',\n 'tags': {'tag1': 'value1', 'pre-existing-1': 'modified'}}\n ],\n })\n p.run()\n\n # verify modified tags\n rg = [rg for rg in client.resource_groups.list() if rg.name == 'test_vm'][0]\n self.assertEqual(rg.tags,\n {'tag1': 'value1', 'pre-existing-1': 'modified', 'pre-existing-2': 'unmodified'})", "def test_02_Tags(self):\n # print(PrettyFormatAny.form(self.m_xml, 'Xml'))\n self.assertEqual(self.m_xml.root.tag, TESTING_PYHOUSE)\n self.assertEqual(self.m_xml.computer_div.tag, 'ComputerDivision')\n self.assertEqual(self.m_xml.house_div.tag, 'HouseDivision')\n self.assertEqual(self.m_xml.lighting_sect.tag, 'LightingSection')\n self.assertEqual(self.m_xml.button_sect.tag, 'ButtonSection')\n self.assertEqual(self.m_xml.button.tag, 'Button')\n self.assertEqual(self.m_xml.controller_sect.tag, 'ControllerSection')\n self.assertEqual(self.m_xml.controller.tag, 'Controller')\n self.assertEqual(self.m_xml.light_sect.tag, 'LightSection')\n self.assertEqual(self.m_xml.light.tag, 'Light')", "def test_service_tags(self):\n\n service_def = self.create_service()\n self.assertEqual(service_def.tags, {})\n\n service_def = self.create_service(tags='{hi: ~, quit: bye}')\n self.assertEqual(service_def.tags, {'hi': None, 'quit': 'bye'})", "def test_page_tags(self):\n page, page_2 = self.get_pages()\n page_tags = models.PageTags.objects.create(extended_object=page)\n page_tags.tags.add(*self.tag_strings)\n\n self.assertTrue(page_has_tag(page, slugify(self.tag_strings[0])))\n self.assertTrue(page_has_tag(page, Tag.objects.get(slug=slugify(self.tag_strings[0]))))\n self.assertEqual(set(self.tag_strings), {tag.name for tag in get_page_tags(page)})\n\n self.assertFalse(page_has_tag(page_2, slugify(self.tag_strings[0])))\n self.assertEqual(set(), {tag.name for tag in get_page_tags(page_2)})", "def check_tags(self):\n if(self.tags is None or not self.tags.get('subscriber', False)):\n self.filters |= Filters.NonSubs\n\n if(self.tags is None or not 
self.tags.get('user-type', 0) > 0):\n self.filters |= Filters.NonMods", "def testGetEventTagByEventIdentifier(self):\n redis_client = self._CreateRedisClient()\n\n test_store = redis_store.RedisStore(\n storage_type=definitions.STORAGE_TYPE_TASK)\n test_store.Open(redis_client=redis_client)\n\n index = 0\n for event, event_data, event_data_stream in (\n containers_test_lib.CreateEventsFromValues(self._TEST_EVENTS)):\n test_store.AddAttributeContainer(event_data_stream)\n\n event_data.SetEventDataStreamIdentifier(\n event_data_stream.GetIdentifier())\n test_store.AddAttributeContainer(event_data)\n\n event.SetEventDataIdentifier(event_data.GetIdentifier())\n test_store.AddAttributeContainer(event)\n\n if index == 1:\n event_tag = events.EventTag()\n event_tag.AddLabels(['Malware', 'Benign'])\n\n event_identifier = event.GetIdentifier()\n event_tag.SetEventIdentifier(event_identifier)\n test_store.AddAttributeContainer(event_tag)\n\n index += 1\n\n test_store.Close()\n\n test_store = redis_store.RedisStore(\n storage_type=definitions.STORAGE_TYPE_TASK)\n test_store.Open(redis_client=redis_client)\n\n test_event = test_store.GetAttributeContainerByIndex(\n events.EventObject.CONTAINER_TYPE, 1)\n # Note that this method is not implemented.\n self.assertIsNone(test_event)\n\n test_store.Close()", "def test_single_tag(self):\n self.request.log(\"Hello World\", tags=[\"tag1\"])\n self.request.end()\n entry = self.get_entry()\n assert len(entry['tags']) == 1\n assert entry['tags'][0] == \"tag1\"", "def test_enable_extension_registers_template_tags(self):\n class TestExtension(Extension):\n __module__ = 'djblets.extensions.test.templatetag_tests.__init__'\n\n templatetags_module = \\\n 'djblets.extensions.test.templatetag_tests.templatetags'\n\n def _check_state(enabled):\n if enabled:\n if get_templatetags_modules:\n self.assertIn(templatetags_module,\n get_templatetags_modules())\n\n self.assertEqual(\n Template(template_str).render(Context({})),\n 'Hello, world!')\n else:\n if get_templatetags_modules:\n self.assertNotIn(templatetags_module,\n get_templatetags_modules())\n\n with self.assertRaisesRegexp(TemplateSyntaxError,\n 'is not a (valid|registered) tag '\n 'library'):\n Template(template_str).render(Context({}))\n\n template_str = (\n '{% load templatetag_tests %}'\n '{% my_extension_template_tag %}'\n )\n\n # Sanity-check that the template tag module isn't registered.\n _check_state(enabled=False)\n\n # Enabling the extension should register the template tags module and\n # clear the cache.\n extension = self.setup_extension(TestExtension)\n\n _check_state(enabled=True)\n\n # Shutting down the extension should remove the template tags module\n # and clear the cache.\n self.manager.disable_extension(extension.id)\n\n _check_state(enabled=False)\n\n # Other libraries should still work.\n Template('{% load djblets_js djblets_extensions %}').render(\n Context({}))", "def tag_check():\n\n async def check(ctx):\n if ctx.author.id == ctx.bot.owner_id:\n return True\n\n is_allowed = (\n ctx.author.guild_permissions.administrator\n or await ctx.bot.get_guild_setting(ctx.guild.id, \"tag_creation_allowed\")\n )\n\n if is_allowed:\n return True\n else:\n raise exceptions.DemocracivBotException(\n message=f\"{config.NO} Only Administrators can add or remove tags on this server.\"\n \" Administrators can change this setting in \"\n f\"`{config.BOT_PREFIX}server tagcreation`.\"\n )\n\n return commands.check(check)", "def test_multiple_tags(self):\n self.request.log(\"Hello World\", tags=[\"tag1\", 
\"tag2\"])\n self.request.end()\n entry = self.get_entry()\n assert len(entry['tags']) == 2\n assert entry['tags'][0] == \"tag1\"\n assert entry['tags'][1] == \"tag2\"", "def test_create_tags_successfull(self):\n payload = {'name': 'Test Tag'}\n self.client.post(TAG_URL, payload)\n exists = Tag.objects.filter(user=self.user, name = payload['name']).exists()\n self.assertTrue(exists)", "def test_tags(self, mock_requests):\n app_id = self.create_app()\n\n # check default\n url = '/v2/apps/{app_id}/config'.format(**locals())\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200, response.data)\n self.assertIn('tags', response.data)\n self.assertEqual(response.data['tags'], {})\n\n # set some tags\n body = {'tags': json.dumps({'environ': 'dev'})}\n response = self.client.post(url, body)\n self.assertEqual(response.status_code, 201, response.data)\n tags1 = response.data\n\n # check tags again\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200, response.data)\n self.assertIn('tags', response.data)\n tags = response.data['tags']\n self.assertIn('environ', tags)\n self.assertEqual(tags['environ'], 'dev')\n\n # set an additional value\n body = {'tags': json.dumps({'rack': '1'})}\n response = self.client.post(url, body)\n self.assertEqual(response.status_code, 201, response.data)\n tags2 = response.data\n self.assertNotEqual(tags1['uuid'], tags2['uuid'])\n tags = response.data['tags']\n self.assertIn('rack', tags)\n self.assertEqual(tags['rack'], '1')\n self.assertIn('environ', tags)\n self.assertEqual(tags['environ'], 'dev')\n\n # read the limit again\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200, response.data)\n tags3 = response.data\n self.assertEqual(tags2, tags3)\n tags = response.data['tags']\n self.assertIn('rack', tags)\n self.assertEqual(tags['rack'], '1')\n self.assertIn('environ', tags)\n self.assertEqual(tags['environ'], 'dev')\n\n # unset a value\n body = {'tags': json.dumps({'rack': None})}\n response = self.client.post(url, body)\n self.assertEqual(response.status_code, 201, response.data)\n tags4 = response.data\n self.assertNotEqual(tags3['uuid'], tags4['uuid'])\n self.assertNotIn('rack', json.dumps(response.data['tags']))\n\n # set valid values\n body = {'tags': json.dumps({'kubernetes.io/hostname': '172.17.8.100'})}\n response = self.client.post(url, body)\n self.assertEqual(response.status_code, 201, response.data)\n body = {'tags': json.dumps({'is.valid': 'is-also_valid'})}\n response = self.client.post(url, body)\n self.assertEqual(response.status_code, 201, response.data)\n body = {'tags': json.dumps({'host.the-name.com/is.valid': 'valid'})}\n response = self.client.post(url, body)\n self.assertEqual(response.status_code, 201, response.data)\n body = {'tags': json.dumps({'host.the-name.com/does.no.exist': 'valid'})}\n response = self.client.post(url, body)\n self.assertContains(\n response,\n 'Addition of host.the-name.com/does.no.exist=valid is the cause',\n status_code=400\n )\n\n # set invalid values\n body = {'tags': json.dumps({'valid': 'in\\nvalid'})}\n response = self.client.post(url, body)\n self.assertEqual(response.status_code, 400, response.data)\n body = {'tags': json.dumps({'host.name.com/notvalid-': 'valid'})}\n response = self.client.post(url, body)\n self.assertEqual(response.status_code, 400, response.data)\n body = {'tags': json.dumps({'valid': 'invalid.'})}\n response = self.client.post(url, body)\n self.assertEqual(response.status_code, 400, response.data)\n body = 
{'tags': json.dumps({'host.name.com/,not.valid': 'valid'})}\n response = self.client.post(url, body)\n self.assertEqual(response.status_code, 400, response.data)\n long_tag = 'a' * 300\n body = {'tags': json.dumps({'{}/not.valid'.format(long_tag): 'valid'})}\n response = self.client.post(url, body)\n self.assertEqual(response.status_code, 400, response.data)\n body = {'tags': json.dumps({'this&foo.com/not.valid': 'valid'})}\n response = self.client.post(url, body)\n self.assertEqual(response.status_code, 400, response.data)\n\n # disallow put/patch/delete\n response = self.client.put(url)\n self.assertEqual(response.status_code, 405, response.data)\n response = self.client.patch(url)\n self.assertEqual(response.status_code, 405, response.data)\n response = self.client.delete(url)\n self.assertEqual(response.status_code, 405, response.data)", "def _verify_tags(self):\n for tag in self.tags:\n if tag.lower() in VASP_TAG_LIST:\n continue\n else:\n print((\"Warning: unknown INCAR tag '\" + tag + \"' with value '\" + str(self.tags[tag]) + \"'\"))", "def raise_for_disabled(self, disabled_tags: Collection[str]):\n tok = self.token()\n if tok.type == TOKEN_TAG and tok.value in disabled_tags:\n raise DisabledTagError(\n f\"{tok.value} usage is not allowed in this context\",\n linenum=tok.linenum,\n )", "def assert_tags_present(payload, tags_to_find):\n tags_found = []\n for tag_dict in payload['data']['tags']:\n tags_found.append(tag_dict.get('text'))\n for tag in tags_to_find:\n assert tag in tags_found\n return", "def test_badge_should_have_tags(self):\n\n badge = self.get_sample_badge()\n # It's a string, even though it is used as a URL\n self.assertIsInstance(badge.tags, list)", "def test_search_tags(self):\n page = self.page1\n page.search_tags = \"Chutes, Ladders\"\n page.save_revision().publish()\n taglist = page.clean_search_tags\n for name in [\"Chutes\", \"Ladders\"]:\n self.assertIn(name, taglist)", "def test_get_tag(self):\n self.seed_static_data()\n params = {'id': 1, 'event_id': 1}\n response = self.app.get('/api/v1/tag', headers=self.user1_headers, data=params)\n data = json.loads(response.data)\n self.assertEqual(data['id'], 1)\n self.assertEqual(data['event_id'], 1)\n self.assertEqual(data['tag_type'], 'RESPONSE')\n self.assertDictEqual(data['name'], {\n 'en': 'English Tag 1 Event 1',\n 'fr': 'French Tag 1 Event 1'\n })\n self.assertDictEqual(data['description'], {\n 'en': 'English Tag 1 Event 1 Description',\n 'fr': 'French Tag 1 Event 1 Description'\n })", "def is_tagged(self,tag_name,element):\n return (tag_name in self.tag2elements.keys()) and (element in self.tag2elements[tag_name])", "def test_title_tags(self):\n page, page_2 = self.get_pages()\n\n # Assign and test english tags\n title_en = page.get_title_obj(language=\"en\")\n title_en_tags = models.TitleTags.objects.create(extended_object=title_en)\n title_en_tags.tags.add(*self.tag_strings)\n\n self.assertTrue(title_has_tag(page, \"en\", slugify(self.tag_strings[0])))\n self.assertTrue(title_has_tag(page, \"en\", Tag.objects.get(slug=slugify(self.tag_strings[0]))))\n self.assertEqual(set(self.tag_strings), {tag.name for tag in get_title_tags(page, \"en\")})\n\n # Assign and test french tags\n title_fr = page.get_title_obj(language=\"fr\", fallback=False)\n title_fr_tags = models.TitleTags.objects.create(extended_object=title_fr)\n title_fr_tags.tags.add(*self.tag_strings_fr)\n self.assertTrue(title_has_tag(page, \"fr\", slugify(self.tag_strings_fr[0])))\n self.assertEqual(set(self.tag_strings_fr), {tag.name for tag in 
get_title_tags(page, \"fr\")})\n\n self.assertFalse(title_has_tag(page, \"it\", slugify(self.tag_strings_fr[0])))\n self.assertEqual(set(), {tag.name for tag in get_title_tags(page, \"it\")})", "def test_todos_by_tag(self):", "def test_add_or_update_single_tag(self):\n p = self.load_policy({\n 'name': 'test-azure-tag',\n 'resource': 'azure.vm',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value_type': 'normalize',\n 'value': 'cctestvm'}\n ],\n 'actions': [\n {'type': 'tag',\n 'tag': 'tag1',\n 'value': 'value1'}\n ],\n })\n p.run()\n\n # verify that the a new tag is added without modifying existing tags\n s = Session()\n client = s.client('azure.mgmt.compute.ComputeManagementClient')\n vm = client.virtual_machines.get('test_vm', 'cctestvm')\n self.assertEqual(vm.tags, {'tag1': 'value1', 'testtag': 'testvalue'})", "def test_create_tag(self):\n\n tag_payload = {'name': 'Test Tag'}\n self.client.post(URL_TAGS, tag_payload)\n\n is_tag_created = Tag.objects.filter(\n user=self.user,\n name=tag_payload['name']\n ).exists()\n\n self.assertTrue(is_tag_created)", "def test_auto_tag_update_false_noop_for_existing_tag(self, utcnow_mock):\n\n # setup by adding an existing CreatorEmail tag\n p = self.load_policy({\n 'name': 'test-azure-tag',\n 'resource': 'azure.resourcegroup',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value_type': 'normalize',\n 'value': 'test_vm'}\n ],\n 'actions': [\n {'type': 'tag',\n 'tag': 'CreatorEmail',\n 'value': 'do-not-modify'},\n ],\n })\n p.run()\n\n p = self.load_policy({\n 'name': 'test-azure-tag',\n 'resource': 'azure.resourcegroup',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value_type': 'normalize',\n 'value': 'test_vm'}\n ],\n 'actions': [\n {'type': 'auto-tag-user',\n 'tag': 'CreatorEmail',\n 'update': False,\n 'days': 10}\n ],\n })\n p.run()\n\n # verify CreatorEmail tag was not modified\n s = Session()\n client = s.client('azure.mgmt.resource.ResourceManagementClient')\n rg = [rg for rg in client.resource_groups.list() if rg.name == 'test_vm'][0]\n self.assertEqual(rg.tags['CreatorEmail'], 'do-not-modify')", "def HasEventTags(self):\n for name in self._GetStreamNames():\n if name.startswith('event_tag_data.'):\n return True\n\n return False", "def check_tag(self, cr, uid, ids, code=None, name=None, context=None):\n assert bool(code is None) or bool(name is None), \"code or name must not be None\"\n tag_domain = [('id', 'in', ids)]\n if code is not None:\n tag_domain.append(('tag_ids.code', '=', code))\n if name is not None:\n tag_domain.append(('tag_ids.name', '=', name))\n\n count = self.search(cr, uid, tag_domain, count=1)\n return bool(count == len(ids))", "def can_tag(self):\n try:\n self.cork.require(role='beta-archivist')\n return True\n except Exception:\n return False", "def test_tags_filtered(self):\n self.t(\"track 20160101T0100 - 20160101T1000 foo\")\n self.t(\"track 20160104T0100 - 20160104T1000 bar\")\n\n code, out, err = self.t(\"tags 2016-01-02 - 2016-01-06\")\n\n self.assertNotIn('foo', out)\n self.assertIn('bar', out)", "def test_tag(provisioner, prov_data, template_name, provider):\n prov_data[\"vm_name\"] = \"test_prov_dlg_{}\".format(fauxfactory.gen_alphanumeric())\n prov_data[\"apply_tags\"] = [\n ([version.pick({version.LOWEST: \"Service Level\", \"5.3\": \"Service Level *\"}), \"Gold\"], True)]\n\n vm = provisioner(template_name, prov_data)\n\n tags = vm.get_tags()\n assert \"Service Level: Gold\" in tags, \"Service Level: Gold not in tags ({})\".format(str(tags))", 
"def test_tags_on_article(self):\n self.article.tags.add(self.tag1, self.tag2)\n self.assertEqual('Django', str(self.article.tags.all()[0]))", "def is_tag_available(self, tag):\n return tag in self.available_tags", "def _canProcessTags(self, grammar, pos_tags):\n badTags = []\n for tag in pos_tags:\n if tag not in grammar.tags:\n badTags.append(tag)\n logger.debug(\"Grammar can't handle tag:\" + tag)\n if badTags:\n return False\n else:\n return True", "def test_create_tag_successful(self):\n payload = {'name': 'Test tag'}\n self.client.post(TAGS_URL, payload)\n\n exists = Tag.objects.filter(user=self.user, name=payload['name']).exists()\n\n self.assertTrue(exists)", "def test_removal_does_not_raise_on_nonexistent_tag(self):\n p = self.load_policy({\n 'name': 'test-azure-tag',\n 'resource': 'azure.vm',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value_type': 'normalize',\n 'value': 'cctestvm'}\n ],\n 'actions': [\n {'type': 'untag',\n 'tags': ['tag-does-not-exist']},\n ],\n })\n\n # verify initial tag set is empty\n s = Session()\n client = s.client('azure.mgmt.compute.ComputeManagementClient')\n vm = client.virtual_machines.get('test_vm', 'cctestvm')\n self.assertEqual(vm.tags, {'testtag': 'testvalue'})\n\n raised = False\n try:\n p.run()\n except KeyError:\n raised = True\n\n # verify no exception raised and no changes to tags on resource\n self.assertFalse(raised)\n self.assertEqual(vm.tags, {'testtag': 'testvalue'})", "def test_add_tag(self):\n fc = self.read_feature(region='Adriatic_Sea')\n\n fc.tag(tags=['tag1', 'tag2', 'Mediterranean_Basin'])\n assert (fc.features[0]['properties']['tags'] ==\n 'Adriatic_Sea;Mediterranean_Basin;tag1;tag2')\n\n self.check_feature(fc.features[0])", "def has_tag(self, tag):\n return tag in self.tags", "def has_tag(self, tag):\n return tag in self.tags", "def test_create_tag_successful(self):\n payload = {'name': 'Test tag'}\n self.client.post(TAGS_URL, payload)\n exists = Tag.objects.filter(\n user=self.user,\n name=payload['name'],\n ).exists()\n\n self.assertTrue(exists)", "def test_create_tag_succesful(self):\n payload = {'name': 'Test tag'}\n res = self.client.post(TAGS_URL, payload)\n\n exists = Tag.objects.filter(\n user=self.user,\n name=payload['name']\n ).exists()\n self.assertTrue(exists)\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)", "def test_tag_valid_image(self):\n alpine = self.docker.images.get(constant.ALPINE)\n self.assertTrue(alpine.tag(\"demo\", constant.ALPINE_SHORTNAME))\n\n alpine = self.docker.images.get(constant.ALPINE)\n for tag in alpine.tags:\n self.assertIn(\"alpine\", tag)", "def test_tags_tag_search_valid_tag(self,tag_with_items):\n\n tag = tag_with_items\n\n assert tag is not None, 'Could not find a tag with items'\n\n po = self.catalog.load_pageobject('TagsPage')\n po.goto_page()\n\n # perform the search\n self.browser.proxy_client.new_har(\"page\")\n po.search_for_tags(tag)\n har_entry = self.browser.page_load_details()\n\n # check for errors\n assert har_entry is not None, \\\n \"failed to load the uri. 
http archive unavailable.\"\n assert self.browser.error_loading_page(har_entry) is False, \\\n \"performing a tag search using an the tag\" \\\n + \"'%s' returned an error response code on\" % (tag) \\\n + \"the page %s http archive follows:\\n%s\" \\\n % (po.current_url(),pprint.pformat(har_entry))\n\n # check for valid pagination total on tags view page\n po = self.catalog.load_pageobject('TagsViewPage')\n (start,end,total) = po.get_pagination_counts()\n\n assert total >= 0, \\\n \"performing a tag search using the tag\" \\\n + \"'%s' took user to page (%s) with invalid pagination\"\\\n % (tag,po.current_url())", "def check_for_tags(self, data_in):\n # possible header tags\n tags = ['NODE:', 'PORT:', 'STARTOFFSET:', 'ENDOFFSET:']\n\n # check for tags\n for tag in tags:\n if data_in.find(tag) != -1:\n print \"Found tag %s in data file\" % tag\n return False\n\n return True", "def add_tags(event):\n\n add_tags_from_presets()", "def test_create_tag_is_successful(self):\n\n payload = {\n 'name': 'Test Tag'\n }\n\n self.client.post(TAGS_URL, payload)\n\n exists = Tag.objects.filter(\n user=self.user,\n name=payload['name']\n ).exists()\n\n self.assertTrue(exists)", "def check_event_status(self):\n pass", "def test_create_services_with_tag(self):\n tag1 = sample_tag(user=self.user, name='Electrical')\n tag2 = sample_tag(user=self.user, name='Distribution')\n\n payload = {\n 'title' : 'Fitting Job',\n 'tags' : [tag1.id, tag2.id],\n 'price' : 100.00\n }\n\n res = self.client.post(SERVICES_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n services = Service.objects.get(id=res.data['id'])\n tags = services.tags.all()\n self.assertEqual(tags.count(), 2)\n self.assertIn(tag1, tags)\n self.assertIn(tag2, tags)", "def test_get_device_tags_by_id(self):\n pass", "def test_show_tags(self):\r\n\r\n with app.test_client() as client:\r\n resp = client.get(\"/tags\")\r\n html = resp.get_data(as_text=True)\r\n tags = Tag.query.all()\r\n\r\n self.assertEqual(resp.status_code, 200)\r\n self.assertIn(\"Tag List\", html)\r\n self.assertIn(tags[0].name, html)", "def test_create_recipe_with_tags(self):\n tag1 = sample_tag(user=self.user,name='vegan')\n tag2 = sample_tag(user=self.user, name='dessert')\n payload = {\n 'title':'cheesecake',\n 'tag':[tag1.id,tag2.id],\n 'time_minutes':60,\n 'price':10.00,\n }\n res = self.client.post(RECIPE_URL,payload)\n self.assertEqual(res.status_code,status.HTTP_201_CREATED)\n recipe = Recipe.objects.get(id=res.data['id'])\n tags = recipe.tag.all()\n self.assertEqual(len(tags),2)\n self.assertIn(tag1,tags)\n self.assertIn(tag2,tags)", "def test_get_device_tags_by_id1(self):\n pass", "def test_user_can_tag_to_text_annotation(self):\n response = self.client.get(reverse('image_lucida_app:add-tag-annotation', args={self.test_tag.pk}))\n self.assertEqual(response.status_code, 200)", "def tag_exists(tag, directory=None):\n return tag in get_tags(directory)", "def tags():", "def test_post_event_admin(self):\n self.seed_static_data()\n params = {\n 'event_id': 2,\n 'tag_type': 'RESPONSE',\n 'name': {\n 'en': 'English Tag 2 Event 2',\n 'fr': 'French Tag 2 Event 2',\n },\n 'description': {\n 'en': 'English Tag 2 Event 2 Description',\n 'fr': 'French Tag 2 Event 2 Description',\n }\n }\n # User 1 is not an event admin for event 2\n response = self.app.post(\n '/api/v1/tag', \n headers=self.user1_headers, \n data=json.dumps(params), \n content_type='application/json')\n self.assertEqual(response.status_code, 403)", "def test_tags_filter(client, example_records, h, 
prefix):\n # Test query (q=)\n res = client.get(f'{prefix}?tags=recommended', headers=h)\n assert res.status_code == 200\n assert res.json[\"hits\"][\"total\"] == 1", "def test_tags_limited_to_user_tags(self):\n\n user2 = create_user(\n fname='Test2',\n lname='User2',\n email='[email protected]',\n password='testpass2'\n )\n\n Tag.objects.create(user=user2, name='Vegan')\n tag = Tag.objects.create(user=self.user, name='Dessert')\n\n res = self.client.get(TAGS_URL)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 1)\n self.assertEqual(res.data[0]['name'], tag.name)", "def test_get_all_tags(self):\n print(self.session.tags)\n self.assertEqual(\n len(self.session.tags),\n (3 * len(self.session.wp_post_objects)) #3 tags added by default\n )", "def _get_tag_fixture():\n # just picked a few valid tags to try out as valid str return_tags args:\n test_str_as_arg = [\n \"X-y-must-have-same-index\",\n \"capability:pred_var\",\n \"skip-inverse-transform\",\n ]\n\n # we can also make them into a list to test list of str as a valid arg:\n test_list_as_arg = [test_str_as_arg]\n # Note - I don't include None explicitly as a test case - tested elsewhere\n return test_str_as_arg + test_list_as_arg", "def test_create_tag_successful(self):\n tag_data = {'name': 'Snack'}\n res = self.client.post(TAGS_URL, data=tag_data)\n\n exists = Tag.objects.filter(\n user=self.user,\n name=tag_data['name']\n ).exists()\n\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n self.assertTrue(exists)", "def test_create_tag_successful(self):\n payload = {'name':'Desserts'}\n res = self.client.post(TAGS_URL,payload)\n\n exist = Tag.objects.filter(\n user = self.user,\n name = payload['name']\n ).exists()\n\n self.assertTrue(exist)\n #self.assertEqual(res.status_code,status.HTTP_200_OK)", "def test_execute_tags_queries(self):\n test_cases = [\n {\"value\": \"-1\", \"unit\": \"month\", \"resolution\": \"monthly\"},\n {\"value\": \"-2\", \"unit\": \"month\", \"resolution\": \"monthly\"},\n {\"value\": \"-10\", \"unit\": \"day\", \"resolution\": \"daily\"},\n {\"value\": \"-30\", \"unit\": \"day\", \"resolution\": \"daily\"},\n ]\n\n for case in test_cases:\n url = reverse(\"azure-tags\")\n client = APIClient()\n params = {\n \"filter[resolution]\": case.get(\"resolution\"),\n \"filter[time_scope_value]\": case.get(\"value\"),\n \"filter[time_scope_units]\": case.get(\"unit\"),\n \"key_only\": False,\n }\n url = url + \"?\" + urlencode(params, quote_via=quote_plus)\n response = client.get(url, **self.headers)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n data = response.json().get(\"data\")\n\n self.assertTrue(data)\n self.assertTrue(isinstance(data, list))\n for tag in data:\n self.assertTrue(isinstance(tag, dict))\n self.assertIn(\"key\", tag)\n self.assertIn(\"values\", tag)\n self.assertIsNotNone(tag.get(\"key\"))\n self.assertIn(tag.get(\"values\").__class__, [list, str])\n self.assertTrue(tag.get(\"values\"))", "def _list_tags(self, expression):\n try:\n for tag in self.dockerioapi.get_tags(expression):\n Msg().out(tag)\n return self.STATUS_OK\n except (KeyError, TypeError, ValueError):\n return self.STATUS_ERROR", "def is_taggable(t: str) -> bool:\n return t in taggable_resource_types", "def test_create_recipe_with_tags(self):\n tag1 = sample_tag(user=self.user, name = 'Vegan')\n tag2 = sample_tag(user=self.user, name = 'Dessert')\n payload = {\n 'title': 'Avocado lime Cheesecake',\n 'tags': [tag1.id, tag2.id], # this is how tags are assigned\n 
'time_minutes': 20,\n 'price': 20.00,\n }\n res = self.client.post(RECIPE_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n recipe = Recipe.objects.get(id=res.data['id'])\n tags = recipe.tags.all()\n\n self.assertEqual(tags.count(), 2)\n self.assertIn(tag1, tags)\n self.assertIn(tag2, tags)", "def test_remove_tags(self):\n p = self.load_policy({\n 'name': 'test-azure-tag',\n 'resource': 'azure.resourcegroup',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value_type': 'normalize',\n 'value': 'test_vm'}\n ],\n 'actions': [\n {'type': 'tag',\n 'tags': {'pre-existing-1': 'to-keep', 'pre-existing-2': 'to-keep',\n 'added-1': 'to-delete', 'added-2': 'to-delete'}},\n ],\n })\n p.run()\n\n # verify initial tag set\n s = Session()\n client = s.client('azure.mgmt.resource.ResourceManagementClient')\n rg = [rg for rg in client.resource_groups.list() if rg.name == 'test_vm'][0]\n self.assertEqual(rg.tags,\n {'pre-existing-1': 'to-keep', 'pre-existing-2': 'to-keep',\n 'added-1': 'to-delete', 'added-2': 'to-delete'})\n\n p = self.load_policy({\n 'name': 'test-azure-remove-tag',\n 'resource': 'azure.resourcegroup',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value_type': 'normalize',\n 'value': 'test_vm'}\n ],\n 'actions': [\n {'type': 'untag',\n 'tags': ['added-1', 'added-2']}\n ],\n })\n p.run()\n\n # verify tags removed and pre-existing tags not removed\n rg = [rg for rg in client.resource_groups.list() if rg.name == 'test_vm'][0]\n self.assertEqual(rg.tags,\n {'pre-existing-1': 'to-keep', 'pre-existing-2': 'to-keep'})", "def test_has_tagging_button(self):\n response = self.client.get(self.get_url(self.trait.pk))\n context = response.context\n self.assertTrue(context['show_tag_button'])\n self.assertContains(response, reverse('trait_browser:source:traits:tagging', kwargs={'pk': self.trait.pk}))", "def test_has_tagging_button(self):\n response = self.client.get(self.get_url(self.trait.pk))\n context = response.context\n self.assertTrue(context['show_tag_button'])\n self.assertContains(response, reverse('trait_browser:source:traits:tagging', kwargs={'pk': self.trait.pk}))", "def test_basic_av_by_tag(self):\n doc1 = Document.objects.create_document(\n title=\"doc1\",\n user=self.testcase_user,\n page_count=2,\n file_name=\"koko.pdf\",\n size='1111',\n lang='ENG',\n )\n doc2 = Document.objects.create_document(\n title=\"doc2\",\n user=self.testcase_user,\n page_count=2,\n file_name=\"kuku.pdf\",\n size='1111',\n lang='ENG',\n )\n doc1.tags.add(\n \"green\",\n \"blue\",\n tag_kwargs={'user': self.testcase_user}\n )\n doc2.tags.add(\n \"blue\",\n tag_kwargs={'user': self.testcase_user}\n )\n\n ret = self.client.get(\n reverse('admin:search'), {'tag': 'green'}\n )\n self.assertEqual(\n ret.status_code,\n 200\n )\n self.assertEqual(\n len(ret.context['results_docs']),\n 1\n )\n doc_ = ret.context['results_docs'][0]\n\n self.assertEqual(\n doc_.id,\n doc1.id\n )", "def match(self, name, tags):\n return name.lower() in tags", "def test_creating_recipe_with_tags(self):\n tag1 = sample_tags(user=self.user, name='Vegan')\n tag2 = sample_tags(user=self.user, name='Dessert')\n\n payload = {\n 'title': 'Avocado lime cheesecake',\n 'time_minutes': 60,\n 'price': 5000.00,\n 'currency': 'NGN',\n 'tags': [tag1.id, tag2.id]\n }\n self.evaluate_recipe(tag1, tag2, payload, 'tag')", "def testTagJobs(self):\n self.assertTrue(\"C#\" in self.app._tag_jobs(\"C#\"))\n self.assertTrue(\"C++\" in self.app._tag_jobs(\"c++\"))\n self.assertTrue(\"Objective 
C\" in self.app._tag_jobs(\"obj-c\"))\n self.assertTrue(\".NET\" in self.app._tag_jobs(\".NET\"))\n self.assertEqual(0, len(self.app._tag_jobs(\"random text to see\")))", "def tags_changed(self, tags):\n pass", "def make_tag_available(self, tag):\n if not self.is_tag_available(tag):\n self.available_tags.append(tag)\n return True\n return False", "def test_update_task_tags(\n self,\n mock_config_load,\n mock_custom_objects_api,\n mock_core_v1_api\n ):\n task_id = util.MOCK_UUID_5\n\n rv = TEST_CLIENT.patch(\n f\"/tasks/{task_id}\",\n json={\n \"tags\": [\"FEATURE_ENGINEERING\"],\n },\n )\n result = rv.json()\n expected = {\n \"uuid\": \"uuid-5\",\n \"name\": \"task-5\",\n \"description\": None,\n \"commands\": None,\n \"cpuLimit\": \"2000m\",\n \"cpuRequest\": \"100m\",\n \"arguments\": None,\n \"category\": \"MONITORING\",\n \"tags\": [\"FEATURE_ENGINEERING\"],\n \"dataIn\": None,\n \"dataOut\": None,\n \"docs\": None,\n \"hasNotebook\": False,\n \"image\": EXPERIMENT_IMAGE,\n \"memoryLimit\": \"10Gi\",\n \"memoryRequest\": \"2Gi\",\n \"parameters\": [],\n \"readinessProbeInitialDelaySeconds\": 60,\n \"createdAt\": mock.ANY,\n }\n machine_generated = [\"updatedAt\"]\n for attr in machine_generated:\n self.assertIn(attr, result)\n del result[attr]\n self.assertDictEqual(expected, result)\n self.assertEqual(rv.status_code, 200)", "def check_tag(self, session, tag):\n if not tag:\n return False\n\n try:\n self._tag(session.get, key=tag, session=session)\n return True\n except exceptions.NotFound:\n return False", "def pytest_bdd_apply_tag(tag, function):\n skip_tags = {\"skip\", \"skip-python\"}\n if RECORD != \"none\":\n # ignore integration-only scenarios if the recording is enabled\n skip_tags.add(\"integration-only\")\n if RECORD != \"false\":\n skip_tags.add(\"replay-only\")\n\n if tag in skip_tags:\n marker = pytest.mark.skip(reason=f\"skipped because of '{tag} in {skip_tags}\")\n marker(function)\n return True", "def test_get_event_admin_correct_event(self):\n self.seed_static_data()\n params = {'id': 1, 'event_id': 1}\n response = self.app.get('/api/v1/tag', headers=self.user2_headers, data=params)\n self.assertEqual(response.status_code, 403)", "def test_basic_av_by_tags_op_any(self):\n doc1 = Document.objects.create_document(\n title=\"doc1\",\n user=self.testcase_user,\n page_count=2,\n file_name=\"koko.pdf\",\n size='1111',\n lang='ENG',\n )\n doc2 = Document.objects.create_document(\n title=\"doc2\",\n user=self.testcase_user,\n page_count=2,\n file_name=\"kuku.pdf\",\n size='1111',\n lang='ENG',\n )\n doc3 = Document.objects.create_document(\n title=\"doc3\",\n user=self.testcase_user,\n page_count=2,\n file_name=\"momo.pdf\",\n size='1111',\n lang='ENG',\n )\n doc1.tags.add(\n \"red\",\n tag_kwargs={'user': self.testcase_user}\n )\n doc2.tags.add(\n \"green\",\n tag_kwargs={'user': self.testcase_user}\n )\n doc3.tags.add(\n \"blue\",\n tag_kwargs={'user': self.testcase_user}\n )\n\n base_url = reverse('admin:search')\n args = \"tag=red&tag=green&tags_op=any\"\n url = f\"{base_url}?{args}\"\n\n ret = self.client.get(url)\n\n self.assertEqual(\n ret.status_code,\n 200\n )\n self.assertEqual(\n len(ret.context['results_docs']),\n 2\n )\n result_ids = set(\n [doc_.id for doc_ in ret.context['results_docs']]\n )\n self.assertEqual(\n result_ids,\n set([doc1.id, doc2.id])\n )", "def test_no_tags(self):\n self.request.log(\"Hello World\")\n self.request.end()\n entry = self.get_entry()\n assert len(entry['tags']) == 0", "def validate_tag(tag=None):\n if not tag:\n raise 
AttributeError('Tag cannot be empty')\n\n if tag not in TAGS:\n raise ValueError('{0} tag is not supported')", "def test_add_tag_successful(self):\n payload = {'name': 'test tag'}\n self.client.post(TAGS_URL, payload)\n\n # self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n exists = Tag.objects.filter(\n user=self.user,\n name=payload['name']\n ).exists()\n self.assertTrue(exists)", "def setUp(self):\n self.platform = wirelesstagpy.WirelessTags(username=USERNAME, password=PASSWORD)\n self.tag_outdoor = wirelesstagpy.SensorTag(MOCK.OUTDOOR_PROBE, self.platform)\n self.platform._tags[\"fake-1\"] = self.tag_outdoor # pylint: disable=protected-access", "def testTags(self):\n project = self.session.create_project()\n\n tags = project.tags\n self.assertTrue(type(tags) == list, \"Project tags() method returns a list.\")\n self.assertEqual(len(tags), 0, \"Template project tags list is empty.\")\n\n new_tags = [\"tagA\", \"tagB\"]\n\n project.tags = new_tags\n self.assertEqual(project.tags, new_tags, \"Can set tags on a project.\")\n\n json_str = project.to_json()\n doc = json.loads(json_str)\n self.assertTrue('tags' in doc['meta'],\n \"JSON representation has 'tags' field in 'meta'.\")\n\n self.assertEqual(doc['meta']['tags'], new_tags,\n \"JSON representation had correct tags after setter.\")", "def test_create_recipe_with_tag(self):\n tag1 = sample_tag(user=self.user, name = 'Vegen')\n tag2 = sample_tag(user=self.user, name='Dessert')\n\n payload = {\n 'title': 'Avocado lime cheescake',\n 'tags' : [tag1.id, tag2.id],\n 'time_minuts': 50,\n 'price': 400\n }\n\n res = self.client.post(RECIPE_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n recipe = Recipe.objects.get(id=res.data['id'])\n tags = recipe.tags.all()\n self.assertEqual(tags.count(),2)\n self.assertIn(tag1, tags)\n self.assertIn(tag2,tags)", "def test_task_count_tags(self):\r\n tasks.count_tags()\r\n\r\n stat = StatBookmark.query.first()\r\n self.assertEqual(stat.attrib, stats.TAG_CT)\r\n self.assertEqual(stat.data, 4)", "def has_tags(self):\n return bool(self.tags)", "def test_verbose_new_tag(self):\n code, out, err = self.t(\"start foo bar\")\n\n self.assertIn(\"Note: 'foo' is a new tag.\", out)\n self.assertIn(\"Note: 'bar' is a new tag.\", out)", "def test_enabled(self):\n # OSA script should have been installed in setUp function, which sets\n # enabled to True by default.\n self.assertTrue(self.run_function(\"assistive.enabled\", [OSA_SCRIPT]))\n # Disable OSA Script\n self.run_function(\"assistive.enable\", [OSA_SCRIPT, False])\n # Assert against new disabled status\n self.assertFalse(self.run_function(\"assistive.enabled\", [OSA_SCRIPT]))", "def testGetAboutTagValuesWithTagValues(self):\n objectID1 = uuid4()\n objectID2 = uuid4()\n value = self.store.add(AboutTagValue(objectID1, u'foo'))\n self.store.add(AboutTagValue(objectID2, u'bar'))\n self.assertEqual(value, getAboutTagValues(values=[u'foo']).one())", "def test_given_that_I_add_a_user_and_insert_a_task_with_several_tags_I_can_access_tag_collection(self):\n from .models import Tag\n instance = self._makeOne(1,\n u'Find a shrubbery',\n [u'quest', u'ni', u'knight'])\n self.assertEqual(instance.tags[0].name, u'quest')\n self.assertEqual(instance.tags[1].name, u'ni')\n self.assertEqual(instance.tags[2].name, u'knight')", "def tag_push_events(self) -> bool:\n return pulumi.get(self, \"tag_push_events\")", "def testGetAboutTagValues(self):\n value1 = self.store.add(AboutTagValue(uuid4(), u'foo'))\n value2 = 
self.store.add(AboutTagValue(uuid4(), u'bar'))\n self.assertEqual(sorted([value1, value2]), sorted(getAboutTagValues()))", "def test_tagged_trait_button_present(self):\n tagged_traits = TaggedTraitFactory.create_batch(\n 10, trait__source_dataset__source_study_version__study=self.study)\n response = self.client.get(self.get_url(self.study.pk))\n context = response.context\n self.assertContains(response, reverse('trait_browser:source:studies:pk:traits:tagged', args=[self.study.pk]))", "def test_quiet_new_tag(self):\n code, out, err = self.t(\"start foo bar :quiet\")\n\n self.assertNotIn(\"Note: 'foo' is a new tag.\", out)\n self.assertNotIn(\"Note: 'bar' is a new tag.\", out)" ]
[ "0.65553594", "0.6546361", "0.62693006", "0.6008587", "0.59838474", "0.593703", "0.583689", "0.5791458", "0.5709085", "0.5685902", "0.56608677", "0.56162375", "0.56153977", "0.56052005", "0.5602202", "0.5592223", "0.5591245", "0.5573722", "0.5573409", "0.5533851", "0.55135083", "0.550472", "0.54923457", "0.54886615", "0.5477075", "0.54641473", "0.5452591", "0.5448131", "0.5438508", "0.54129565", "0.5410944", "0.5403516", "0.5402995", "0.53895897", "0.536317", "0.53419155", "0.5299263", "0.5291517", "0.52862865", "0.5284972", "0.52677786", "0.5265124", "0.5265124", "0.5258499", "0.5251053", "0.5247761", "0.52457094", "0.52449685", "0.52288646", "0.5219641", "0.52186596", "0.5206916", "0.51908517", "0.51888263", "0.5187835", "0.5176941", "0.5167995", "0.5156962", "0.51558304", "0.5150966", "0.51398635", "0.5135503", "0.513527", "0.51274043", "0.511906", "0.5118878", "0.51138574", "0.51085484", "0.51083887", "0.5107193", "0.51048416", "0.5102672", "0.5102672", "0.50985605", "0.5087009", "0.50848615", "0.5080553", "0.5073569", "0.507141", "0.5066117", "0.50655293", "0.50647557", "0.50645405", "0.50621825", "0.5062142", "0.50607336", "0.50602907", "0.50592875", "0.5057531", "0.50403005", "0.50394726", "0.50377893", "0.50214875", "0.501967", "0.5017977", "0.5017105", "0.50159806", "0.5012596", "0.50083756", "0.50072426" ]
0.6480311
2
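For context, the test_tags document above reduces to assembling one SPL search per tags.conf stanza and asserting on the result count. A minimal, self-contained sketch of that search assembly follows; the helper name and the sample index/stanza/tag values are assumptions for illustration, not taken from the dataset record.

# Sketch only: build_tag_search and the sample values below are hypothetical.
def build_tag_search(search_index: str, tag_query: str, tag: str) -> str:
    """Assemble the SPL query used by the tag test: restrict to the configured
    indexes, match the stanza's search, require the tag, and count by sourcetype."""
    index_list = "(index=" + " OR index=".join(search_index.split(",")) + ")"
    search = f"search {index_list} {tag_query} AND tag={tag}"
    return search + " | stats count by sourcetype"

# Example usage with made-up values:
# build_tag_search("main,summary", 'eventtype="fake_auth"', "authentication")
# -> 'search (index=main OR index=summary) eventtype="fake_auth" AND tag=authentication | stats count by sourcetype'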
Tests if all eventtypes in eventtypes.conf are generated in Splunk.
def test_eventtype(
    self,
    splunk_search_util,
    splunk_searchtime_fields_eventtypes,
    record_property,
    caplog,
):
    record_property(
        "eventtype", splunk_searchtime_fields_eventtypes["stanza"]
    )
    index_list = "(index=" + " OR index=".join(splunk_search_util.search_index.split(',')) + ")"
    search = (f"search {index_list} AND "
              f"eventtype="
              f"\"{splunk_searchtime_fields_eventtypes['stanza']}\"")
    search += " | stats count by sourcetype"

    self.logger.info(
        "Testing eventtype =%s", splunk_searchtime_fields_eventtypes["stanza"]
    )
    self.logger.info("Search query for testing =%s", search)

    # run search
    result = splunk_search_util.checkQueryCountIsGreaterThanZero(
        search,
        interval=splunk_search_util.search_interval,
        retries=splunk_search_util.search_retry
    )
    record_property("search", search)

    assert result, (
        f"No result found for the search.\nsearch={search}\n"
        f"interval={splunk_search_util.search_interval}, retries={splunk_search_util.search_retry}"
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def can_be_registered(self, event_type):\n return (event_type in self._watchable_events or\n (event_type == self.ANY and self._allow_any))", "def can_be_registered(self, event_type):\n return True", "def test_query_events_by_type(self):\n events = list(query_events_by_type(Event.objects.all(), 'show'))\n self.assertTrue(self.event_show1 in events)\n self.assertTrue(self.event_show2 in events)\n self.assertFalse(self.event_film in events)\n events = list(query_events_by_type(Event.objects.all(), 'film'))\n self.assertFalse(self.event_show1 in events)\n self.assertFalse(self.event_show2 in events)\n self.assertTrue(self.event_film in events)", "def is_eiffel_event_type(event, event_type):\n return event['meta']['type'] == event_type", "def included_event_types(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"included_event_types\")", "def included_event_types(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"included_event_types\")", "def included_event_types(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"included_event_types\")", "def test__ScheduledEventEntityType__metadata_type():\n for instance in ScheduledEventEntityType.INSTANCES.values():\n vampytest.assert_subtype(instance.metadata_type, ScheduledEventEntityMetadataBase)", "def check_event_status(self):\n pass", "def is_registered(self, name):\r\n\r\n return name in self.__events", "def test_otoroshi_controllers_adminapi_analytics_controller_filterable_events(self):\n pass", "def is_registered(self, event_type, callback, details_filter=None):\n listeners = self._topics.get(event_type, [])\n for listener in listeners:\n if listener.is_equivalent(callback, details_filter=details_filter):\n return True\n return False", "def test_query_events(self):\n query_list = {\n 'q': 'test',\n 'type': 'show'\n }\n results = query_events(query_list)\n events = list(results['events'])\n showcase = list(results['showcase_events'])\n self.assertTrue(self.event_show1 in events)\n self.assertTrue(self.event_show2 in showcase)\n self.assertFalse(self.event_film in events)", "def verify_configuration_types(config):\n if not isinstance(config[\"count\"], int):\n return False\n return True", "def __determine_config_type():", "def is_sent_from_sources(event, sources):\n if 'source' not in event['meta'] \\\n or event['meta']['source'] is None:\n return False\n\n if 'name' not in event['meta']['source'] \\\n or event['meta']['source']['name'] is None:\n return False\n\n return event['meta']['source']['name'] in sources", "def test__ScheduledEventEntityType__name():\n for instance in ScheduledEventEntityType.INSTANCES.values():\n vampytest.assert_instance(instance.name, str)", "def check_events(rk_settings, screen, rock, bullets):\r\n\tfor event in pygame.event.get():\r\n\t\tif event.type == pygame.QUIT:\r\n\t\t\tsys.exit()\r\n\t\t\r\n\t\telif event.type == pygame.KEYDOWN:\r\n\t\t\tcheck_keydown_events(event, rk_settings, screen, rock, bullets)\r\n\t\t\t\t\r\n\t\telif event.type == pygame.KEYUP:\r\n\t\t\tcheck_keyup_events(event, rock)", "def should_save(event) -> bool:\n schema = event[\"$schemaRef\"]\n if schema == \"https://eddn.edcd.io/schemas/commodity/3\":\n return True\n elif schema == \"https://eddn.edcd.io/schemas/journal/1\":\n return event[\"message\"][\"event\"] == \"Docked\"", "def included_event_types(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"included_event_types\")", "def 
can_trigger_notification(self, event_type):\n if event_type in self.DISALLOWED_NOTIFICATION_EVENTS:\n return False\n else:\n return True", "def test(types, _):\n return 'Date' in types and 'Postal Code' in types", "def test_get_events(self):\n events = gracedb.events()\n for event in events:\n self.assertTrue('graceid' in event)\n break", "def check_filterconfig(filterconfig, config):\n for f in filterconfig[\"filters\"]:\n if f[\"name\"] != \"frequency\":\n continue\n\n missing_freq_groups = set(iter_freq_groups(f[\"config\"][\"groups\"])) - set(\n iter_freq_groups(config[\"frequencies\"][\"groups\"])\n )\n assert not missing_freq_groups, \"Missing frequency group(s) in global config: {}\".format(\n missing_freq_groups\n )", "def insert_event_types(self, schema_name, db):\n for idx, e in enumerate(self.behave_names):\n id = db.one(self.eventIdSql(schema_name, e))\n if id is None:\n db.run(\n \"INSERT into %s.event_type VALUES (%d,'%s');\"\n % (schema_name, idx, e)\n )", "def is_event(self):\n return self._is_name_type(self.EVENT)", "def check_events(self):\r\n for event in pg.event.get():\r\n if event.type == pg.QUIT:\r\n self.ai_game.quit()\r\n elif event.type == pg.KEYDOWN:\r\n self._check_keydown_events(event)\r\n elif event.type == pg.KEYUP:\r\n self._check_keyup_events(event)\r\n elif event.type == pg.MOUSEBUTTONDOWN:\r\n mouse_pos = pg.mouse.get_pos()\r\n self._check_button(mouse_pos)", "def check_events(ai_settings, screen, ship, bullets):\n\tfor event in pygame.event.get():\n\t\tif event.type == pygame.QUIT:\n\t\t\tsys.exit()\n\t\telif event.type == pygame.KEYDOWN:\n\t\t\t# each keypress is registered as a keydown event\n\t\t\tcheck_keydown_events(event, ai_settings, screen, ship, bullets)\t\t\n\t\telif event.type == pygame.KEYUP:\n\t\t\t# key release is a KEYUP event\n\t\t\tcheck_keyup_events(event, ship)", "def get_all(self):\r\n return list(pecan.request.storage_conn.get_event_types())", "def get_valid_subtypes(trigger_type: str) -> Optional[Sequence[str]]:\n for trigger_info in TRIGGER_CAPABILITIES.values():\n if trigger_info.conf == trigger_type:\n return trigger_info.subconfs\n return None", "def check_configs(self):\n\n pass", "def deployment_events(self) -> bool:\n return pulumi.get(self, \"deployment_events\")", "def _check_if_event_event_or_event_timex(self):\n if (type(self.source) is Timex and type(self.target) is Event) or (type(self.source) is Event and type(self.target) is Timex):\n self._is_event_timex = True\n elif type(self.source) is Event and type(self.target) is Event:\n self._is_event_event = True\n elif type(self.source) is Timex and type(self.target) is Timex:\n self._is_timex_timex = True", "def is_shed_tool_conf(self):", "def is_relevant(event):\n\n if \"Install\" not in event:\n return False\n for package in event[\"Install\"]:\n name = package[\"Name\"]\n if (\n installed.is_installed(name)\n and history.last_installed(name) == event[\"Start-Date\"]\n ):\n return True\n if (\n installed.is_multiarch_installed(name)\n and history.last_multiarch_installed(name) == event[\"Start-Date\"]\n ):\n return True\n return False", "def __contains__(self, errtype):\n return errtype in self._state", "def is_event(schema_obj):\n\n return isinstance(schema_obj, schema.Event)", "def _check_events(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n self._check_keydown_events(event)\n elif event.type == pygame.KEYUP:\n self._check_keyup_events(event)", "def _check_events(self):\n for event in 
pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n self._check_keydown_events(event)\n elif event.type == pygame.KEYUP:\n self._check_keyup_events(event)", "def HasEventTags(self):\n for name in self._GetStreamNames():\n if name.startswith('event_tag_data.'):\n return True\n\n return False", "def test_get_future_events(self):\n events = list(get_future_events())\n self.assertFalse(self.event_show1 in events)\n self.assertTrue(self.event_show2 in events)", "def _check_config(self):", "def _check_events(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n self._check_keydown_event(event)\n elif event.type == pygame.KEYUP:\n self._check_keyup_event(event)", "async def check_scheduled_events_exists(self) -> bool:\n\n mycursor, _ = await the_database()\n await mycursor.execute(\"SHOW TABLE STATUS LIKE 'ScheduledEvents'\")\n exists = await mycursor.fetchone()\n await mycursor.close()\n if exists:\n return True\n else:\n return False", "def issues_events(self) -> bool:\n return pulumi.get(self, \"issues_events\")", "def has_consumers(self, mime_type):\n suffix = isinstance(mime_type, MimeType) and '' or '__name'\n return 0 != self.consumers.filter(\n ** { 'mime_types%s' % suffix : mime_type } ).count()", "def check_watch(kls, id, email, event_type=None, locale=''):\n\n ct = ContentType.objects.get_for_model(kls)\n\n kwargs = {'content_type': ct, 'watch_id': id, 'email': email,\n 'locale': locale}\n if event_type:\n kwargs['event_type'] = event_type\n return EventWatch.uncached.filter(**kwargs).exists()", "def _check_events(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n # if the exit button on screen is clicked close the program\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n self._check_keydown_events(event)\n elif event.type == pygame.KEYUP:\n self._check_keyup_events(event)", "def test_message_type_name_uniqueness(self):\n message_type_names = map(lambda x: x.name, KNOWN_MESSAGE_TYPES)\n for message_type_name in message_type_names:\n if self.is_verbose:\n print 'Checking uniqueness of message type name {0}'.format(message_type_name)\n self.assertEqual(1, len(filter(lambda x: x == message_type_name, message_type_names)))", "def _check_sensor_schema(conf):\n try:\n valid = [s.name for s in pysma.Sensors()]\n except (ImportError, AttributeError):\n return conf\n\n customs = list(conf[CONF_CUSTOM].keys())\n\n for sensor in conf[CONF_SENSORS]:\n if sensor in customs:\n _LOGGER.warning(\n \"All custom sensors will be added automatically, no need to include them in sensors: %s\",\n sensor,\n )\n elif sensor not in valid:\n raise vol.Invalid(f\"{sensor} does not exist\")\n return conf", "def is_event(event: Any) -> bool:\n return isinstance(event, MenuAction) or str(type(event)) == \"<class 'pygame_menu.events.MenuAction'>\"", "def check_config(cfg):", "def test_create_events_and_blocktypes(self):\n self.assertEqual(EventType.objects.all().count(), 0)\n self.assertEqual(BlockType.objects.all().count(), 0)\n\n management.call_command('create_event_and_blocktypes')\n self.assertEqual(EventType.objects.all().count(), 10)\n self.assertEqual(BlockType.objects.all().count(), 7)", "def test_registration(self):\n models = [BlogEntry, BlogRoll]\n pubsub.register(models)\n self.assertTrue(set(models).issubset(pubsub.registry))", "def check_events(ai_settings, screen, ship, bullets):\n\tfor event in pygame.event.get():\n\t\t\tif event.type == 
pygame.QUIT:\n\t\t\t\tsys.exit(\"You have quit the game.\")\n\t\t\t\n\t\t\telif event.type == pygame.KEYDOWN:\n\t\t\t\tcheck_keydown(event, ai_settings, screen, ship, bullets)\n\n\t\t\telif event.type == pygame.KEYUP:\n\t\t\t\tcheck_keyup(event, ship)", "def set_capture_events_from_config(self):\n\n event_config = [\n {\n \"config_key\": \"events_watchlist\",\n \"events\": [\n \"watchlist.hit.process\",\n \"watchlist.hit.binary\",\n \"watchlist.storage.hit.process\",\n \"watchlist.storage.hit.binary\"\n ],\n \"options\": self.forwarder_options.get(\"wlhitnotifenabled\", \"0\")\n },\n {\n \"config_key\": \"events_feed\",\n \"events\": [\n \"feed.ingress.hit.process\",\n \"feed.ingress.hit.binary\",\n \"feed.ingress.hit.host\",\n \"feed.storage.hit.process\",\n \"feed.storage.hit.binary\",\n \"feed.query.hit.process\",\n \"feed.query.hit.binary\"\n ],\n \"options\": self.forwarder_options.get(\"feedhitnotif\", \"0\")\n },\n {\n \"config_key\": \"events_alert\",\n \"events\": [\n \"alert.watchlist.hit.ingress.process\",\n \"alert.watchlist.hit.ingress.binary\",\n \"alert.watchlist.hit.ingress.host\",\n \"alert.watchlist.hit.query.process\",\n \"alert.watchlist.hit.query.binary\"\n ],\n \"options\": self.forwarder_options.get(\"alertnotifenabled\", \"0\")\n },\n {\n \"config_key\": \"events_raw_sensor\",\n \"events\": [\n \"ingress.event.process\",\n \"ingress.event.procstart\",\n \"ingress.event.netconn\",\n \"ingress.event.procend\",\n \"ingress.event.childproc\",\n \"ingress.event.moduleload\",\n \"ingress.event.module\",\n \"ingress.event.filemod\",\n \"ingress.event.regmod\"\n \t\"ingress.event.tamper\",\n \t\t\"ingress.event.crossprocopen\",\n \t\t\"ingress.event.remotethread\",\n \t\t\"ingress.event.processblock\",\n \t\t\"ingress.event.emetmitigation\",\n ],\n \"options\": self.forwarder_options.get(\"rawsensnotifenabled\", \"0\")\n },\n {\n \"config_key\": \"events_binary_observed\",\n \"events\": [\"binaryinfo.host.observed\",\n \"binaryinfo.observed,\"\n \"binaryinfo.group.observed\"],\n\n \"options\": self.forwarder_options.get(\"binobsnotifenabled\", \"0\")\n },\n {\n \"config_key\": \"events_binary_upload\",\n \"events\": [\"binarystore.file.added\"],\n \"options\": self.forwarder_options.get(\"binuplnotifenabled\", \"0\")\n }\n ]\n\n self.capture_events = []\n for event_type in event_config:\n events = self.forwarder_options.get(event_type[\"config_key\"], \"0\").lower()\n if events == \"all\":\n self.capture_events.extend(event_type[\"events\"])\n elif events != \"0\":\n events_from_config = events.split(\",\")\n events_to_capture = list(set(events_from_config) & set(event_type[\"events\"]))\n self.capture_events.extend(events_to_capture)\n\n self.logger.info(\"Configured to capture events: %s\" % self.capture_events)", "def audio_event_detection(self):\n # Test if trials already exist\n if 'TimeIntervals_speaker' not in self.model.nwb.intervals:\n # Test if file contains audio signals\n if any(name in self.model.nwb.stimulus for name in ['speaker1', 'speaker2']):\n AudioEventDetection(parent=self)\n else:\n NoAudioDialog()\n else:\n ExistIntervalsDialog()", "def _should_ignore_type(self, typ):\n return typ in self.config.IGNORED_TYPES", "def has_tuesday(self):\n return self.products.filter(type=\"S\", weekday=2).exists()", "def is_in(cls, trigger_type, existing_types):\n for e in existing_types:\n if cls.is_equivalent(trigger_type, e):\n return True\n return False", "def SBO_isEvent(*args):\n return _libsbml.SBO_isEvent(*args)", "def test_has_name(self):\n for klass in 
Event.__subclasses__():\n self.assertTrue(hasattr(klass, 'NAME'),\n f'{klass.__name__} is missing attribute NAME')", "def verify_event(event):\n event_details = event['event']\n file_subtype = event_details.get('subtype')\n\n if file_subtype != 'file_share':\n print('Not a file_shared event- ignoring event...')\n return False\n\n file_details = event_details['file']\n mime_type = file_details['mimetype']\n file_size = file_details['size']\n\n if mime_type not in SUPPORTED_IMAGE_FORMATS:\n print('File is not an image- ignoring event...')\n return False\n\n if file_size > MAX_SIZE:\n print(\n 'Image is larger than 5MB and cannot be processed- ignoring event...')\n return False\n\n return True", "def is_server_echo(event_code: str):\n return event_code in {\n \"txRequest\",\n \"nsfFail\",\n \"txRepeat\",\n \"txAwaitingApproval\",\n \"txConfirmReminder\",\n \"txSendFail\",\n \"txError\",\n \"txUnderPriced\",\n \"txSent\",\n }", "def test_custom_query_response_descriptor_octopus_server_web_api_actions_list_event_document_types_responder(self):\n pass", "def is_registered(self, type):\n attr = self._type_to_attr(type)\n return getattr(self, attr, None) is not None", "def test_class_annotations():\n\n for cls in get_module_classes('HABApp.core.events.events', ('ComplexEventValue', 'AllEvents')).values():\n check_class_annotations(cls)", "def test_any(self):\n\n eventFilter = EventFilter(\"*\")\n\n # Start a session\n traceids = ['foobar']\n eventCallback = Mock()\n session = eventFilter.start(traceids, eventCallback)\n\n # The first FooEvent should be handled\n fooEvent1 = FooEvent(traceid=traceids)\n session.handle(fooEvent1)\n self.assertEqual(eventCallback.mock_calls, [\n call(fooEvent1),\n ])\n\n # The second FooEvent should also be handled\n fooEvent2 = FooEvent(traceid=traceids)\n session.handle(fooEvent2)\n self.assertEqual(eventCallback.mock_calls, [\n call(fooEvent1),\n call(fooEvent2),\n ])\n\n # The BarEvent should also be handled\n barEvent1 = BarEvent(traceid=traceids)\n session.handle(barEvent1)\n self.assertEqual(eventCallback.mock_calls, [\n call(fooEvent1),\n call(fooEvent2),\n call(barEvent1),\n ])\n\n # No more events should be added when the session is finalized\n session.finalize()\n self.assertEqual(eventCallback.mock_calls, [\n call(fooEvent1),\n call(fooEvent2),\n call(barEvent1),\n ])", "def _check_params(self, events):\n\n # check to make sure it's a list of dictionaries with the right keys\n\n assert type(events) == list, \"events should be a list\"\n\n for event in events:\n\n assert type(event) == dict, \"each event should be a dictionary\"\n\n assert \"name\" in event, 'each event should have a \"name\" key'\n\n assert \"params\" in event, 'each event should have a \"params\" key'\n\n # check for any missing or invalid parameters\n\n for e in events:\n event_name = e[\"name\"]\n event_params = e[\"params\"]\n if event_name in params_dict.keys():\n for parameter in params_dict[event_name]:\n if parameter not in event_params.keys():\n logger.warning(\n f\"WARNING: Event parameters do not match event type.\\nFor {event_name} event type, the correct parameter(s) are {params_dict[event_name]}.\\nFor a breakdown of currently supported event types and their parameters go here: https://support.google.com/analytics/answer/9267735\\n\"\n )", "def push_events(self) -> bool:\n return pulumi.get(self, \"push_events\")", "def _check_events(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n 
self._check_keydown_events(event)", "def handle(self, event):\n try:\n for event_listeners in self.listeners[event.type]:\n if event_listeners:\n for listener in event_listeners:\n if listener(event) is False:\n return False\n except KeyError:\n logger.insane('No listeners defined for event \"%s\"', hr_event_type(event.type))\n pass\n\n return True", "def check_config(config):\n pass", "def validatesettings(self, eventlist=None):\n if (eventlist == None):\n eventlist = EventList()\n #\n self.validate_setting_config(eventlist, mconst.DEF_SETTINGNAME_pkgdirimps_sitempacks, False, \"no directory will be scanned for site-specific extensions.\")\n self.validate_setting_config(eventlist, mconst.DEF_SETTINGNAME_controllerroot, False, \"no site-default specified for controller root.\")\n # required stuff\n self.validate_setting_config(eventlist, mconst.DEF_SETTINGNAME_siteurl_relative, True, \"site has no relative url specified; assumed to start at root (/).\")\n self.validate_setting_config(eventlist, mconst.DEF_SETTINGNAME_siteurl_absolute, True, \"site has no absolute url address.\")\n self.validate_setting_config(eventlist, mconst.DEF_SETTINGNAME_sitefilepath, True, \"site has no filepath specified for it's home directory.\")\n\n # return events encountered\n return eventlist", "def _is_run_type(cls, object_):\n # Do a string comparison instead of using isinstance() to avoid needing\n # to import lyse or other modules with these classes.\n return (type(object_).__name__ in cls._RUN_TYPES)", "def isEvent(*args):\n return _libsbml.SBO_isEvent(*args)", "def is_eventCodePresent(self, *args):\n if args[0] is None:\n return True\n\n if len(args) == 1:\n if isinstance(args[0], list):\n eventCodes = {arg for arg in args[0]}\n else:\n eventCodes = args\n else:\n eventCodes = args\n \n for eventCode in eventCodes:\n if eventCode > 0:\n if hasattr(self,'_master_evr') and \\\n (not self._master_evr.is_in_keys \\\n or not self._master_evr.present(eventCode)):\n return False\n elif eventCode < 0:\n if hassattr(self,'_master_evr') and not self._master_evr.is_in_keys: \n return True\n elif self._master_evr.present(-eventCode):\n return False\n else:\n return False\n\n return True", "def test__ScheduledEventEntityType__value():\n for instance in ScheduledEventEntityType.INSTANCES.values():\n vampytest.assert_instance(instance.value, ScheduledEventEntityType.VALUE_TYPE)", "def _check_events(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit() \n if event.type == pygame.KEYDOWN:\n self._check_keydown_events(event)", "def _check_events(self):\n for event in pygame.event.get():\n # quit stuff\n if event.type == pygame.QUIT:\n sys.exit()\n # mouse click for 'PLAY' button\n elif event.type == pygame.MOUSEBUTTONDOWN:\n mouse_pos = pygame.mouse.get_pos()\n self._check_play_button(mouse_pos)\n\n # checks for key down/up events and sends it to appropriate method\n elif event.type == pygame.KEYDOWN:\n self._check_keydown_events(event)\n elif event.type == pygame.KEYUP:\n self._check_keyup_events(event)", "def test_all_configs_available():\n\n app_configs = application_services.get_configs()\n assert all(name in app_configs for name in ['TITLE', 'ENCODING', 'FLASK_LOG_LEVEL',\n 'SERVER_NAME', 'SERVER_HOST', 'SERVER_PORT',\n 'ENV', 'DEBUG', 'TESTING', 'UNIT_TESTING'])", "def check_events(si_settings, screen,stats,sb,play_button, ship,aliens, bullets):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n 
check_keydown_events(event, si_settings, screen, ship, bullets)\n elif event.type == pygame.KEYUP:\n check_keyup_events(event, ship)\n elif event.type == pygame.MOUSEBUTTONDOWN:\n mouse_x,mouse_y = pygame.mouse.get_pos()\n check_play_button(si_settings,screen,stats,sb,play_button,ship,aliens,bullets,mouse_x,mouse_y)", "def test_get_types(self):\n pass", "def check_events(ai_settings,screen,stats,play_button,ship,bullets,shot):\n\tfor event in pygame.event.get():\n\t\tif event.type == pygame.QUIT:\n\t\t\tsys.exit()\n\t\telif event.type == pygame.KEYDOWN:\n\t\t\tcheck_keydown_events(event,ai_settings,screen,ship,bullets,shot)\n\t\telif event.type == pygame.KEYUP:\n\t\t\tcheck_keyup_events(event, ship)\n\t\telif event.type == pygame.MOUSEBUTTONDOWN:\n\t\t\tmouse_x, mouse_y = pygame.mouse.get_pos()\n\t\t\tcheck_play_button(ai_settings,screen,stats,play_button,ship,bullets,mouse_x,mouse_y)", "def list_events(option, opt_str, value, parser):\n\n print 'On this system SystemConfiguration supports these events:'\n for event in sorted(SCDynamicStoreCopyKeyList(get_sc_store(), '.*')):\n print \"\\t\", event\n\n print\n print \"Standard NSWorkspace Notification messages:\\n\\t\",\n print \"\\n\\t\".join('''\n NSWorkspaceDidLaunchApplicationNotification\n NSWorkspaceDidMountNotification\n NSWorkspaceDidPerformFileOperationNotification\n NSWorkspaceDidTerminateApplicationNotification\n NSWorkspaceDidUnmountNotification\n NSWorkspaceDidWakeNotification\n NSWorkspaceSessionDidBecomeActiveNotification\n NSWorkspaceSessionDidResignActiveNotification\n NSWorkspaceWillLaunchApplicationNotification\n NSWorkspaceWillPowerOffNotification\n NSWorkspaceWillSleepNotification\n NSWorkspaceWillUnmountNotification\n '''.split())\n\n sys.exit(0)", "def is_hao_event(self, evt):\n return evt.detail and 'SRIOVPhysicalPort.ConfigChange' in evt.detail", "def needs_run(self, cscan, xnat):\n _info = cscan.info()\n if _info['type'] not in self.scan_types:\n return False\n\n # Check for existing EDAT resource\n if XnatUtils.has_resource(cscan, 'EDAT'):\n LOGGER.debug('Has EDAT')\n return False\n\n return True", "def event_filter(event):\n for field, blst in EXCLUDE_PARTIAL.items():\n ev_val = event[field]\n for bl_val in blst:\n if ev_val.find(bl_val) != -1: return False\n \n for field, blst in EXCLUDE_EXACT.items():\n ev_val = event[field]\n for bl_val in blst:\n if ev_val == bl_val: return False\n \n return True", "def is_in_keys(self):\n return self._name in self._data.evt._keys_dict", "def test_get_event_type_by_id(self):\n\t\tevent_type = EventType.objects.get(name=\"asd\")\n\t\tresponse = self.client.get('/api/event_type/esper/' + str(event_type.id), follow=True)\n\t\tjson_response = json.loads(response.content.decode('utf-8'))\n\t\tself.assertEqual(json_response[\"name\"], \"asd\")\n\t\tself.assertEqual(type(json_response[\"inputQueries\"]), list)\n\t\tself.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_otoroshi_controllers_adminapi_events_controller_alert_events(self):\n pass", "def test_startup_defensive(self) -> None:\n self.assertFalse(self.client.triggers, 'predefined triggers')\n self.assertIsNone(self.client.websocket, 'websocket without triggers')", "def extract_all_types_from_event_trace(events):\n result = []\n for e in events:\n evt = IpuTraceEvent.FromString(e)\n result += [evt.type]\n return result", "def test_register_events():\n event_bus = MockEventBus()\n test_module.register_events(event_bus)\n assert event_bus.topic_patterns_to_subscribers", "def filter_event(event):\n tokens = 
event.split()\n# if tokens[-1] in SUBJS and tokens[-2] == \"tell\":\n# return True\n# if tokens[-1] in [\"know\", \"say\", \"think\"]:\n# return True\n # filter eventualities with only 2 tokens\n if len(tokens) <= 2:\n return True\n # filter hot verbs\n if any(kw in tokens for kw in [\"say\", \"do\", \"know\", \"tell\", \"think\", ]):\n return True\n # filter out errors that potentially due to the errors of the parser\n if tokens[0] in [\"who\", \"what\", \"when\", \"where\", \"how\", \"why\", \"which\", \"whom\", \"whose\"]:\n return True\n return False", "def test_get_event_types_by_hierarchy_id_success(self):\n\t\thierarchy = Hierarchy.objects.get(name=\"TestHierarchy\")\n\t\tresponse = self.client.get('/api/event_type/esper/hierarchy/' + str(hierarchy.id), follow=True)\n\t\tjson_response = json.loads(response.content.decode('utf-8'))\n\t\tself.assertEqual(type(json_response), list)\n\t\tevent_type = json_response[0]\n\t\tself.assertEqual(event_type[\"name\"], \"asd\")\n\t\tself.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_started_but_not_finished_event_appears_in_events_list(self):\r\n user = ViewAfishaTests.mentor\r\n client_user = self.return_authorized_user_client(user)\r\n with freeze_time(\"2020-01-01\"):\r\n EventFactory(\r\n city=user.profile.city,\r\n start_at=datetime(2020, 2, 1, tzinfo=pytz.utc),\r\n end_at=datetime(2020, 12, 1, tzinfo=pytz.utc),\r\n )\r\n num_events = Event.objects.count()\r\n self.assertEqual(\r\n num_events,\r\n 1,\r\n msg=\"Убедитесь, что тест смог создать событие в прошлом\",\r\n )\r\n with freeze_time(\"2020-05-01\"):\r\n response_data = client_user.get(EVENTS_URL, format=\"json\").data\r\n num_events = response_data.get(\"count\")\r\n self.assertEqual(\r\n num_events,\r\n 1,\r\n msg=(\r\n \"Убедитесь, что начавшееся, но не \"\r\n \"закончившееся событие показывается в списке.\"\r\n ),\r\n )", "def _check_events(self):\n\t\t# Watch for keyboard and mouse events.\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.QUIT:\n\t\t\t\tsys.exit()\n\t\t\telif event.type == pygame.KEYDOWN:\n\t\t\t\tself._check_keydown_events(event)\n\t\t\telif event.type == pygame.KEYUP:\n\t\t\t\tself._check_keyup_events(event)\n\t\t\telif event.type == pygame.MOUSEBUTTONDOWN:\n\t\t\t\tmouse_pos = pygame.mouse.get_pos()\n\t\t\t\tself._check_play_button(mouse_pos)", "def wants_event(self, event_name: str, args: Dict) -> bool:\n ret = True\n if self.event_filter and event_name not in self.event_filter:\n ret = False\n elif self.active_monitor_filter and 'monitor' in args and args['monitor'].monitor_type == 'active' \\\n and args['monitor'].id not in self.active_monitor_filter:\n ret = False\n return ret", "def check_events(self):\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n sys.exit()\r\n elif event.type == pygame.MOUSEBUTTONDOWN:\r\n mouse_x, mouse_y = pygame.mouse.get_pos()\r\n if self._stats.get_status() == \"Start_game\":\r\n self.check_game_mode_button(mouse_x, mouse_y)\r\n elif self._stats.get_status() == \"replay\":\r\n self.check_replay_button(mouse_x, mouse_y)\r\n else:\r\n self.check_click(mouse_x, mouse_y)" ]
[ "0.6341547", "0.6339944", "0.61417574", "0.58374506", "0.57760566", "0.57760566", "0.57760566", "0.56579113", "0.5635613", "0.5614048", "0.557665", "0.55349207", "0.55108035", "0.5440711", "0.5404753", "0.540017", "0.5387301", "0.5341101", "0.53385955", "0.5323464", "0.5319075", "0.5299112", "0.52943337", "0.52874166", "0.5261569", "0.52446705", "0.5241322", "0.52358115", "0.5234367", "0.52301097", "0.522921", "0.5223781", "0.5197908", "0.5192373", "0.51841545", "0.5181028", "0.5169044", "0.51643384", "0.51643384", "0.51611876", "0.5146747", "0.5141542", "0.51238865", "0.51204145", "0.51175284", "0.51150084", "0.5097063", "0.5092647", "0.5086006", "0.5081939", "0.5075657", "0.50735134", "0.506944", "0.50644076", "0.5059173", "0.5052812", "0.5037386", "0.5027514", "0.5022315", "0.5015806", "0.5004067", "0.5001175", "0.5000127", "0.4976048", "0.49759397", "0.4973751", "0.4972339", "0.49712673", "0.49697214", "0.4968133", "0.49669427", "0.49528787", "0.49522522", "0.49520576", "0.49410817", "0.49384764", "0.492971", "0.49261856", "0.49231938", "0.49212757", "0.4921132", "0.49197784", "0.4910658", "0.48979804", "0.48807538", "0.48781794", "0.4878031", "0.48761478", "0.48728737", "0.4872211", "0.4870406", "0.48697817", "0.48679933", "0.48627463", "0.48625475", "0.48597765", "0.48571935", "0.48549765", "0.48528352", "0.48476982" ]
0.5889365
3
Create CIFAR100 train/val/test data loaders
def get_train_val_test_datasets(
        rnd: np.random.RandomState,
        root='~/data',
        validation_ratio=0.05,
) -> tuple:
    transform = transforms.Compose(
        [
            transforms.ToTensor()
        ]
    )

    train_set = CIFAR100(
        root=root, train=True, download=True, transform=transform
    )

    # create validation split
    if validation_ratio > 0.:
        train_set, val_set = _train_val_split(rnd=rnd, train_dataset=train_set, validation_ratio=validation_ratio)

    # compute per-channel mean/std over the full training set for the normalising transform
    train_loader = DataLoader(
        train_set,
        batch_size=len(train_set),
        shuffle=False,
    )

    data = next(iter(train_loader))  # `.next()` is Python 2 only; use next(...)
    dim = [0, 2, 3]
    mean = data[0].mean(dim=dim).numpy()
    std = data[0].std(dim=dim).numpy()

    transform = transforms.Compose(
        [
            transforms.ToTensor(),
            transforms.Normalize(
                mean, std
            ),
        ])

    train_set.transform = transform
    if validation_ratio > 0.:
        val_set.transform = transform
    else:
        val_set = None

    test_set = CIFAR100(
        root=root, train=False, download=True, transform=transform
    )

    return train_set, val_set, test_set
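Note that the function above returns Dataset objects rather than DataLoaders. A minimal usage sketch for turning them into loaders, assuming standard torch/torchvision imports and that the `_train_val_split` helper is defined elsewhere (the seed and batch sizes below are illustrative only):

import numpy as np
from torch.utils.data import DataLoader

# build the three splits with a fixed random state for a reproducible validation split
rnd = np.random.RandomState(42)
train_set, val_set, test_set = get_train_val_test_datasets(
    rnd=rnd, root='~/data', validation_ratio=0.05
)

# wrap each split in a DataLoader; only the training loader is shuffled
train_loader = DataLoader(train_set, batch_size=128, shuffle=True, num_workers=2)
val_loader = DataLoader(val_set, batch_size=256, shuffle=False) if val_set is not None else None
test_loader = DataLoader(test_set, batch_size=256, shuffle=False)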
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_dataloaders(params):\r\n transform_train = transforms.Compose([transforms.RandomCrop(32, padding=4),\r\n transforms.RandomHorizontalFlip(),\r\n transforms.ToTensor(),\r\n transforms.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010])])\r\n\r\n transform_validation = transforms.Compose([transforms.ToTensor(),\r\n transforms.Normalize([0.4914, 0.4822, 0.4465],\r\n [0.2023, 0.1994, 0.2010])])\r\n\r\n transform_validation = transforms.Compose([transforms.ToTensor()])\r\n\r\n trainset = torchvision.datasets.CIFAR10(root=params['path'], train=True, transform=transform_train)\r\n testset = torchvision.datasets.CIFAR10(root=params['path'], train=False, transform=transform_validation)\r\n\r\n trainloader = torch.utils.data.DataLoader(trainset, batch_size=params['batch_size'], shuffle=True, num_workers=4)\r\n testloader = torch.utils.data.DataLoader(testset, batch_size=params['batch_size'], shuffle=False, num_workers=4)\r\n return trainloader, testloader", "def create_loader(self):\n # load data to memory.\n if self.is_cifar100:\n (x_train, y_train), (x_test,\n y_test) = tf.keras.datasets.cifar100.load_data()\n else:\n (x_train, y_train), (x_test,\n y_test) = tf.keras.datasets.cifar10.load_data()\n\n y_train = y_train.astype(np.int32)\n y_test = y_test.astype(np.int32)\n\n x_train, y_train = shuffle_dataset(x_train, y_train)\n n_probe = int(math.floor(x_train.shape[0] * FLAGS.probe_dataset_hold_ratio))\n\n # TODO(zizhaoz): add other noise types.\n if 'asymmetric' in self.dataset_name:\n assert 'cifar100' not in self.dataset_name, 'Asymmetric only has CIFAR10'\n (x_train, y_train, y_gold), (x_probe, y_probe) = load_asymmetric(\n x_train,\n y_train,\n noise_ratio=self.noise_ratio,\n n_val=n_probe,\n random_seed=FLAGS.seed)\n elif 'uniform' in self.dataset_name:\n (x_train, y_train, y_gold), (x_probe,\n y_probe) = load_train_val_uniform_noise(\n x_train,\n y_train,\n n_classes=self.num_classes,\n noise_ratio=self.noise_ratio,\n n_val=n_probe)\n else:\n assert self.dataset_name in ['cifar10', 'cifar100']\n\n if not self.split_probe and x_probe is not None:\n # Usually used for supervised comparison.\n tf.logging.info('Merge train and probe')\n x_train = np.concatenate([x_train, x_probe], axis=0)\n y_train = np.concatenate([y_train, y_probe], axis=0)\n y_gold = np.concatenate([y_gold, y_probe], axis=0)\n\n conf_mat = sklearn_metrics.confusion_matrix(y_gold, y_train)\n conf_mat = conf_mat / np.sum(conf_mat, axis=1, keepdims=True)\n tf.logging.info('Corrupted confusion matirx\\n {}'.format(conf_mat))\n x_test, y_test = shuffle_dataset(x_test, y_test)\n self.train_dataset_size = x_train.shape[0]\n self.val_dataset_size = x_test.shape[0]\n if self.split_probe:\n self.probe_size = x_probe.shape[0]\n\n input_tuple = (x_train, y_train.squeeze())\n self.train_dataflow = self.create_ds(input_tuple, is_train=True)\n self.val_dataflow = self.create_ds((x_test, y_test.squeeze()),\n is_train=False)\n if self.split_probe:\n self.probe_dataflow = self.create_ds((x_probe, y_probe.squeeze()),\n is_train=True)\n\n tf.logging.info('Init [{}] dataset loader'.format(self.dataset_name))\n verbose_data('train', x_train, y_train)\n verbose_data('test', x_test, y_test)\n if self.split_probe:\n verbose_data('probe', x_probe, y_probe)\n\n return self", "def load_cifar():\n print('==> Preparing data..')\n transform_train = transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n ])\n\n 
transform_test = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n ])\n\n trainset = torchvision.datasets.CIFAR10(\n root='./data', train=True, download=True, transform=transform_train)\n trainloader = torch.utils.data.DataLoader(\n trainset, batch_size=1024, shuffle=True, num_workers=8)\n\n testset = torchvision.datasets.CIFAR10(\n root='./data', train=False, download=True, transform=transform_test)\n testloader = torch.utils.data.DataLoader(\n testset, batch_size=128, shuffle=False, num_workers=8)\n return trainloader, testloader", "def load_cifar_data():\n train_loader = torch.utils.data.DataLoader(\n torchvision.datasets.CIFAR10('cifarfiles/', train=True, download=True,\n transform=torchvision.transforms.Compose([\n torchvision.transforms.ToTensor(),\n torchvision.transforms.Normalize(\n (0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n ])),\n batch_size=batch_size_train, shuffle=True, pin_memory=True)\n\n test_loader = torch.utils.data.DataLoader(\n torchvision.datasets.CIFAR10('cifarfiles/', train=False, download=True,\n transform=torchvision.transforms.Compose([\n torchvision.transforms.ToTensor(),\n torchvision.transforms.Normalize(\n (0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n ])),\n batch_size=batch_size_test, shuffle=True, pin_memory=True)\n return train_loader, test_loader", "def creates_data_loader():\n dataset_faces = FaceDataset(\n IMG_DIR, transform=transform_train, face=True)\n\n dataset_no_faces = FaceDataset(\n IMG_DIR, transform=transform_train, face=False)\n\n datasets_faces_split = train_val_test(dataset_faces, 0.2, 0.0)\n datasets_no_faces_split = train_val_test(dataset_no_faces, 0.2, 0.0)\n\n datasets = {}\n datasets[\"train\"] = datasets_faces_split[\"train\"] + \\\n datasets_no_faces_split[\"train\"]\n datasets[\"test\"] = datasets_no_faces_split[\"test\"]\n datasets[\"val\"] = datasets_faces_split[\"val\"] + \\\n datasets_no_faces_split[\"val\"]\n\n train_loader = DataLoader(dataset=datasets[\"train\"], batch_size=BATCH_SIZE,\n num_workers=NUM_WORKERS, pin_memory=PIN_MEMORY, shuffle=True, drop_last=False)\n\n val_loader = DataLoader(dataset=datasets[\"val\"], batch_size=BATCH_SIZE,\n num_workers=NUM_WORKERS, pin_memory=PIN_MEMORY, shuffle=True, drop_last=False)\n return train_loader, val_loader", "def data_creator(config):\n train_dataset, val_dataset = LinearDataset(2, 5), LinearDataset(2, 5)\n train_loader = DataLoader(train_dataset, batch_size=config[\"batch_size\"])\n val_loader = DataLoader(val_dataset, batch_size=config[\"batch_size\"])\n return train_loader, val_loader", "def load(cfg, train_mode, split, shot, query,\n bs, test_bs, num_workers, pin_memory,\n ret_name=False):\n if train_mode == \"train\":\n dataset = COCOTrain(cfg, split, shot, query, ret_name=ret_name)\n data_loader = DataLoader(dataset,\n batch_size=bs,\n shuffle=True,\n num_workers=num_workers,\n pin_memory=pin_memory,\n drop_last=False)\n else:\n dataset = COCOTest(cfg, split, shot, query, ret_name=ret_name)\n data_loader = DataLoader(dataset,\n batch_size=test_bs, # Large batch for evaluation\n shuffle=False,\n num_workers=num_workers,\n pin_memory=pin_memory,\n drop_last=False)\n num_classes = 80\n return dataset, data_loader, num_classes", "def get_data_loader(target_classes, batch_size):\n classes = ('plane', 'car', 'bird', 'cat',\n 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')\n ########################################################################\n # The output of torchvision datasets are PILImage images of range [0, 1].\n # We transform 
them to Tensors of normalized range [-1, 1].\n transform = transforms.Compose(\n [transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n trainset = torchvision.datasets.CIFAR10(root='./data', train=True,\n download=True, transform=transform)\n # Get the list of indices to sample from\n relevant_train_indices = get_relevant_indices(\n trainset,\n classes,\n target_classes)\n # Split into train and validation\n np.random.seed(1000) # Fixed numpy random seed for reproducible shuffling\n np.random.shuffle(relevant_train_indices)\n split = int(len(relevant_train_indices) * 0.8)\n relevant_train_indices, relevant_val_indices = relevant_train_indices[:split], relevant_train_indices[split:]\n train_sampler = SubsetRandomSampler(relevant_train_indices)\n train_loader = torch.utils.data.DataLoader(trainset, batch_size=batch_size,\n num_workers=0, sampler=train_sampler)\n val_sampler = SubsetRandomSampler(relevant_val_indices)\n val_loader = torch.utils.data.DataLoader(trainset, batch_size=batch_size,\n num_workers=0, sampler=val_sampler)\n testset = torchvision.datasets.CIFAR10(root='./data', train=False,\n download=True, transform=transform)\n relevant_test_indices = get_relevant_indices(testset, classes, target_classes)\n test_sampler = SubsetRandomSampler(relevant_test_indices)\n test_loader = torch.utils.data.DataLoader(testset, batch_size=batch_size,\n num_workers=0, sampler=test_sampler)\n return train_loader, val_loader, test_loader, classes", "def load_cifa_10():\n train_set_x = np.ndarray([ 50000, 3072 ])\n train_set_y = np.ndarray( [50000] )\n\n batch_size = 10000\n for i in xrange(5):\n batch = open( datapath + \"data_batch_\"+str(i+1), 'rb')\n map = cPickle.load( batch )\n batch.close()\n train_set_x[ i*batch_size : (i+1)*batch_size , : ] = np.asarray( map[ 'data' ], dtype = 'float32' )\n train_set_y[ i*batch_size : (i+1)*batch_size ] = np.asarray( map[ 'labels' ], dtype = 'float32' )\n\n test_file = open( datapath + 'test_batch', 'rb')\n map = cPickle.load( test_file )\n test_file.close()\n \n test_set_x = np.asarray( map['data'], dtype = 'float32' )\n test_set_y = np.asarray( map['labels'], dtype = 'float32' )\n \n\n return train_set_x, train_set_y, test_set_x, test_set_y", "def load_CIFAR100(batch_dir):\r\n ims, coarse_labels, fine_labels = load_CIFAR_batch(batch_dir + '/train')\r\n ims_t, c_labels, f_labels = load_CIFAR_batch(batch_dir + '/test')\r\n ims = np.concatenate((ims, ims_t))\r\n coarse_labels = np.concatenate((coarse_labels, c_labels))\r\n fine_labels = np.concatenate((fine_labels, f_labels))\r\n return ims, coarse_labels, fine_labels", "def get_loader(domains,image_dir, crop_size=178, image_size=128,\n batch_size=16, mode='train', num_workers=1):\n transform = []\n \n if mode == 'train':\n transform.extend([T.ColorJitter(brightness=0.2, contrast=0.1, saturation=0.2, hue=0.1), T.RandomHorizontalFlip()])\n\n # transform.append(T.CenterCrop(crop_size))\n transform.append(T.Resize(image_size, interpolation=Image.LANCZOS))\n transform.append(T.ToTensor())\n transform.append(T.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),inplace=True))\n transform = T.Compose(transform)\n\n hold_out_size= 0 if mode == 'train' else 0\n dataset = CarDataset(domains,image_dir, transform, hold_out_size=hold_out_size)\n data_loader = data.DataLoader(dataset=dataset,\n batch_size=batch_size,\n shuffle=(mode == 'train'),\n num_workers=num_workers)\n return data_loader", "def load_cifar(dataset_name='cifar10'):\n dataset_name = dataset_name.strip().lower().replace(' 
', '')\n\n if dataset_name.lower() not in ['cifar10', 'cifar100']:\n raise ValueError('Only cifar10 or cifar100 are valid dataset_name.')\n baseURL = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'\n if dataset_name == 'cifar100':\n baseURL = 'https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz'\n\n dirname = os.path.join(_trident_dir, dataset_name.strip())\n if not os.path.exists(dirname):\n try:\n os.makedirs(dirname)\n except OSError:\n # Except permission denied and potential race conditions\n # in multi-threaded environments.\n pass\n\n \"\"\"Load CIFAR data from `path`\"\"\"\n _,filename,ext=split_path(baseURL)\n download_file(baseURL, dirname, filename+ext, dataset_name)\n file_path = os.path.join(dirname, filename+ext)\n\n\n if '.tar' in ext:\n extract_archive(file_path, dirname, archive_format='auto')\n filelist = glob.glob(dirname + '/*/*.*')\n extract_path ,_,_= split_path(filelist[0])\n filelist = [f for f in os.listdir(extract_path) if os.path.isfile(os.path.join(extract_path, f))]\n data=[]\n label=[]\n test_data=[]\n test_label=[]\n for file_path in filelist:\n if 'data_batch' in file_path:\n with open(os.path.join(extract_path,file_path), 'rb') as f:\n entry = pickle.load(f, encoding='latin1')\n data.append(entry['data'])\n label.append(entry['labels'])\n elif 'test_batch' in file_path:\n with open(os.path.join(extract_path,file_path), 'rb') as f:\n entry = pickle.load(f, encoding='latin1')\n test_data.append(entry['data'])\n test_label.append(entry['labels'])\n data = np.concatenate(data)\n data = data.reshape((data.shape[0], 3, 32, 32))\n data = data.transpose(0, 2, 3, 1).astype(np.float32)\n\n test_data = np.concatenate(test_data)\n test_data = test_data.reshape((test_data.shape[0], 3, 32, 32))\n test_data = test_data.transpose(0, 2, 3, 1).astype(np.float32)\n\n # Prepare labels\n label = np.concatenate(label)\n test_label = np.concatenate(test_label)\n\n trainData = Iterator(data=ImageDataset(data,object_type=ObjectType.rgb), label=LabelDataset(label,object_type=ObjectType.classification_label))\n testData = Iterator(data=ImageDataset(test_data,object_type=ObjectType.rgb), label=LabelDataset(test_label,object_type=ObjectType.classification_label))\n dataset = DataProvider(dataset_name, traindata=trainData, testdata=testData)\n dataset.binding_class_names(['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship',\n 'truck'] if dataset_name == 'cifar10' else [], 'en-US')\n return dataset", "def get_loaders(train_dataset, val_dataset, test_dataset, batch_size=128):\n train_loader = DataLoader(train_dataset, batch_size=batch_size, num_workers=8,\n shuffle=True)\n\n val_loader = DataLoader(val_dataset, batch_size=batch_size, num_workers=8,\n shuffle=False)\n\n test_loader = DataLoader(test_dataset, batch_size=batch_size, num_workers=8,\n shuffle=False)\n\n return train_loader, val_loader, test_loader", "def load_data():\r\n global labelNames\r\n print(\"Loading Data...\")\r\n\r\n fnpath = \"rawdata\\\\cifar-10-batches-py\"\r\n fnprefix = 'data_batch_'\r\n fnlblnames = 'batches.meta'\r\n fntstbatch = 'test_batch'\r\n\r\n labelNames = unpickle(path.join(fnpath, fnlblnames))\r\n label_names = []\r\n for label in labelNames['label_names']:\r\n label_names.append(\"\".join(map(chr, label)))\r\n labelNames['label_names'] = label_names\r\n\r\n CIFAR_Data.append(unpickle(path.join(fnpath, fntstbatch)))\r\n for n in range(1, 6):\r\n CIFAR_Data.append(unpickle(path.join(fnpath, fnprefix + str(n))))", "def load_CIFAR10(ROOT):\r\n xs = []\r\n ys = 
[]\r\n for b in range(1,6):\r\n f = os.path.join(ROOT, 'data_batch_%d' % (b, ))\r\n X, Y = load_CIFAR_batch(f)\r\n xs.append(X)\r\n ys.append(Y)\r\n Xtr = np.concatenate(xs)\r\n Ytr = np.concatenate(ys)\r\n del X, Y\r\n Xte, Yte = load_CIFAR_batch(os.path.join(ROOT, 'test_batch'))\r\n return Xtr, Ytr, Xte, Yte\r\n\r\n\tdef get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=10000):\r\n # Load the raw CIFAR-10 data\r\n \r\n cifar10_dir = 'Downloads/cifar-10-batches-py'\r\n \r\n X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)\r\n\r\n # Subsample the data\r\n mask = range(num_training, num_training + num_validation)\r\n X_val = X_train[mask]\r\n y_val = y_train[mask]\r\n mask = range(num_training)\r\n X_train = X_train[mask]\r\n y_train = y_train[mask]\r\n mask = range(num_test)\r\n X_test = X_test[mask]\r\n y_test = y_test[mask]\r\n\r\n x_train = X_train.astype('float32') \r\n x_test = X_test.astype('float32')\r\n \r\n x_train = x_train.reshape(-1, 32, 32, 3)\r\n x_test = x_test.reshape(-1, 32, 32, 3)\r\n x_train /= 255\r\n x_test /= 255\r\n\r\n return x_train, y_train, X_val, y_val, x_test, y_test", "def loadData(self):\n batch_size = 256\n \n #if self.conv_sg == True:\n # batch_size = 1 \n \n download = True\n root = self.root + self.dataset\n if self.dataset == \"MNIST\": \n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])\n trainset = torchvision.datasets.MNIST(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.MNIST(root, train=False, download=download, transform=transform)\n \n if self.dataset == \"CIFAR10\":\n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465,), (0.2023, 0.1994, 0.2010,))])\n trainset = torchvision.datasets.CIFAR10(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.CIFAR10(root, train=False, download=download, transform=transform)\n \n if self.dataset == \"CIFAR100\":\n transform = transforms.Compose([transforms.ToTensor()])\n trainset = torchvision.datasets.CIFAR100(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.CIFAR100(root, train=False, download=download, transform=transform)\n \n \n trainloader = torch.utils.data.DataLoader(trainset, batch_size = batch_size,\n shuffle=False, num_workers=0, pin_memory = False)\n \n testloader = torch.utils.data.DataLoader(testset, batch_size= batch_size,\n shuffle=False, num_workers=2, pin_memory = False)\n \n return trainloader, testloader", "def get_data_loaders(args, tokenizer):\n personachat = get_dataset(tokenizer, args.dataset_path, args.dataset_cache, args.train_lang)\n _ = personachat.pop(\"test\", None)\n logger.info(\"Build inputs and labels\")\n datasets = {\"train\": [], \"valid\": []}\n\n if args.train_lang in [\"En\", \"Fr\", \"It\", \"Id\", \"Jp\", \"Ko\", \"Zh\"]: #monolingual data\n for dataset_name, dataset in personachat.items():\n for dial in dataset[args.train_lang]: #dial: {\"persona\":[], \"history\":[], \"response\":str}\n instance = build_input_from_segments(dial[\"persona\"], dial[\"history\"][-args.max_turns:], dial[\"response\"], tokenizer, lm_labels = True)\n datasets[dataset_name].append(instance) \n else: #multilingual data\n for dataset_name, dataset in personachat.items():\n for lang, dials in dataset.items():\n for dial in dials: #dial: {\"persona\":[], \"history\":[], \"response\":str}\n instance = build_input_from_segments(dial[\"persona\"], 
dial[\"history\"][-args.max_turns:], dial[\"response\"], tokenizer, lang_id=\"<{}>\".format(lang.lower()), lm_labels = True)\n datasets[dataset_name].append(instance) #all langs together\n\n\n logger.info(\"Build train and validation dataloaders\")\n train_dataset = DatasetTrain(datasets[\"train\"])\n valid_dataset = DatasetTrain(datasets[\"valid\"])\n\n #logger.info(\"Build train and validation dataloaders\")\n #train_dataset, valid_dataset = TensorDataset(*tensor_datasets[\"train\"]), TensorDataset(*tensor_datasets[\"valid\"])\n train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset) if args.distributed else None\n valid_sampler = torch.utils.data.distributed.DistributedSampler(valid_dataset) if args.distributed else None\n train_loader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size, shuffle=(not args.distributed), collate_fn=collate_fn)\n valid_loader = DataLoader(valid_dataset, sampler=valid_sampler, batch_size=args.valid_batch_size, shuffle=False, collate_fn=collate_fn)\n\n # logger.info(\"Train dataset (Batch, Candidates, Seq length): {}\".format(train_dataset.tensors[0].shape))\n # #logger.info(\"Train dataset (Batch, Candidates, Seq length): {}\".format(train_dataset.tensors[1].shape))\n # logger.info(\"Valid dataset (Batch, Candidates, Seq length): {}\".format(valid_dataset.tensors[0].shape))\n logger.info(\"Train dataset length: {}\".format(len(train_dataset)))\n logger.info(\"Valid dataset length: {}\".format(len(valid_dataset)))\n return train_loader, valid_loader, train_sampler, valid_sampler", "def __init__(self, data_path, batch_size, **kwargs):\n super().__init__(batch_size, **kwargs)\n\n _, num_classes, X_train, y_train, X_val, y_val = load_cifar10_shard(shard_num=data_path, **kwargs)\n\n self.training_data_size = len(X_train)\n self.validation_data_size = len(X_val)\n self.num_classes = num_classes\n self.train_loader = self.create_loader(X=X_train, y=y_train, shuffle=True)\n self.val_loader = self.create_loader(X=X_val, y=y_val, shuffle=False)", "def setup_datasets(self):\r\n\r\n train_transform = transforms.Compose(\r\n [\r\n transforms.Resize(self.crop_size),\r\n transforms.RandomRotation(degrees=self.random_angle, resample=Image.BILINEAR),\r\n transforms.RandomResizedCrop(\r\n size=self.crop_size, scale=(1-self.random_scale, 1+self.random_scale), ratio=(1, 1)),\r\n transforms.RandomHorizontalFlip(),\r\n transforms.ToTensor(),\r\n transforms.Normalize(\r\n mean=[0.485, 0.456, 0.406],\r\n std=[0.229, 0.224, 0.225]\r\n )\r\n ]\r\n )\r\n val_transform = transforms.Compose(\r\n [\r\n transforms.Resize(self.crop_size),\r\n transforms.CenterCrop(self.crop_size),\r\n transforms.ToTensor(),\r\n transforms.Normalize(\r\n mean=[0.485, 0.456, 0.406],\r\n std=[0.229, 0.224, 0.225]\r\n )\r\n ]\r\n )\r\n\r\n train_dataset = CocoDatasetPairs(\r\n root_dir=self.coco_path,\r\n set_name='train2014',\r\n transform=train_transform,\r\n dataset_size_ratio=self.dataset_size_ratio\r\n )\r\n train_subset_dataset = Subset(train_dataset, range(0, len(train_dataset), 5*self.dataset_size_ratio))\r\n val_dataset = CocoDatasetPairs(\r\n root_dir=self.coco_path,\r\n set_name='val2014',\r\n transform=val_transform,\r\n )\r\n\r\n train_loader = DataLoader(\r\n train_dataset,\r\n batch_size=self.batch_size,\r\n shuffle=True,\r\n num_workers=self.num_workers\r\n )\r\n train_subset_loader = DataLoader(\r\n train_subset_dataset,\r\n batch_size=self.batch_size,\r\n shuffle=False,\r\n num_workers=self.num_workers\r\n )\r\n val_loader = 
DataLoader(\r\n val_dataset,\r\n batch_size=self.batch_size,\r\n shuffle=False,\r\n num_workers=self.num_workers\r\n )\r\n return train_loader, train_subset_loader, val_loader", "def load_dataset_cifar10():\n dirname = 'cifar-10-batches-py'\n origin = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'\n path = get_file(dirname, origin=origin, untar=True)\n\n num_train_samples = 50000\n\n x_train = np.empty((num_train_samples, 3, 32, 32), dtype='uint8')\n y_train = np.empty((num_train_samples,), dtype='uint8')\n\n for i in range(1, 6):\n fpath = os.path.join(path, 'data_batch_' + str(i))\n (x_train[(i - 1) * 10000: i * 10000, :, :, :],\n y_train[(i - 1) * 10000: i * 10000]) = load_batch(fpath)\n\n fpath = os.path.join(path, 'test_batch')\n x_test, y_test = load_batch(fpath)\n\n y_train = np.reshape(y_train, (len(y_train), 1))\n y_test = np.reshape(y_test, (len(y_test), 1))\n\n return (x_train, y_train), (x_test, y_test)", "def get_driving_data_loaders(batch_size, train_dataset, valid_dataset, test_dataset, num_workers=0): \n\n valid_loader = DataLoader(dataset=valid_dataset,\n batch_size=batch_size,\n num_workers=num_workers,\n shuffle=True)\n\n train_loader = DataLoader(dataset=train_dataset,\n batch_size=batch_size,\n num_workers=num_workers,\n drop_last=True, \n shuffle=True)\n\n test_loader = DataLoader(dataset=test_dataset,\n batch_size=batch_size,\n num_workers=num_workers,\n shuffle=False)\n\n return train_loader, valid_loader, test_loader", "def load_cifar(hparams):\n all_labels = []\n\n total_batches_to_load = 5\n assert hparams.train_size + hparams.validation_size <= 50000\n if hparams.eval_test:\n total_batches_to_load += 1\n # Determine how many images we have loaded\n total_dataset_size = 50000\n train_dataset_size = total_dataset_size\n if hparams.eval_test:\n total_dataset_size += 10000\n\n if hparams.dataset == 'cifar10':\n all_images = []\n elif hparams.dataset == 'cifar100':\n all_images = np.empty((1, 50000, 3072), dtype=np.uint8)\n if hparams.eval_test:\n test_data = np.empty((1, 10000, 3072), dtype=np.uint8)\n if hparams.dataset == 'cifar10':\n datafiles = [\n 'data_batch_1', 'data_batch_2', 'data_batch_3', 'data_batch_4',\n 'data_batch_5']\n\n if hparams.eval_test:\n datafiles.append('test_batch')\n num_classes = 10\n elif hparams.dataset == 'cifar100':\n datafiles = ['train']\n if hparams.eval_test:\n datafiles.append('test')\n num_classes = 100\n else:\n raise NotImplementedError('Unimplemented dataset: ', hparams.dataset)\n if hparams.dataset != 'test':\n for file_num, f in enumerate(datafiles):\n d = unpickle(os.path.join(hparams.data_path, f))\n if hparams.dataset == 'cifar10':\n labels = np.array(d['labels'])\n else:\n labels = np.array(d['fine_labels'])\n if f == 'test':\n test_data[0] = copy.deepcopy(d['data'])\n if hparams.dataset == 'cifar10':\n all_images.append(test_data)\n else:\n all_images = np.concatenate([all_images, test_data], axis=1)\n else:\n if hparams.dataset == 'cifar10':\n all_images.append(copy.deepcopy(d['data']))\n else:\n all_images[file_num] = copy.deepcopy(d['data'])\n nsamples = len(labels)\n for idx in range(nsamples):\n all_labels.append(labels[idx])\n if hparams.dataset == 'cifar10':\n all_images = np.concatenate(all_images, axis=0)\n all_images = all_images.reshape(-1, 3072)\n all_images = all_images.reshape(-1, 3, 32, 32) # pylint: disable=too-many-function-args\n all_images = all_images.transpose(0, 2, 3, 1).copy()\n all_images = all_images / 255.0\n mean = augmentation_transforms.MEANS\n std = augmentation_transforms.STDS\n 
tf.logging.info('mean:{} std: {}'.format(mean, std))\n all_images = (all_images - mean) / std\n all_labels = np.eye(num_classes)[np.array(all_labels, dtype=np.int32)]\n\n assert len(all_images) == len(all_labels)\n tf.logging.info(\n 'In CIFAR10 loader, number of images: {}'.format(len(all_images)))\n\n extra_test_images = None\n extra_test_labels = None\n if hparams.extra_dataset == 'cifar10_1':\n extra_test_ds = tfds.as_numpy(\n tfds.load('cifar10_1', split='test', batch_size=-1))\n extra_test_images = ((extra_test_ds['image'] / 255.0) - mean) / std\n extra_test_labels = np.eye(num_classes)[np.array(\n extra_test_ds['label'], dtype=np.int32)]\n\n # Break off test data\n if hparams.eval_test:\n test_images = all_images[train_dataset_size:]\n test_labels = all_labels[train_dataset_size:]\n else:\n test_images = []\n test_labels = []\n all_images = all_images[:train_dataset_size]\n all_labels = all_labels[:train_dataset_size]\n return all_images, all_labels, test_images, test_labels, extra_test_images, extra_test_labels", "def cifar_getDataloaders(trainSampleNum, valSampleNum, trainBatchSize, \n valBatchSize, trainAugmentation, dataDir=\".\"):\n \n transform_train = cifar_augmentationFactory(trainAugmentation)\n transform_val = cifar_augmentationFactory(\"noaugment\")\n\n dataset_train = datasets.CIFAR10(#load train dataset\n root=dataDir, train=True, \n transform=transform_train, download=True\n )\n\n dataset_val = datasets.CIFAR10(#load test dataset\n root=dataDir, train=False, \n transform=transform_val, download=True\n )\n\n ssc = SmallSampleController(\n trainSampleNum=trainSampleNum, valSampleNum=valSampleNum, \n trainBatchSize=trainBatchSize, valBatchSize=valBatchSize, \n trainDataset=dataset_train, valDataset=dataset_val \n ) \n\n return ssc", "def load_cifar10(directory):\n train_data = []\n train_labels = []\n for b in range(1, 6):\n f = os.path.join(directory, 'data_batch_%d' % (b,))\n X, Y = load_cifar10_batch(f)\n train_data.append(X)\n train_labels.append(Y)\n train_data = np.concatenate(train_data)\n train_labels = np.concatenate(train_labels)\n del X, Y\n test_data, test_labels = load_cifar10_batch(os.path.join(directory, 'test_batch'))\n return train_data, train_labels, test_data, test_labels", "def get_loaders(\n data_dir,\n train_transforms=None,\n val_transforms=None,\n train_test_split=0.85,\n train_val_split=0.15,\n batch_size=16,\n shuffle=True):\n np.random.seed(24)\n train_ds = ImageFolder(root=data_dir, transform=train_transforms)\n val_ds = ImageFolder(root=data_dir, transform=val_transforms)\n test_ds = ImageFolder(root=data_dir, transform=val_transforms)\n img_count = len(train_ds)\n indices = list(range(img_count))\n test_split = int(img_count * train_test_split)\n if shuffle:\n np.random.shuffle(indices)\n train_idx, test_idx = indices[:test_split], indices[test_split:]\n train_count = len(train_idx)\n val_split = int(train_count * (1 - train_val_split))\n train_idx, val_idx = train_idx[:val_split], train_idx[val_split:]\n train_sample = SubsetRandomSampler(train_idx)\n val_sample = SubsetRandomSampler(val_idx)\n test_sample = SubsetRandomSampler(test_idx)\n train_loader = DataLoader(train_ds, batch_size=batch_size, sampler=train_sample)\n val_loader = DataLoader(val_ds, batch_size=batch_size, sampler=val_sample)\n test_loader = DataLoader(test_ds, batch_size=batch_size, sampler=test_sample)\n\n return train_loader, val_loader, test_loader", "def load_cifar10_data(self, data_path='data/cifar-10-batches-py',\n n_train_samples=50000, n_test_samples=10000):\n 
train_data = None\n train_labels = []\n\n for i in range(1, 6):\n data_dic = unpickle(data_path + '/data_batch_{}'.format(i))\n if i == 1:\n train_data = data_dic['data']\n else:\n train_data = np.vstack((train_data, data_dic['data']))\n\n train_labels += data_dic['labels']\n\n test_data_dic = unpickle(data_path + '/test_batch')\n test_data = test_data_dic['data']\n test_labels = test_data_dic['labels']\n\n train_data = train_data.reshape((len(train_data),\n self.LOADED_IMG_DEPTH,\n self.LOADED_IMG_HEIGHT,\n self.LOADED_IMG_HEIGHT))\n\n train_data = np.rollaxis(train_data, 1, 4)\n train_labels = np.array(train_labels)\n\n test_data = test_data.reshape((len(test_data),\n self.LOADED_IMG_DEPTH,\n self.LOADED_IMG_HEIGHT,\n self.LOADED_IMG_HEIGHT))\n\n test_data = np.rollaxis(test_data, 1, 4)\n test_labels = np.array(test_labels)\n\n self.train_dataset = {'data': train_data[0:n_train_samples],\n 'labels': train_labels[0:n_train_samples],\n 'cls': [np.zeros(10)\n for i in range(n_train_samples)]}\n\n for i in range(0, n_train_samples):\n self.train_dataset['cls'][i][self.train_dataset['labels'][i]] = 1.\n\n self.test_dataset = {'data': test_data[0:n_test_samples],\n 'labels': test_labels[0:n_test_samples],\n 'cls': [np.zeros(10)\n for i in range(n_train_samples)]}\n\n for i in range(0, n_test_samples):\n self.test_dataset['cls'][i][self.test_dataset['labels'][i]] = 1.\n\n self.train_dataset['data_array'] = np.array(\n [item.flatten() for item in self.train_dataset['data']])\n\n self.train_dataset['labels_array'] = np.array(\n [item.flatten() for item in self.train_dataset['labels']])\n\n self.train_dataset['cls_array'] = np.array(\n [item.flatten() for item in self.train_dataset['cls']])\n\n self.test_dataset['data_array'] = np.array(\n [item.flatten() for item in self.test_dataset['data']])\n\n self.test_dataset['labels_array'] = np.array(\n [item.flatten() for item in self.test_dataset['labels']])\n\n self.test_dataset['cls_array'] = np.array(\n [item.flatten() for item in self.test_dataset['cls']])\n\n return None", "def create_train_test(option, transform, params, split=0.2):\r\n clip_im_dir = option.clip_im_dir\r\n matting_dir = option.matting_dir\r\n csv_path = option.csv_path\r\n \r\n print(\"create datasets\")\r\n \r\n \r\n data_df = pd.read_csv(csv_path)\r\n # data_df = MergeDataframe(clip_im_dir, matting_dir)\r\n \r\n #separate data in training and test data (20/80)\r\n train_df, test_df = train_test_split(data_df, test_size=split)\r\n \r\n #search right Dataset class\r\n package_dir = Path(src.dataset.__file__).resolve().parent\r\n\r\n for (_, module_name, _) in iter_modules([package_dir]):\r\n # print(module_name, self.ComType)\r\n if option.dataset.lower() == module_name.lower() :\r\n modelModule = importlib.import_module(\".\"+module_name)\r\n break\r\n \r\n # train data\r\n training_set = modelModule(train_df, clip_im_dir, matting_dir, transform, transform)\r\n train_loader = DataLoader(training_set, **params)\r\n \r\n \r\n #test data\r\n testing_set = modelModule(test_df, clip_im_dir, matting_dir, transform, transform)\r\n test_loader = DataLoader(testing_set, **params)\r\n \r\n return train_loader, test_loader", "def get_each_loader(data_path, batch_size, trn_negnum, shuffle=True, num_workers=0):\n \n dataset = ML_Dataset(data_path, trn_negnum)\n \n if data_path.endswith('trn') == True:\n collate = dataset.train_collate\n else:\n collate = test_collate\n\n data_loader = data.DataLoader(dataset=dataset,\n batch_size=batch_size,\n shuffle=shuffle,\n num_workers=num_workers,\n 
collate_fn=collate)\n\n return data_loader", "def build_training_data_loader(self) -> DataLoader:\n pass", "def create_dataset_cifar10(data_path, batch_size=32, num_parallel_workers=8, do_train=True):\r\n # define dataset\r\n data_path = os.path.join(data_path, \"cifar-10-batches-bin\" if do_train else \"cifar-10-verify-bin\")\r\n\r\n cifar_ds = ds.Cifar10Dataset(data_path, num_parallel_workers=num_parallel_workers, shuffle=do_train)\r\n\r\n # define map operations\r\n resize_height, resize_width = 32, 32\r\n rescale = 1.0 / 255.0\r\n shift = 0.0\r\n random_crop_op = CV.RandomCrop([32, 32], [4, 4, 4, 4])\r\n random_horizontal_op = CV.RandomHorizontalFlip(prob=0.5)\r\n resize_op = CV.Resize((resize_height, resize_width), interpolation=Inter.LINEAR)\r\n rescale_op = CV.Rescale(rescale, shift)\r\n normalize_op = CV.Normalize(mean=[0.4914, 0.4822, 0.4465], std=[0.2023, 0.1994, 0.2010])\r\n hwc2chw_op = CV.HWC2CHW()\r\n type_cast_op = C.TypeCast(mstype.int32)\r\n\r\n if do_train:\r\n compose_op = [random_crop_op, random_horizontal_op, resize_op, rescale_op, normalize_op, hwc2chw_op]\r\n else:\r\n compose_op = [resize_op, rescale_op, normalize_op, hwc2chw_op]\r\n cifar_ds = cifar_ds.map(input_columns=\"image\", operations=compose_op, num_parallel_workers=num_parallel_workers)\r\n cifar_ds = cifar_ds.map(input_columns=\"label\", operations=type_cast_op, num_parallel_workers=num_parallel_workers)\r\n cifar_ds = cifar_ds.batch(batch_size, drop_remainder=True)\r\n\r\n return cifar_ds", "def get_train_loaders(config):\n assert 'loaders' in config, 'Could not find data loaders configuration'\n loaders_config = config['loaders']\n\n logger.info('Creating training and validation set loaders...')\n\n # get dataset class\n dataset_cls_str = loaders_config.get('dataset', None)\n if dataset_cls_str is None:\n dataset_cls_str = 'StandardHDF5Dataset'\n logger.warning(f\"Cannot find dataset class in the config. Using default '{dataset_cls_str}'.\")\n dataset_class = _loader_classes(dataset_cls_str)\n\n assert set(loaders_config['train']['file_paths']).isdisjoint(loaders_config['val']['file_paths']), \\\n \"Train and validation 'file_paths' overlap. One cannot use validation data for training!\"\n\n train_datasets = dataset_class.create_datasets(loaders_config, phase='train')\n\n val_datasets = dataset_class.create_datasets(loaders_config, phase='val')\n\n num_workers = loaders_config.get('num_workers', 1)\n logger.info(f'Number of workers for train/val dataloader: {num_workers}')\n batch_size = loaders_config.get('batch_size', 1)\n if torch.cuda.device_count() > 1 and not config['device'] == 'cpu':\n logger.info(\n f'{torch.cuda.device_count()} GPUs available. 
Using batch_size = {torch.cuda.device_count()} * {batch_size}')\n batch_size = batch_size * torch.cuda.device_count()\n\n logger.info(f'Batch size for train/val loader: {batch_size}')\n # when training with volumetric data use batch_size of 1 due to GPU memory constraints\n return {\n 'train': DataLoader(ConcatDataset(train_datasets), batch_size=batch_size, shuffle=True, pin_memory=True,\n num_workers=num_workers),\n # don't shuffle during validation: useful when showing how predictions for a given batch get better over time\n 'val': DataLoader(ConcatDataset(val_datasets), batch_size=batch_size, shuffle=False, pin_memory=True,\n num_workers=num_workers)\n }", "def load_cifar100_dataset(dirname, labels='fine', transpose_permutation=(0,2,3,1)):\n \n #Verify paths exists for training and testing set\n if not os.path.exists(dirname):\n raise IOError, \"Cannot find path %s\" % dirname\n \n if labels not in ['fine', 'coarse']:\n raise AttributeError, \"Labels argument must be set to 'coarse' or 'fine'\"\n \n if len(set(transpose_permutation)) != 4:\n raise AttributeError, \"Expect transpose permutation to be \"\n\n full_path = os.path.abspath(dirname)\n \n train_path = os.path.join(full_path, 'train')\n test_path = os.path.join(full_path, 'test')\n \n #Load the training set\n with open(train_path, 'rb') as tr_f:\n tr_data_raw = pickle.load(tr_f)\n tr_data = {}\n \n for key, val in tr_data_raw.items():\n tr_data[key.decode('utf8')] = val #32 x 32 x 3 images.\n \n tr_X = tr_data['data']\n \n if labels=='fine':\n tr_y = tr_data['fine_labels']\n elif labels=='coarse':\n tr_y = tr_data['coarse_labels']\n \n tr_X = tr_X.reshape(tr_X.shape[0], 3, 32, 32)\n tr_y = np.reshape(tr_y, (len(tr_y), 1))\n \n #Load the testing set\n with open(test_path, 'rb') as te_f:\n te_data_raw = pickle.load(te_f)\n te_data = {}\n \n for key, val in te_data_raw.items():\n te_data[key.decode('utf8')] = val #32 x 32 x 3 images.\n \n te_X = te_data['data']\n \n if labels=='fine':\n te_y = te_data['fine_labels']\n elif labels=='coarse':\n te_y = te_data['coarse_labels']\n \n te_X = te_X.reshape(te_X.shape[0], 3, 32, 32)\n te_y = np.reshape(te_y, (len(te_y), 1))\n \n #scale to 255, transpose as needed\n tr_X = np.transpose(tr_X.astype('float32') / 255., transpose_permutation)\n te_X = np.transpose(te_X.astype('float32') / 255., transpose_permutation)\n \n return (tr_X, tr_y), (te_X, te_y), 100", "def loadData(self):\n # Load the raw CIFAR-10 data\n num_training = 49000\n num_validation = 1000\n num_test = 1000\n subtract_mean = True\n\n cifar10_dir = '/home/parallels/PycharmProjects/Courses/232A/project2/stats232a/datasets/cifar-10-batches-py'\n X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)\n\n # Subsample the data\n mask = list(range(num_training, num_training + num_validation))\n X_val = X_train[mask]\n y_val = y_train[mask]\n mask = list(range(num_training))\n X_train = X_train[mask]\n y_train = y_train[mask]\n mask = list(range(num_test))\n X_test = X_test[mask]\n y_test = y_test[mask]\n\n # Normalize the data: subtract the mean image\n if subtract_mean:\n mean_image = np.mean(X_train, axis=0)\n X_train -= mean_image\n X_val -= mean_image\n X_test -= mean_image\n\n # Transpose so that channels come first\n X_train = X_train.transpose(0, 3, 1, 2)\n X_val = X_val.transpose(0, 3, 1, 2)\n X_test = X_test.transpose(0, 3, 1, 2)\n\n # Package data into a dictionary\n self.data = {\n 'X_train': X_train, 'y_train': y_train,\n 'X_val': X_val, 'y_val': y_val,\n 'X_test': X_test, 'y_test': y_test,\n }", "def 
get_loader(dataset='train.txt', crop_size=128, image_size=28, batch_size=2, mode='train', num_workers=1): \n transform = [] \n if mode == 'train': \n transform.append(transforms.RandomHorizontalFlip()) \n transform.append(transforms.CenterCrop(crop_size)) \n transform.append(transforms.Resize(image_size)) \n transform.append(transforms.ToTensor()) \n transform.append(transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))) \n transform = transforms.Compose(transform) \n train_data=MyDataset(txt=dataset, transform=transform) \n data_loader = DataLoader(dataset=train_data, \n batch_size=batch_size, \n shuffle=(mode=='train'), \n num_workers=num_workers) \n return data_loader", "def get_cifar_data(num_classes=10):\n\n (x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()\n x_train = x_train.astype(np.float32) / 255.\n x_test = x_test.astype(np.float32) / 255.\n\n y_train_cat = to_categorical(y_train, num_classes=num_classes).astype(np.float32)\n y_test_cat = to_categorical(y_test, num_classes=num_classes).astype(np.float32)\n\n return x_train, y_train, x_test, y_test, y_train_cat, y_test_cat", "def get_test_loader(data_dir,\n batch_size,\n shuffle=True,\n num_workers=4,\n pin_memory=False):\n normalize = transforms.Normalize(\n mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225],\n )\n\n # define transform\n transform = transforms.Compose([\n transforms.ToTensor(),\n normalize,\n ])\n\n dataset = datasets.CIFAR10(\n root=data_dir, train=False,\n download=True, transform=transform,\n )\n\n data_loader = torch.utils.data.DataLoader(\n dataset, batch_size=batch_size, shuffle=shuffle,\n num_workers=num_workers, pin_memory=pin_memory,\n )\n\n return data_loader", "def prepare_data_loaders(num_split, batch_size=32, hier=False, elmo=False, elmo_pre=None, use_elmo_pre=False, deepmoji=False, dev_with_label=False, include_test=False):\n train_data_loaders = []\n val_data_loaders = []\n test_data_loaders = []\n\n vocab = generate_vocab(deepmoji)\n for i in range(num_split):\n train, val, test, _ = prepare_data(batch_size=batch_size, hier=hier, elmo=elmo, elmo_pre=elmo_pre, use_elmo_pre=use_elmo_pre, deepmoji=deepmoji, is_shuffle=True, random_state=i, vocab=vocab, dev_with_label=dev_with_label, include_test=include_test)\n train_data_loaders.append(train)\n val_data_loaders.append(val)\n test_data_loaders.append(test)\n\n return train_data_loaders, val_data_loaders, test_data_loaders, vocab", "def get_loaders(opt):\n train_samples, val_samples = get_train_val_metadata(opt.dataset_dir,\n opt.validation_cities,\n opt.patch_size,\n opt.stride)\n print('train samples : ', len(train_samples))\n print('val samples : ', len(val_samples))\n\n logging.info('STARTING Dataset Creation')\n\n full_load = full_onera_loader(opt.dataset_dir, opt)\n\n train_dataset = OneraPreloader(opt.dataset_dir,\n train_samples,\n full_load,\n opt.patch_size,\n opt.augmentation)\n val_dataset = OneraPreloader(opt.dataset_dir,\n val_samples,\n full_load,\n opt.patch_size,\n False)\n\n logging.info('STARTING Dataloading')\n\n train_loader = torch.utils.data.DataLoader(train_dataset,\n batch_size=opt.batch_size,\n shuffle=True,\n num_workers=opt.num_workers)\n val_loader = torch.utils.data.DataLoader(val_dataset,\n batch_size=opt.batch_size,\n shuffle=False,\n num_workers=opt.num_workers)\n return train_loader, val_loader", "def create_split_loaders(root_dir, batch_size, seed=0, transform=transforms.ToTensor(),\n p_val=0.1, p_test=0.2, shuffle=True, \n show_sample=False, extras={}):\n \n\n # once all 
single json datasets are created you can concat them into a single one:\n quickdraw_dataset = CharacterDataset(root_dir=root_dir, transform=transform)\n \n # Dimensions and indices of training set\n dataset_size = len(quickdraw_dataset)\n all_indices = list(range(dataset_size))\n\n # Shuffle dataset before dividing into training & test sets\n if shuffle:\n np.random.seed(seed)\n np.random.shuffle(all_indices)\n \n # Create the validation split from the full dataset\n val_split = int(np.floor(p_val * dataset_size))\n train_ind, val_ind = all_indices[val_split :], all_indices[: val_split]\n \n # Separate a test split from the training dataset\n test_split = int(np.floor(p_test * len(train_ind)))\n train_ind, test_ind = train_ind[test_split :], train_ind[: test_split]\n print(len(train_ind), len(val_ind), len(test_ind))\n # Use the SubsetRandomSampler as the iterator for each subset\n sample_train = SubsetRandomSampler(train_ind)\n sample_test = SubsetRandomSampler(test_ind)\n sample_val = SubsetRandomSampler(val_ind)\n\n num_workers = 0\n pin_memory = False\n # If CUDA is available\n if extras:\n num_workers = extras[\"num_workers\"]\n pin_memory = extras[\"pin_memory\"]\n \n # Define the training, test, & validation DataLoaders\n train_loader = DataLoader(quickdraw_dataset, batch_size=batch_size, \n sampler=sample_train, num_workers=num_workers, \n pin_memory=pin_memory)\n\n test_loader = DataLoader(quickdraw_dataset, batch_size=batch_size, \n sampler=sample_test, num_workers=num_workers, \n pin_memory=pin_memory)\n\n val_loader = DataLoader(quickdraw_dataset, batch_size=batch_size,\n sampler=sample_val, num_workers=num_workers, \n pin_memory=pin_memory)\n\n \n # Return the training, validation, test DataLoader objects\n return (train_loader, val_loader, test_loader)", "def __init__(self, dataset, batch_size, n_threads=4,\n\t ten_crop=False, data_path='/home/dataset/', logger=None):\n\t\tself.dataset = dataset\n\t\tself.batch_size = batch_size\n\t\tself.n_threads = n_threads\n\t\tself.ten_crop = ten_crop\n\t\tself.data_path = data_path\n\t\tself.logger = logger\n\t\tself.dataset_root = data_path\n\t\t\n\t\tself.logger.info(\"|===>Creating data loader for \" + self.dataset)\n\t\t\n\t\tif self.dataset in [\"cifar100\"]:\n\t\t\tself.train_loader, self.test_loader = self.cifar(\n\t\t\t\tdataset=self.dataset)\n\n\t\telif self.dataset in [\"cifar10\"]:\n\t\t\tself.train_loader, self.test_loader = self.cifar(\n dataset=self.dataset)\n\t\t\n\t\telif self.dataset in [\"imagenet\"]:\n\t\t\tself.train_loader, self.test_loader = self.imagenet(\n\t\t\t\tdataset=self.dataset)\n\t\telse:\n\t\t\tassert False, \"invalid data set\"", "def load_cifar10(directory, normalize=True):\n training_data = []\n training_labels = []\n for i in range(1, 6):\n try:\n d = unpickle(directory + f\"/data_batch_{i}\")\n except FileNotFoundError:\n raise Exception(f\"File 'data_batch_{i}' is not found in the specified directory '{directory}'.\")\n training_data.append(d[b\"data\"])\n training_labels.append(d[b\"labels\"])\n training_data = np.vstack(training_data)\n training_data = np.reshape(training_data, newshape=(-1, 3, 32, 32))\n training_labels = np.concatenate(training_labels)\n training_labels = np.array(list(map(lambda hot: one_hot(10, hot), training_labels)))\n\n try:\n test = unpickle(directory + \"/test_batch\")\n except FileNotFoundError:\n raise Exception(f\"File 'test_batch' is not found in the specified directory '{directory}'.\")\n test_data = np.reshape(test[b\"data\"], newshape=(-1, 3, 32, 32))\n test_labels 
= np.array(list(map(lambda hot: one_hot(10, hot), test[b\"labels\"])))\n\n try:\n meta = unpickle(directory + \"/batches.meta\")\n except FileNotFoundError:\n raise Exception(f\"File 'batches.meta' is not found in the specified directory '{directory}'.\")\n label_names = meta[b\"label_names\"]\n label_names = list(map(lambda x: x.decode(\"utf-8\"), label_names))\n\n if normalize:\n training_data = training_data / 255\n test_data = test_data / 255\n\n return training_data, training_labels, test_data, test_labels, label_names", "def load_data(self, dataset='cifar10', label_mode='fine'):\n if dataset == 'cifar10':\n if self.root:\n x_train, y_train = self.load_from_path(\n [os.path.join(self.root, f'data_batch_{i}') for i in range(1, 6)])\n x_test, y_test = self.load_from_path(\n [os.path.join(self.root, 'test_batch')])\n x_test = x_test.astype(x_train.dtype)\n y_test = y_test.astype(y_train.dtype)\n return (x_train, y_train), (x_test, y_test)\n else:\n return tf.keras.datasets.cifar10.load_data()\n elif dataset in ['cifar20', 'cifar100']:\n if self.root:\n x_train, y_train = self.load_from_path(\n [os.path.join(self.root, 'train')], label_key=label_mode)\n x_test, y_test = self.load_from_path([os.path.join(self.root, 'test')])\n x_test = x_test.astype(x_train.dtype)\n y_test = y_test.astype(y_train.dtype)\n return (x_train, y_train), (x_test, y_test)\n else:\n return tf.keras.datasets.cifar100.load_data(label_mode=label_mode)", "def load_CIFAR10(ROOT):\n xs = []\n ys = []\n for b in range(1,2):\n f = os.path.join(ROOT, 'data_batch_%d' % b)\n X, Y = load_CIFAR_batch(f)\n xs.append(X)\n ys.append(Y) \n #利用np.concatenate将xs、ys弄成一行\n Xtr = np.concatenate(xs)\n Ytr = np.concatenate(ys)\n del X, Y\n #获取测试集\n Xte, Yte = load_CIFAR_batch(os.path.join(ROOT, 'test_batch'))\n return Xtr, Ytr, Xte, Yte", "def get_train_test_loaders(self, num_workers=2):\n print('Loading the image data...')\n \n train_path_info, test_path_info = self.get_train_test_info()\n\n train_transform = transforms.Compose([transforms.Resize((self.image_width, self.image_height)),\n transforms.RandomAffine(10,translate=(0.1,0.1)),\n transforms.ToTensor()])\n\n test_transform = transforms.Compose([transforms.Resize((self.image_width, self.image_height)),\n transforms.ToTensor()])\n\n trainset = PocovidDataset(train_path_info, transform = train_transform)\n testset = PocovidDataset(test_path_info, transform = test_transform)\n \n self.class_map = trainset.get_class_map()\n self.classes = [self.class_map[key] for key in sorted(self.class_map)]\n\n train_loader = torch.utils.data.DataLoader(trainset, num_workers=num_workers, shuffle=True,\n batch_size=self.batch_size, drop_last=True)\n\n test_loader = torch.utils.data.DataLoader(testset, num_workers=num_workers, shuffle=True,\n batch_size=self.batch_size)\n \n print('Image data is loaded with fold {} as the test data'.format(self.fold))\n print('Number of training images:', len(trainset))\n print('Number of testing images:', len(testset))\n print('*'*100)\n print('The classes are:', self.classes)\n print('*'*100)\n \n return train_loader, test_loader", "def load_data(distorted, data_path, dataset):\n x_train, y_train = [], []\n if dataset == 'cifar10':\n num_classes = 10\n for filename in ['data_batch_' + str(i) for i in np.arange(1, 6)]:\n dic = load_file(filename, data_path)\n x_train.append(dic['data'])\n y_train.append(np.asarray(dic['labels']))\n\n x_train = np.vstack(tuple(x_train))\n y_train = np.concatenate(tuple(y_train))\n\n dic = load_file('test_batch', data_path)\n x_test = 
dic['data']\n y_test = np.asarray(dic['labels'])\n elif dataset == 'cifar100':\n num_classes = 100\n dic = load_file('train', data_path)\n x_train = dic['data']\n y_train = np.asarray(dic['fine_labels'])\n dic = load_file('test', data_path)\n x_test = dic['data']\n y_test = np.asarray(dic['fine_labels'])\n else:\n raise NotImplementedError('Dataset should be cifar10 or cifar100')\n\n # Preprocessing\n x_train = np.reshape(x_train, [x_train.shape[0], 3, 32, 32])\n x_test = np.reshape(x_test, [x_test.shape[0], 3, 32, 32])\n x_train = np.transpose(x_train, [0, 2, 3, 1])\n x_test = np.transpose(x_test, [0, 2, 3, 1])\n\n y_train = tf.keras.utils.to_categorical(y_train, num_classes=num_classes)\n y_test = tf.keras.utils.to_categorical(y_test, num_classes=num_classes)\n\n x_train = preprocess(x_train, distorted=distorted)\n x_test = preprocess(x_test, distorted=False)\n\n # Convert from tf Tensor to numpy array\n sess = tf.Session()\n with sess.as_default():\n x_train_np, x_test_np = sess.run([x_train, x_test])\n\n return (x_train_np, y_train), (x_test_np, y_test)", "def get_datasets(data):\n train_dataset, test_dataset = None, None\n data_dir = '../data'\n\n if data == 'fmnist':\n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=[0.2860], std=[0.3530])])\n train_dataset = datasets.FashionMNIST(data_dir, train=True, download=True, transform=transform)\n test_dataset = datasets.FashionMNIST(data_dir, train=False, download=True, transform=transform)\n \n elif data == 'fedemnist':\n train_dir = '../data/Fed_EMNIST/fed_emnist_all_trainset.pt'\n test_dir = '../data/Fed_EMNIST/fed_emnist_all_valset.pt'\n train_dataset = torch.load(train_dir)\n test_dataset = torch.load(test_dir) \n \n elif data == 'cifar10':\n transform_train = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(mean=(0.4914, 0.4822, 0.4465), std=(0.2023, 0.1994, 0.2010)),\n ])\n transform_test = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(mean=(0.4914, 0.4822, 0.4465), std=(0.2023, 0.1994, 0.2010)),\n ])\n train_dataset = datasets.CIFAR10(data_dir, train=True, download=True, transform=transform_train)\n test_dataset = datasets.CIFAR10(data_dir, train=False, download=True, transform=transform_test)\n train_dataset.targets, test_dataset.targets = torch.LongTensor(train_dataset.targets), torch.LongTensor(test_dataset.targets) \n \n return train_dataset, test_dataset", "def load_CIFAR10(ROOT):\n xs = []\n ys = []\n for b in range(1, 6):\n f = os.path.join(ROOT, 'data_batch_%d' % (b,))\n X, Y = load_CIFAR_batch(f)\n xs.append(X)\n ys.append(Y)\n Xtr = np.concatenate(xs)\n Ytr = np.concatenate(ys)\n del X, Y\n Xte, Yte = load_CIFAR_batch(os.path.join(ROOT, 'test_batch'))\n return Xtr, Ytr, Xte, Yte", "def init_data(dataset_config: dict):\n # train and dev will be in random order, test may be ordered according to labels\n if dataset_config[\"name\"] == \"CoLA\":\n train, dev, test, num_classes = load_cola(dataset_config)\n elif dataset_config[\"name\"] == \"AGNews\":\n train, dev, test, num_classes = load_ag_news(dataset_config)\n elif dataset_config[\"name\"] == \"DBPedia\":\n train, dev, test, num_classes = load_dbpedia(dataset_config)\n elif dataset_config[\"name\"] == \"YRF\":\n train, dev, test, num_classes = load_yrf(dataset_config)\n else:\n raise NameError(f\"Dataset {dataset_config['name']} not implemented.\")\n # etc.\n\n # shrink size if debugging\n if dataset_config[\"debug\"]:\n # choose a random subset using huggingface select function\n train = 
train.select(random.sample(range(len(train)), k=200))\n dev = dev.select(random.sample(range(len(dev)), k=40))\n test = test.select(random.sample(range(len(test)), k=200))\n\n # create class imbalance\n random.seed(dataset_config[\"seed\"])\n if dataset_config[\"pool_balance\"] == \"balanced\":\n pass\n elif dataset_config[\"pool_balance\"] == \"imbalanced\":\n train = train.filter(lambda example: create_imbalanced_dataset(example, dataset_config[\"imbalance_prop\"], dataset_config['imbalance_cls']))\n else:\n NameError(f\"pool_balance = {dataset_config['pool_balance']} not allowed\")\n\n if dataset_config[\"dev_balance\"] == \"balanced\":\n pass\n elif dataset_config[\"dev_balance\"] == \"imbalanced\":\n dev = dev.filter(lambda example: create_imbalanced_dataset(example, dataset_config[\"imbalance_prop\"], dataset_config['imbalance_cls']))\n else:\n NameError(f\"dev_balance = {dataset_config['dev_balance']} not allowed\")\n\n # get seed labelled pool indices (using the same seed data every time)\n random.seed(dataset_config[\"seed\"])\n if dataset_config[\"seed_balance\"] == \"balanced\":\n # this is random (will have some variance vs pool)\n indices = list(range(len(train)))\n unlabelled_pool_idx, labelled_pool_idx = split(\n indices,\n random_state=dataset_config[\"seed\"],\n test_size=dataset_config[\"seed_size\"]\n )\n elif dataset_config[\"seed_balance\"] == \"stratified\":\n # this is the same as the underlying train set (which may be unbalanced)\n indices = list(range(len(train)))\n unlabelled_pool_idx, labelled_pool_idx = split(\n indices,\n random_state=dataset_config[\"seed\"],\n test_size=dataset_config[\"seed_size\"],\n stratify=train['label']\n )\n elif dataset_config[\"seed_balance\"] == \"imbalanced\":\n # artificially sample an imbalanced seed set from the pool\n unlabelled_pool_idx, labelled_pool_idx = create_imbalanced_seed(\n train,\n num_classes,\n dataset_config[\"seed_size\"],\n dataset_config['imbalance_prop'],\n dataset_config['imbalance_cls']\n )\n else:\n raise NameError(f\"seed_balance = {dataset_config['seed_balance']} not allowed\")\n\n return train, dev, test, num_classes, labelled_pool_idx, unlabelled_pool_idx", "def load_cifar() -> Tuple[torchvision.datasets.CIFAR10, torchvision.datasets.CIFAR10]:\n \n # Define the transform for the data.\n transform = transforms.Compose(\n [transforms.ToTensor(), \n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]\n )\n \n # Initialize Datasets. 
CIFAR-10 will automatically download if not present\n trainset = torchvision.datasets.CIFAR10(\n root=DATA_ROOT, train=True, download=True, transform=transform\n )\n testset = torchvision.datasets.CIFAR10(\n root=DATA_ROOT, train=False, download=True, transform=transform\n )\n \n # Return the datasets\n return trainset, testset", "def load_CIFAR10(ROOT):\n xs = []\n ys = []\n for b in range(1,6):\n f = os.path.join(ROOT, 'data_batch_%d' % (b, ))\n X, Y = load_CIFAR_batch(f)\n xs.append(X)\n ys.append(Y)\n Xtr = np.concatenate(xs)\n Ytr = np.concatenate(ys)\n del X, Y\n Xte, Yte = load_CIFAR_batch(os.path.join(ROOT, 'test_batch'))\n return Xtr, Ytr, Xte, Yte", "def get_test_data_loaders(cap_files, visual_feats, vocab, bow2vec, batch_size=100, num_workers=2, n_caption=2, video2frames = None):\n dset = {'test': Dataset4DualEncoding(cap_files['test'], visual_feats['test'], bow2vec, vocab, n_caption, video2frames = video2frames['test'])}\n\n\n data_loaders = {x: torch.utils.data.DataLoader(dataset=dset[x],\n batch_size=batch_size,\n shuffle=False,\n pin_memory=True,\n num_workers=num_workers,\n collate_fn=collate_frame_gru_fn)\n for x in cap_files }\n return data_loaders", "def data_loaders(args):\n\n transform = transforms.Compose([\n transforms.Resize(64),\n transforms.ToTensor(),\n lambda image: (image - 0.5) * 2\n ])\n\n train_mnist = datasets.MNIST(\n root=args.database_root,\n train=True,\n download=True,\n transform=transform\n )\n train_loader = DataLoader(\n dataset=train_mnist,\n batch_size=args.train_batch_size,\n shuffle=True,\n num_workers=1,\n pin_memory=True\n )\n\n test_mnist = datasets.MNIST(\n root=args.database_root,\n train=False,\n download=True,\n transform=transform\n )\n test_loader = DataLoader(\n dataset=test_mnist,\n batch_size=args.test_batch_size,\n shuffle=True,\n num_workers=1,\n pin_memory=True\n )\n\n return train_loader, test_loader", "def load_cifar10(batch_paths):\n batches = [load_cifar10_batch(path) for path in batch_paths]\n data = torch.cat([batch[0] for batch in batches])\n labels = torch.cat([batch[1] for batch in batches])\n return data, labels", "def load_CIFAR10(path):\r\n sampleList = []\r\n labelList = []\r\n # load all the data, as there only five training samples name as data_batch_id\r\n for i in range(1, 6):\r\n # get full filename\r\n filename = os.path.join(path, 'data_batch_%d' % (i, ))\r\n x, y = load_CIFAR_batch(filename)\r\n\r\n sampleList.append(x)\r\n labelList.append(y)\r\n\r\n # combine elements as one array\r\n Xtr = np.concatenate(sampleList)\r\n Ytr = np.concatenate(labelList)\r\n del x, y\r\n print(\"Training data loaded, total size : %d\", len(Xtr))\r\n # load test data\r\n Xte, Yte = load_CIFAR_batch(os.path.join(path, 'test_batch'))\r\n return Xtr, Ytr, Xte, Yte", "def get_data_loaders(data, batch_size, ratio=0.8, num_workers=1):\n train_size = int(len(data) * ratio)\n val_size = len(data) - train_size\n train_set, val_set = random_split(data, [train_size, val_size])\n data_train_loader = DataLoader(train_set, batch_size=batch_size, num_workers=num_workers, shuffle=True)\n data_val_loader = DataLoader(val_set, batch_size=batch_size, num_workers=num_workers, shuffle=True)\n return data_train_loader, data_val_loader", "def create_cifar10_dataset(cifar_dir):\n ds = de.Cifar10Dataset(cifar_dir)\n\n training = True\n resize_height = 224\n resize_width = 224\n rescale = 1.0 / 255.0\n shift = 0.0\n repeat_num = 10\n batch_size = 32\n\n # define map operations\n random_crop_op = vision.RandomCrop((32, 32), (4, 4, 4, 4)) # padding_mode 
default CONSTANT\n random_horizontal_op = vision.RandomHorizontalFlip()\n resize_op = vision.Resize((resize_height, resize_width)) # interpolation default BILINEAR\n rescale_op = vision.Rescale(rescale, shift)\n normalize_op = vision.Normalize((0.4465, 0.4822, 0.4914), (0.2010, 0.1994, 0.2023))\n changeswap_op = vision.HWC2CHW()\n type_cast_op = C.TypeCast(mstype.int32)\n\n if training:\n c_trans = [random_crop_op, random_horizontal_op]\n c_trans += [resize_op, rescale_op, normalize_op,\n changeswap_op]\n\n # apply map operations on images\n ds = ds.map(operations=type_cast_op, input_columns=\"label\")\n ds = ds.map(operations=c_trans, input_columns=\"image\")\n\n # apply repeat operations\n ds = ds.repeat(repeat_num)\n\n # apply shuffle operations\n ds = ds.shuffle(buffer_size=10)\n\n # apply batch operations\n ds = ds.batch(batch_size=batch_size, drop_remainder=True)\n\n return ds", "def create_dataloaders(data_dir):\n\n trng_dataset = datasets.ImageFolder(data_dir / TRNG_FOLDER,\n transform=flowernet.trng_transform)\n trng_dataloader = torch.utils.data.DataLoader(trng_dataset,\n batch_size=64,\n shuffle=True)\n\n valn_dataset = datasets.ImageFolder(data_dir / VALN_FOLDER,\n transform=flowernet.pred_transform)\n valn_dataloader = torch.utils.data.DataLoader(valn_dataset,\n batch_size=64,\n shuffle=True)\n\n return trng_dataloader, valn_dataloader", "def run(dataset_dir):\n if not tf.gfile.Exists(dataset_dir):\n tf.gfile.MakeDirs(dataset_dir)\n\n training_filename = _get_output_filename(dataset_dir, 'train')\n testing_filename = _get_output_filename(dataset_dir, 'test')\n\n if tf.gfile.Exists(training_filename) and tf.gfile.Exists(testing_filename):\n print('Dataset files already exist. Exiting without re-creating them.')\n return\n\n dataset_utils.download_and_uncompress_tarball(_DATA_URL, dataset_dir)\n\n # First, process the training data:\n with tf.python_io.TFRecordWriter(training_filename) as tfrecord_writer:\n offset = 0\n for i in range(_NUM_TRAIN_FILES):\n filename = os.path.join(dataset_dir,\n 'cifar-100-python',\n 'train')\n offset = _add_to_tfrecord(filename, tfrecord_writer, offset)\n\n # Next, process the testing data:\n with tf.python_io.TFRecordWriter(testing_filename) as tfrecord_writer:\n filename = os.path.join(dataset_dir,\n 'cifar-100-python',\n 'test')\n _add_to_tfrecord(filename, tfrecord_writer)\n\n # Finally, write the labels file:\n labels_to_class_names = dict(zip(range(len(_CLASS_NAMES)), _CLASS_NAMES))\n dataset_utils.write_label_file(labels_to_class_names, dataset_dir)\n\n _clean_up_temporary_files(dataset_dir)\n print('\\nFinished converting the Cifar100 dataset!')", "def fetch_cifar100_efficient_kd_dataloaders(args):\n\n loaders = {}\n for mode in [\"train\", \"test\"]:\n dataset = CachedKDDataset(mode=mode)\n loaders[mode] = \\\n torch.utils.data.DataLoader(\n dataset,\n batch_size=args.batch_size,\n shuffle=(mode == \"train\"),\n num_workers=4,\n collate_fn=dict_collate\n )\n\n return loaders[\"train\"], loaders[\"test\"]", "def make_loader(dataset, train_batch_size, validation_split=0.2):\n # number of samples in train and test set\n train_len = int(len(dataset) * (1 - validation_split))\n test_len = len(dataset) - train_len\n train_set, test_set = torch.utils.data.random_split(dataset, [train_len, test_len])\n # create train_loader\n train_loader = torch.utils.data.DataLoader(\n train_set, batch_size=train_batch_size, shuffle=True,\n )\n # create test_loader\n test_loader = torch.utils.data.DataLoader(test_set, batch_size=1, shuffle=False,)\n return 
train_loader, test_loader", "def data_loaders(dataset_path):\n dataset_path = dataset_path\n news_stock_dataset = NewsStockDataLoader(dataset_path)\n \n dataset_size = len(news_stock_dataset)\n indices = list(range(dataset_size))\n training_split = int(0.8 * dataset_size)\n validation_split = int(0.9 * dataset_size)\n\n np.random.seed(96)\n np.random.shuffle(indices)\n\n train_indices = indices[:training_split]\n valid_indices = indices[training_split:validation_split]\n test_indices = indices[validation_split:]\n\n train_sampler = SubsetRandomSampler(train_indices)\n valid_sampler = SubsetRandomSampler(valid_indices)\n test_sampler = SubsetRandomSampler(test_indices)\n \n collate = PadSequence()\n\n training_loader = DataLoader(news_stock_dataset,\n num_workers = 1,\n batch_size = Config.get(\"training_batch_size\"),\n sampler = train_sampler,\n collate_fn = collate)\n\n validation_loader = DataLoader(news_stock_dataset,\n num_workers = 1,\n batch_size = Config.get(\"validation_batch_size\"),\n sampler = valid_sampler,\n collate_fn = collate)\n\n testing_loader = DataLoader(news_stock_dataset,\n num_workers = 1,\n batch_size = Config.get(\"testing_batch_size\"),\n sampler= test_sampler,\n collate_fn = collate)\n \n return training_loader, validation_loader, testing_loader", "def get_loader(root_folder, batch_size=16, shuffle=False, num_workers=0, pin_memory=False):\n cal101_dset = get_dataset(root_folder) \n\n # train test split \n split_ratio = 0.2 \n dataset_size = len(cal101_dset)\n indices = np.arange(dataset_size)\n np.random.shuffle(indices)\n split = int(np.floor(split_ratio * dataset_size))\n train_indices, val_indices = indices[split:], indices[:split]\n\n train_sampler = data.SubsetRandomSampler(train_indices)\n valid_sampler = data.SubsetRandomSampler(val_indices) \n\n train_loader = data.DataLoader( cal101_dset, batch_size=batch_size, \n shuffle=shuffle,num_workers=num_workers, sampler=train_sampler, pin_memory=pin_memory)\n validation_loader = data.DataLoader(cal101_dset, batch_size=batch_size,\n shuffle=shuffle,num_workers=num_workers, sampler=valid_sampler, pin_memory=pin_memory)\n\n return train_loader, validation_loader", "def load_cifar10(data_path=\".\", test_size=0.2, random_state=1337):\n test_path = os.path.join(data_path, \"cifar-10-batches-py/test_batch\")\n train_paths = [os.path.join(data_path, \"cifar-10-batches-py/data_batch_%i\" % i) for i in range(1, 6)]\n\n if not os.path.exists(test_path) or not all(list(map(os.path.exists, train_paths))):\n print (\"Dataset not found. 
Downloading...\")\n download_cifar(data_path,\n url='https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz',\n tarname='cifar-10-python.tar.gz')\n\n train_batches = list(map(unpickle, train_paths))\n test_batch = unpickle(test_path)\n\n X = np.concatenate([batch[\"data\"] for batch in train_batches]).reshape([-1, 3, 32, 32]).astype('float32') / 255\n y = np.concatenate([batch[\"labels\"] for batch in train_batches]).astype('int32')\n X_train, X_val, y_train, y_val = train_test_split(X, y,\n test_size=test_size,\n random_state=random_state)\n\n X_test = test_batch[\"data\"].reshape([-1, 3, 32, 32]).astype('float32') / 255\n y_test = np.array(test_batch[\"labels\"]).astype('int32')\n\n return X_train, y_train, X_val, y_val, X_test, y_test", "def get_loader(mode):\n\tglobal train_loader, valid_loader\n\tconfig = Config\n\ttransform_list_train = []\n\ttransform_list_test = []\n\tis_train = mode == \"train\"\n\tif config.train.use_augmentation:\n\t\ttransform_list_train.extend([transforms.Resize((config.data.image_size, config.data.image_size)), ImageNetPolicy()])\n\ttransform_list_train.extend([transforms.Resize((config.data.image_size, config.data.image_size)), transforms.ToTensor(),\n\t transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])\n\t\n\ttransform_train = transforms.Compose(transform_list_train)\n\t\n\tif config.predict.use_augmentation:\n\t\ttransform_list_test.extend([transforms.Resize((config.data.image_size, config.data.image_size)), ImageNetPolicy()])\n\ttransform_list_test.extend([transforms.Resize((config.data.image_size, config.data.image_size)), transforms.ToTensor(),\n\t transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])\n\t\n\ttransform_test = transforms.Compose(transform_list_test)\n\tif config.model.dataset == \"mnist\":\n\t\tmnist = datasets.MNIST(root=config.data.mnist_path, download=True, transform=transform_train, train=is_train)\n\t\t# train-validation split\n\t\ttrain_mnist, valid_mnist = train_valid_split(mnist)\n\t\ttrain_loader = DataLoader(dataset=train_mnist, batch_size=config.train.batch_size, shuffle=config.train.shuffle, num_workers=config.data.num_workers)\n\t\tvalid_loader = DataLoader(dataset=valid_mnist, batch_size=config.train.batch_size, shuffle=config.train.shuffle, num_workers=config.data.num_workers)\n\tif config.model.dataset == \"svhn\":\n\t\tsvhn = datasets.SVHN(root=config.data.svhn_path, download=True, transform=transform_train, split=mode)\n\t\ttrain_svhn, valid_svhn = train_valid_split(svhn)\n\t\ttrain_loader = DataLoader(dataset=train_svhn, batch_size=config.train.batch_size, shuffle=config.train.shuffle, num_workers=config.data.num_workers)\n\t\tvalid_loader = DataLoader(dataset=valid_svhn, batch_size=config.train.batch_size, shuffle=config.train.shuffle, num_workers=config.data.num_workers)\n\tif config.model.dataset == \"cifar10\":\n\t\tcifar10 = datasets.CIFAR10(root=config.data.cifar10_path, download=True, transform=transform_train, train=is_train)\n\t\ttrain_cifar, valid_cifar = train_valid_split(cifar10)\n\t\ttrain_loader = DataLoader(dataset=train_cifar, batch_size=config.train.batch_size, shuffle=config.train.shuffle, num_workers=config.data.num_workers)\n\t\tvalid_loader = DataLoader(dataset=valid_cifar, batch_size=config.train.batch_size, shuffle=config.train.shuffle, num_workers=config.data.num_workers)\n\tif config.model.dataset == \"moons\":\n\t\ttrain_moons, valid_moons = train_valid_split(moons_dataset())\n\t\ttrain_loader = DataLoader(dataset=train_moons, 
batch_size=config.train.batch_size, shuffle=config.train.shuffle, num_workers=config.data.num_workers)\n\t\tvalid_loader = DataLoader(dataset=valid_moons, batch_size=config.train.batch_size, shuffle=config.train.shuffle, num_workers=config.data.num_workers)\n\tif config.model.dataset == \"miniimagenet\":\n\t\ttrain_loader, valid_loader = read_dataset(Config.data.miniimagenet_path, transform_train, transform_test) # transform_train(train_loader)\n\tif config.model.dataset == \"miniimagenet_all\":\n\t\ttrain_loader = datasets.ImageFolder(root=config.data.miniimagenet_path_train, transform=transform_train)\n\t\tvalid_loader = datasets.ImageFolder(root=config.data.miniimagenet_path_valid, transform=transform_list_test)\n\t\t# \t# test_imagenet = datasets.ImageFolder(root=config.data.miniimagenet_path_test)\n\t\ttrain_loader = DataLoader(dataset=train_loader, batch_size=Config.train.batch_size, shuffle=config.train.shuffle, num_workers=config.data.num_workers)\n\t\tvalid_loader = DataLoader(dataset=valid_loader, batch_size=Config.train.batch_size, shuffle=config.train.shuffle, num_workers=config.data.num_workers)\n\t# \t# test_loader = DataLoader(dataset=test_imagenet, batch_size=config.train.batch_size, shuffle=config.train.shuffle, num_workers=config.data.num_workers)\n\tif config.model.dataset == \"miniimagenet_concat\":\n\t\tconcat_dataset = ConcatDataset([datasets.ImageFolder(config.data.miniimagenet_path_train, transform=transform_train),\n\t\t datasets.ImageFolder(config.data.miniimagenet_path_valid, transform=transform_train)])\n\t\ttrain_, valid_ = train_valid_split(concat_dataset)\n\t\ttrain_loader = torch.utils.data.DataLoader(train_, batch_size=config.train.batch_size, shuffle=True, num_workers=config.data.num_workers, pin_memory=True)\n\t\tvalid_loader = torch.utils.data.DataLoader(valid_, batch_size=config.train.batch_size, shuffle=True, num_workers=config.data.num_workers, pin_memory=True)\n\t\n\tif config.model.dataset == \"imagenet\":\n\t\ttrain_loader = datasets.ImageFolder(root=config.data.imagenet_path_train, transform=transform_train)\n\t\tvalid_loader = datasets.ImageFolder(root=config.data.imagenet_path_val, transform=transform_list_test)\n\t\t# \t# test_imagenet = datasets.ImageFolder(root=config.data.miniimagenet_path_test)\n\t\ttrain_loader = DataLoader(dataset=train_loader, batch_size=Config.train.batch_size, shuffle=config.train.shuffle, num_workers=config.data.num_workers)\n\t\tvalid_loader = DataLoader(dataset=valid_loader, batch_size=Config.train.batch_size, shuffle=config.train.shuffle, num_workers=config.data.num_workers)\n\treturn train_loader, valid_loader", "def load_data(root, num_seen, batch_size, num_workers):\n CIFAR10.init(root, num_seen)\n query_dataset = CIFAR10('query', transform=query_transform())\n seen_dataset = CIFAR10('seen', transform=train_transform())\n unseen_dataset = CIFAR10('unseen', transform=train_transform())\n retrieval_dataset = CIFAR10('retrieval', transform=train_transform())\n\n query_dataloader = DataLoader(\n query_dataset,\n batch_size=batch_size,\n pin_memory=True,\n num_workers=num_workers,\n )\n\n seen_dataloader = DataLoader(\n seen_dataset,\n shuffle=True,\n batch_size=batch_size,\n pin_memory=True,\n num_workers=num_workers,\n )\n\n unseen_dataloader = DataLoader(\n unseen_dataset,\n shuffle=True,\n batch_size=batch_size,\n pin_memory=True,\n num_workers=num_workers,\n )\n\n retrieval_dataloader = DataLoader(\n retrieval_dataset,\n shuffle=True,\n batch_size=batch_size,\n pin_memory=True,\n num_workers=num_workers,\n )\n\n 
return query_dataloader, seen_dataloader, unseen_dataloader, retrieval_dataloader", "def load_test_dataset(self):\n test_data_path = \"testdata\"\n root = Path(test_data_path)\n classes = sorted([j.name.split('/')[-1] for j in root.iterdir()])\n print(classes)\n\n transform = transforms.Compose([\n transforms.Resize(300),\n transforms.RandomHorizontalFlip(),\n transforms.CenterCrop(250),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.6071, 0.4828, 0.3934], std=[0.2845, 0.3187, 0.3240])\n ])\n\n dataset = datasets.ImageFolder(test_data_path, transform=transform)\n testloader = DataLoader(dataset, batch_size=4, shuffle=True)\n print(\"Loaded data\")\n return testloader", "def get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=1000):\n # Load the raw CIFAR-10 data\n cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'\n X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)\n \n # Subsample the data\n mask = range(num_training, num_training + num_validation)\n X_val = X_train[mask]\n y_val = y_train[mask]\n mask = range(num_training)\n X_train = X_train[mask]\n y_train = y_train[mask]\n mask = range(num_test)\n X_test = X_test[mask]\n y_test = y_test[mask]\n\n # Normalize the data: subtract the mean image\n mean_image = np.mean(X_train, axis=0)\n X_train -= mean_image\n X_val -= mean_image\n X_test -= mean_image\n\n # Reshape data to rows\n X_train = X_train.reshape(num_training, -1)\n X_val = X_val.reshape(num_validation, -1)\n X_test = X_test.reshape(num_test, -1)\n\n pca = decomposition.PCA(n_components=None, whiten=False)\n X_train = pca.fit_transform(X_train)\n X_val = pca.transform(X_val)\n X_test = pca.transform(X_test)\n\n return X_train, y_train, X_val, y_val, X_test, y_test", "def load_data_and_labels(self):\n gen = image.ImageDataGenerator()\n target_size = (224,224)\n if self.preprocess:\n print('Preprocessing data...')\n if not os.path.isdir(self.pproc_dir()):\n os.mkdir(self.pproc_dir())\n \n batch_arr = []\n for ld,segment in [(self.train_dir(), 'train'),\n (self.valid_dir(), 'valid')]:\n # TODO(ness): segment = os.basename(ld)\n flowgen = gen.flow_from_directory(\n ld,\n target_size=target_size,\n shuffle=False,\n class_mode=None,\n batch_size=1)\n # Save the batches using method defined in utils.py\n data = np.concatenate([flowgen.next() for i in range(flowgen.n)])\n batches_dir = self.pproc_dir() + segment + '-bc'\n save_array(batches_dir, data)\n \n # Save the classes.\n cls_dir = self.pproc_dir() + segment + '-cl'\n save_array(cls_dir, flowgen.classes)\n \n batch_arr.append((data, flowgen.classes, flowgen.class_indices))\n \n # Set the data.\n self.training_data = batch_arr[0][0]\n self.validation_data = batch_arr[1][0]\n \n # Classes are zero-indexed and represent a category in\n # numerical form. So if the classes are 'dog' and 'cat',\n # the possible class values will be 0 and 1.\n self.trn_classes = batch_arr[0][1]\n self.val_classes = batch_arr[1][1]\n \n # Labels are the one-hot encoded (i.e. categorical)\n # version of the classes. In other words, if there are\n # 5 classes and an element belongs to class 2,\n # its label will be [0,0,1,0,0] (index 1).\n self.training_labels = to_categorical(batch_arr[0][1])\n self.validation_labels = to_categorical(batch_arr[1][1])\n \n # Class indices are dictionaries of the form\n # {'category_name': 0, 'category_name_2: 1}. They\n # make the mapping between numerical class indices and\n # a human-readable category name. 
They are (should be...)\n # the same for validation and training, so only load them\n # once, after sanity checking.\n self.cindices = batch_arr[0][2]\n print('Done preprocessing.')\n else:\n print('Loading data...')\n # Load the pre-saved data using methods defined in utils.py. See\n # preprocessing branch for the meaning of the data.\n self.training_data = load_array(self.pproc_dir() + 'train-bc')\n self.validation_data = load_array(self.pproc_dir() + 'valid-bc')\n self.trn_classes = load_array(self.pproc_dir() + 'train-cl')\n self.val_classes = load_array(self.pproc_dir() + 'valid-cl')\n self.training_labels = to_categorical(self.trn_classes)\n self.validation_labels = to_categorical(self.val_classes)\n \n # To get the class indices, we create the generator. It's cheap to\n # run since it doesn't actually load all the data.\n flowgen = gen.flow_from_directory(\n self.train_dir(),\n target_size=target_size,\n shuffle=False,\n class_mode=None,\n batch_size=1) \n self.cindices = flowgen.class_indices\n print('Done loading.')", "def get_loaders(img_size=CONFIG[\"matrix_size\"], batch_size=CONFIG[\"batch_size\"],\n used_keypoints=CONFIG[\"used_keypoints\"], interpolation_frames=CONFIG[\"interpolation_frames\"],\n noise_frames=CONFIG[\"noise_frames\"], all_data=None, all_labels=None):\n\n if all_data is None or all_labels is None:\n all_data, all_labels = load_video_data_labels(interpolation_frames, noise_frames, used_keypoints, img_size)\n\n p = np.random.permutation(len(all_data))\n train_len = int(len(p) / 80)\n others_len = int((len(p) - train_len) / 2)\n\n train_data, train_labels = all_data[p[:train_len]], all_labels[p[:train_len]]\n val_data = all_data[p[train_len:train_len + others_len]]\n val_labels = all_labels[p[train_len:train_len + others_len]]\n test_data, test_labels = all_data[p[-others_len:]], all_labels[p[-others_len:]]\n\n # Transform to tensor\n train_data_tensor, train_labels_tensor = torch.from_numpy(train_data), torch.from_numpy(train_labels)\n val_data_tensor, val_labels_tensor = torch.from_numpy(val_data), torch.from_numpy(val_labels)\n test_data_tensor, test_labels_tensor = torch.from_numpy(test_data), torch.from_numpy(test_labels)\n\n # Data Loader for easy mini-batch return in training, load the Dataset from the numpy arrays\n train_loader = DataLoader(TensorDataset(train_data_tensor, train_labels_tensor), batch_size=batch_size)\n val_loader = DataLoader(TensorDataset(val_data_tensor, val_labels_tensor), batch_size=batch_size)\n test_loader = DataLoader(TensorDataset(test_data_tensor, test_labels_tensor), batch_size=batch_size)\n\n data = {\"train_data\": train_data,\n \"train_labels\": train_labels,\n \"val_data\": val_data,\n \"val_labels\": val_labels,\n \"test_data\": test_data,\n \"test_labels\": test_labels,\n \"all_data\": all_data[p],\n \"all_labels\": all_labels[p]}\n\n return data, train_loader, val_loader, test_loader", "def get_test_loader(id_list = './data/sample_submission.csv', root_dir = './data/test/'):\n data = HumanProteinDataset(id_list, root_dir, transform = transforms.Compose([\n Rescale((256, 256)), \n ToTensor()\n ]))\n\n indices = np.arange(len(data))\n dataloader_test = DataLoader(data, batch_size=10, num_workers=5)\n\n return dataloader_test", "def _load_raw_datashards(shard_num, nb_collaborators): \n train_obj = torchvision.datasets.CIFAR10('~/.CIFAR10', train=True, download=True) \n test_obj = torchvision.datasets.CIFAR10('~/.CIFAR10', train=False, download=True) \n x_train = train_obj.data\n y_train = np.asarray(train_obj.targets)\n 
x_test = test_obj.data\n y_test = np.asarray(test_obj.targets)\n # fix the label dimension to be (N,)\n y_train = y_train.reshape(-1)\n y_test = y_test.reshape(-1) \n \n # create the shards\n X_train_shards = x_train[shard_num::nb_collaborators]\n y_train_shards = y_train[shard_num::nb_collaborators]\n \n X_test_shards = x_test[shard_num::nb_collaborators]\n y_test_shards = y_test[shard_num::nb_collaborators]\n return (X_train_shards, y_train_shards), (X_test_shards, y_test_shards)", "def load_cifar10s(data_dir, use_augmentation=False, aux_take_amount=None, \n aux_data_filename='/cluster/scratch/rarade/cifar10s/ti_500K_pseudo_labeled.pickle', \n aux_take_ids_path=None,\n validation=False):\n data_dir = re.sub('cifar10s', 'cifar10', data_dir)\n test_transform = transforms.Compose([transforms.ToTensor()])\n if use_augmentation:\n train_transform = transforms.Compose([transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(0.5), \n transforms.ToTensor()])\n else: \n train_transform = test_transform\n \n train_dataset = SemiSupervisedCIFAR10(base_dataset='cifar10', root=data_dir, train=True, download=True, \n transform=train_transform, aux_data_filename=aux_data_filename, \n add_aux_labels=True, aux_take_amount=aux_take_amount, aux_take_ids_path=aux_take_ids_path,\n validation=validation)\n test_dataset = SemiSupervisedCIFAR10(base_dataset='cifar10', root=data_dir, train=False, download=True, \n transform=test_transform)\n if validation:\n val_dataset = torchvision.datasets.CIFAR10(root=data_dir, train=True, download=True, transform=test_transform)\n val_dataset = torch.utils.data.Subset(val_dataset, np.arange(0, 1024)) \n return train_dataset, test_dataset, val_dataset\n return train_dataset, test_dataset", "def load_data(label_mode='fine'):\n if label_mode not in ['fine', 'coarse']:\n raise ValueError('`label_mode` must be one of `\"fine\"`, `\"coarse\"`.')\n\n dirname = 'cifar-100-python'\n origin = 'http://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz'\n path = get_file(dirname, origin=origin, untar=True)\n\n fpath = os.path.join(path, 'train')\n x_train, y_train = load_batch(fpath, label_key=label_mode + '_labels')\n\n fpath = os.path.join(path, 'test')\n x_test, y_test = load_batch(fpath, label_key=label_mode + '_labels')\n\n y_train = np.reshape(y_train, (len(y_train), 1))\n y_test = np.reshape(y_test, (len(y_test), 1))\n\n # Rescale raw data\n x_train = x_train.astype('float32')\n x_test = x_test.astype('float32')\n\n x_train /= 255.\n x_test /= 255.\n\n if K.image_data_format() == 'channels_last':\n x_train = x_train.transpose(0, 2, 3, 1)\n x_test = x_test.transpose(0, 2, 3, 1)\n\n return (x_train, y_train), (x_test, y_test)", "def _build_datasets(self):\n self._build_datasets_sis3302()\n self._build_datasets_sis3305()", "def create_trainval_dataloaders(params):\n # ----------------Create Dataset objects and Dataloaders----------------\n mr_dataset_train, tokenizer = get_dataset(params, run_mode=\"train\")\n params.vocab_size = tokenizer.get_vocab_size()\n print(\"SystemLog: Vocab size used for training is %d\" % (params.vocab_size))\n print(\"SystemLog: Number of items in the train dataset=%d\" % len(mr_dataset_train))\n sys.stdout.flush()\n # Collate Function pads the sequences to have a uniform length for the entire batch\n mr_dataloader_train = DataLoader(mr_dataset_train, batch_size=params.batch_size,\n shuffle=True, num_workers=params.num_workers, collate_fn=CollateMRSequence(params.architecture))\n\n mr_dataset_valid, _ = get_dataset(params, 
run_mode=\"valid\")\n print(\"SystemLog: Number of items in the valid dataset=%d\" % len(mr_dataset_valid))\n mr_dataloader_valid = DataLoader(mr_dataset_valid, batch_size=params.batch_size_validation,\n shuffle=False, num_workers=0, collate_fn=CollateMRSequence(params.architecture))\n\n return mr_dataset_train, mr_dataloader_train, mr_dataset_valid, mr_dataloader_valid", "def train(self, train_loader):\n pass", "def get_loader(image_dir, attr_path, selected_attrs, crop_size=178, image_size=128, \n batch_size=16, dataset='CelebA', mode='train', num_workers=0):\n transform = []\n if mode == 'train':\n transform.append(T.RandomHorizontalFlip())\n transform.append(T.CenterCrop(crop_size))\n transform.append(T.Resize(image_size))\n transform.append(T.ToTensor())\n transform.append(T.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)))\n transform = T.Compose(transform)\n\n if dataset == 'CelebA':\n dataset = CelebA(image_dir, attr_path, selected_attrs, transform, mode)\n elif dataset == 'RaFD':\n dataset = ImageFolder(image_dir, transform)\n\n data_loader = data.DataLoader(dataset=dataset,\n batch_size=batch_size,\n shuffle=(mode=='train'),\n num_workers=num_workers)\n return data_loader", "def load_celebA():\n channel_mean = [0.50003925, 0.42008925, 0.37377534]\n channel_std = [0.30878809, 0.28794379, 0.28661432]\n\n dataset = CelebA(transform=transforms.Compose([\n transforms.CenterCrop(178),\n transforms.Resize(64),\n transforms.ToTensor(),\n transforms.Normalize(channel_mean, channel_std)\n ]))\n\n setattr(dataset, \"mean\", channel_mean)\n setattr(dataset, \"std\", channel_std)\n\n loader_params = {\n \"num_workers\": 2,\n \"shuffle\": True,\n \"batch_size\": 128\n } \n\n train = DataLoader(dataset, **loader_params)\n\n return train", "def Cifar10_preload_and_split(path=None, splits=[0.4, 0.1, 0.25, 0.25], transform=None):\n\n if path is None:\n path = DATASETS_DIR\n index_file = os.path.join(path, 'cifar10.index.csv')\n\n indices = None\n if os.path.exists(index_file):\n index_csv = np.loadtxt(index_file)\n indices = torch.tensor(index_csv)\n print('Found predefined indexing file {}'.format(index_file))\n \n trainset = torchvision.datasets.CIFAR10(path, train=True, transform=transform[0], download=False)\n testset = torchvision.datasets.CIFAR10(path, train=False, transform=transform[0], download=False)\n fullset = ConcatDataset([trainset, testset])\n print('Initializing CIFAR10Dataset splits')\n \n # Currently five equal splits\n dset_size = fullset.cumulative_sizes[-1]\n int_splits = []\n for i in range(len(splits)):\n int_splits.append(int(dset_size * splits[i]))\n if sum(int_splits) < dset_size:\n rem = dset_size - sum(int_splits)\n int_splits[-1] += rem\n\n indices, splitsets = dataset_split(fullset, int_splits, indices=indices)\n\n if not os.path.exists(index_file):\n print('No predefined indexing file found, so index permutations saving to {}'.format(index_file))\n np.savetxt(index_file, indices.numpy(), fmt='%i', delimiter=',')\n\n print('Finished splitting data.')\n\n return splitsets", "def load_cifar100(data_path=None, data_home=None, subsets=None,\n label_mode='fine'):\n if data_path is None:\n data_path = _utils.validate_data_home(data_home)\n data_path /= 'cifar-100-python.tar.gz'\n url = 'https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz'\n _ds_utils.get_file(data_path, url)\n \n if subsets is None:\n subsets = ['training', 'test']\n subsets = _ds_utils.validate_tvt(subsets, return_list=True)\n\n label_mode = _utils.validate_option(label_mode, ['fine', 'coarse'],\n 
name='label_mode')\n \n X, Y = [], []\n with arlib.open(data_path) as ar:\n for subset in subsets:\n if subset == 'training':\n name = [x for x in ar.member_names if x.endswith('train')]\n elif subset == 'test':\n name = [x for x in ar.member_names if x.endswith('test')]\n else:\n raise ValueError('Subset:', subset, ' not supported.')\n assert len(name) == 1\n name = name[0]\n tmp = _load_cifar_batch(ar.open_member(name, 'rb'),\n label_key=label_mode + '_labels')\n X.append(tmp[0])\n Y.append(tmp[1])\n return np.concatenate(X), np.concatenate(Y)", "def prepare_train_coco_data(args):\n image_dir, annotation_file, data_dir = args.train_coco_image_dir, args.train_coco_annotation_file, args.train_coco_data_dir\n batch_size = args.batch_size\n basic_model = args.basic_model\n num_roi = args.num_roi\n\n coco = COCO(annotation_file)\n\n img_ids = list(coco.imgToAnns.keys())\n img_files = []\n img_heights = []\n img_widths = []\n anchor_files = []\n gt_classes = []\n gt_bboxes = []\n\n for img_id in img_ids:\n img_files.append(os.path.join(image_dir, coco.imgs[img_id]['file_name'])) \n img_heights.append(coco.imgs[img_id]['height']) \n img_widths.append(coco.imgs[img_id]['width']) \n anchor_files.append(os.path.join(data_dir, os.path.splitext(coco.imgs[img_id]['file_name'])[0]+'_'+basic_model+'_anchor.npz')) \n\n classes = [] \n bboxes = [] \n for ann in coco.imgToAnns[img_id]: \n classes.append(coco_category_to_class[ann['category_id']]) \n bboxes.append([ann['bbox'][1], ann['bbox'][0], ann['bbox'][3]+1, ann['bbox'][2]+1]) \n\n gt_classes.append(classes) \n gt_bboxes.append(bboxes) \n \n print(\"Building the training dataset...\")\n dataset = DataSet(img_ids, img_files, img_heights, img_widths, batch_size, anchor_files, gt_classes, gt_bboxes, True, True)\n print(\"Dataset built.\")\n return coco, dataset", "def dataloaders():\n # train data path\n data_train = '../dataset/train/'\n # set transformations\n train_transforms = transforms.Compose([\n transforms.Resize((224, 224)),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])])\n \n train_data = datasets.ImageFolder(data_train, transform = train_transforms)\n trainloader = torch.utils.data.DataLoader(train_data, batch_size = 16, shuffle = True)\n \n return trainloader", "def cifar10(transform=transforms.ToTensor()):\n\n\t# Directories\n\tscript_dir = os.path.dirname(os.path.realpath(__file__))\n\tdata_dir = os.path.join(script_dir, 'data', 'cifar10')\n\n\t# Load training set, downloading if necessary\n\tdataset = datasets.CIFAR10(data_dir, train=True, transform=transform, download=True)\n\n\t# Split into train, val, and test sets\n\n\t# Add 'classes' list and 'class_to_idx' lookup dictionary to both sets\n\tclasses = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', \n\t\t'horse', 'ship', 'truck']\n\tclass_to_idx = {classes[i]: i for i in range(len(classes))}\n\n\tdataset.classes = classes\n\tdataset.class_to_idx = class_to_idx\n\n\treturn dataset", "def dataloader(self):\n\n # load / split data\n train_data = self.data.get_train_data()\n if self.args.use_dev:\n train_data, dev_data = self.data.split_data(train_data)\n test_data = self.data.get_test_data()\n\n #print(train_data[0])\n #print(dev_data[0])\n #print(test_data[0])\n\n # build dataset\n train_dataset = self.loader.build_dataset(\n train_data, \n self.args.train_max_seq_len)\n train_loader = self.loader.build_dataloader(\n train_dataset, 'train')\n\n test_dataset = self.loader.build_dataset(\n test_data,\n 
self.args.eval_max_seq_len)\n test_loader = self.loader.build_dataloader(\n test_dataset, 'test')\n\n if self.args.use_dev:\n dev_dataset = self.loader.build_dataset(\n dev_data,\n self.args.eval_max_seq_len)\n dev_loader = self.loader.build_dataloader(\n dev_dataset, 'dev')\n return train_loader, dev_loader, test_loader\n else:\n return train_loader, test_loader", "def load_data5():\n# dirname = 'cifar-10-batches-py'\n# origin = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'\n# path = get_file(dirname, origin=origin, untar=True)\n# path= './cifar-10-batches-py'\n (x_train, y_train), (x_test, y_test) = cifar10.load_data()\n# Below shows a test class has 999 examples instead of the claimed 1000\n# tclasscount=np.zeros((10,), dtype=int)\n# for i in range(0, len(y_test)-1):\n# tclasscount[y_test[i][0]]= tclasscount[y_test[i][0]] + 1\n# print('Test class count',tclasscount)\n num_train_samples = 50000\n num_5_class = 25000\n num_5_test = 4999 # should be 5000 if all the categories had 1000 in them but they do not. One is missing.\n print('x_train shape orig:', x_train.shape)\n print('More:', x_train.shape[1:])\n print('y_test shape',y_test.shape)\n\n x5_train = np.empty((num_5_class, 32, 32, 3), dtype='uint8')\n y5_train = np.empty((num_5_class,), dtype='uint8')\n\n count=0\n\n for i in range(0, len(y_train)-1):\n if (y_train[i][0] == 2) or (y_train[i][0] == 3) or (y_train[i][0] == 4) or (y_train[i][0] == 5) or (y_train[i][0] == 7):\n x5_train[count]=x_train[i]\n y5_train[count]=y_train[i]\n count=count+1\n \n # find test data of interest\n count=0\n x5_test=np.empty((num_5_test, 32, 32, 3), dtype='uint8')\n y5_test= np.empty((num_5_test,), dtype='uint8')\n\n for i in range(0, len(y_test)-1):\n if (y_test[i][0] == 2) or (y_test[i][0] == 3) or (y_test[i][0] == 4) or (y_test[i][0] == 5) or (y_test[i][0] == 7):\n x5_test[count]=x_test[i]\n y5_test[count]=y_test[i]\n count=count+1\n# Below shows class 7 is only 999 and not 1000 examples!!! 
One horse got away it seems.\n# if(y_test[i][0] == 2):\n# c2=c2+1\n# if(y_test[i][0] == 3):\n# c3=c3+1\n# if(y_test[i][0] == 4):\n# c4=c4+1\n# if(y_test[i][0] == 5):\n# c5=c5+1\n# if(y_test[i][0] == 7):\n# c7=c7+1\n# print('c2count, c3count, c4count, c5count, c7count',c2,c3,c3,c5,c7)\n# print('y5tstshape',y5_test.shape, count)\n# print('y5tst',y5_test)\n# return (x_train, y_train), (x_test, y_test)\n return (x5_train, y5_train), (x5_test, y5_test)", "def load_crawl():\n\n\tmodule_path = dirname(__file__)\n\twith open(join(module_path, 'data', 'train2.csv')) as csv_file:\n\t\tdata_file = csv.reader(csv_file)\n\t\ttemp = next(data_file)\n\t\tglobal n_samples\n\t\tn_samples = int(temp[0])\n\t\tglobal n_features\n\t\tn_features = int(temp[1])\n\t\tprint \"n samples \" + str((n_samples))\n\t\tprint \"n_features\" + str((n_features))\n\t\ttarget_names = np.array(temp[2:4])\n\t\tdata = np.empty((n_samples, n_features))\n\t\ttarget = np.empty((n_samples,), dtype=np.int)\n\n\t\tfor count, value in enumerate(data_file):\n\t\t\tdata[count] = np.asarray(value[:-1], dtype=np.float)\n\t\t\ttarget[count] = np.asarray(value[-1], dtype=np.int)\n\t\t\t#print \"data is \" + str(data[count])\n\t\t\t#print \"target is \" + str(target[count])\n\t\tprint \"Number of target records is \" + str(len(target))\n\t#with open(join(module_path, 'descr', 'train.rst')) as rst_file:\n\t#\tfdescr = rst_file.read()\n\n\treturn Bunch(data=data, target=target,\n\t\t\t target_names=target_names,\n\t\t\t DESCR=None,\n\t\t\t feature_names = ['evalCount', 'setInterval', 'setTimeout', 'link', \n\t\t\t\t\t\t\t 'search', 'exec','escape', 'unescape', 'ratio', \n\t\t\t\t\t\t\t 'emtropyAvg', 'entropyScript', 'longStrings', \n\t\t\t\t\t\t\t 'maxEntropy', 'stringAvg', 'maxLength', 'longVarFunc', \n\t\t\t\t\t\t\t 'stringAssignments', 'stringModFuncsCount', 'eventFuncsCount', \n\t\t\t\t\t\t\t 'domModFuncsCounter', 'suspStrings', 'whiteSpaceRatio', \n\t\t\t\t\t\t\t 'hexaStrings', 'maxNonPrintableCharactersinString', 'lineAvg', \n\t\t\t\t\t\t\t 'iframeCount', 'malTagCount', 'jsLength'])", "def get_cifar10_c(dataset_root, batch_size, train, noise_type):\n \n # image pre-processing\n pre_process = transforms.Compose([transforms.Resize(28),\n transforms.ToTensor(),\n transforms.Normalize(\n mean=(0.491, 0.482, 0.446), \n std=(0.202, 0.199, 0.201)\n )\n ])\n\n # datasets and data loader\n cifar10_c_dataset = GetLoader(\n data_root=os.path.join(dataset_root),\n noise_type=noise_type,\n transform=pre_process)\n \n print(\"dataset len: \",len(cifar10_c_dataset))\n\n train_size = int(0.8 * len(cifar10_c_dataset))\n test_size = len(cifar10_c_dataset) - train_size\n\n train_dataset, test_dataset = torch.utils.data.random_split(cifar10_c_dataset, [train_size, test_size])\n print(\"train len: \",len(train_dataset))\n print(\"test len: \",len(test_dataset))\n if train:\n cifar10_c_dataset=train_dataset\n else:\n cifar10_c_dataset=test_dataset\n\n cifar10_c_data_loader = torch.utils.data.DataLoader(\n dataset=cifar10_c_dataset,\n batch_size=batch_size,\n shuffle=True,\n drop_last=True,\n num_workers=8)\n\n return cifar10_c_data_loader", "def get_loader(config):\n train_transform = [T.Resize((256, 128)), T.RandomHorizontalFlip(), T.ToTensor(),\n T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])]\n train_transform = T.Compose(train_transform)\n\n test_transform = [T.Resize((256, 128)), T.ToTensor(),\n T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])]\n test_transform = T.Compose(test_transform)\n\n # Datasets.\n if config.source_dataset in 
['duke'] and config.target_dataset in ['market']:\n source_image_dir = config.duke_image_dir\n target_image_dir = config.market_image_dir\n elif config.source_dataset in ['market'] and config.target_dataset in ['duke']:\n source_image_dir = config.market_image_dir\n target_image_dir = config.duke_image_dir\n else:\n assert 'Dataset not support!'\n source_set = ReidDataset(source_image_dir, train_transform)\n target_set = ReidDataset(target_image_dir, train_transform, config.expanding_cam)\n test_set = ReidDataset(source_image_dir, test_transform)\n\n # Dataloader.\n source_loader = data.DataLoader(dataset=source_set, batch_size=config.batch_size,\n num_workers=config.num_workers, shuffle=True, pin_memory=True, drop_last=True)\n\n target_loader = data.DataLoader(dataset=target_set, batch_size=config.batch_size,\n num_workers=config.num_workers, shuffle=True, pin_memory=True, drop_last=True)\n\n test_loader = data.DataLoader(dataset=test_set, batch_size=config.batch_size, num_workers=config.num_workers,\n shuffle=False, pin_memory=True, drop_last=False)\n\n return {'source_loader': source_loader, 'target_loader': target_loader, 'test_loader': test_loader}", "def create_ds(self, data, is_train=True):\n ds = tf.data.Dataset.from_tensor_slices(data)\n map_fn = lambda x, y: (cifar_process(x, is_train), y)\n ds = ds.map(map_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n return ds", "def get_loader(m,image_dir, attr_path, selected_attrs, crop_size=178, image_size=128, \n batch_size=16, dataset='CelebA', mode='train', num_workers=1):\n transform = []\n transform.append(T.ToTensor())\n #transform.append(T.Normalize(mean=[0.5], std=[0.5]))\n transform = T.Compose(transform)\n\n if dataset == 'CelebA':\n dataset = CelebA(image_dir, attr_path, selected_attrs, transform, mode)\n elif dataset == 'RaFD':\n if m==10:\n dataset = ImageFolder1(image_dir, transform,mode)\n else:\n dataset = ImageFolder(image_dir, transform,mode)\n \n data_loader = data.DataLoader(dataset=dataset,\n batch_size=batch_size,\n shuffle=(mode=='train'),\n num_workers=num_workers)\n return data_loader", "def download():\n\n trainset = torchvision.datasets.CIFAR10(root=paths.raw_cifar10_dir(), train=True, download=True)\n testset = torchvision.datasets.CIFAR10(root=paths.raw_cifar10_dir(), train=False, download=True)\n train_images = numpy.array(trainset.data)\n train_labels = numpy.array(trainset.targets)\n test_images = numpy.array(testset.data)\n test_labels = numpy.array(testset.targets)\n\n assert numpy.max(train_images) == 255\n\n train_images = train_images/255.\n test_images = test_images/255.\n\n utils.write_hdf5(paths.cifar10_train_images_file(), train_images.astype(numpy.float32))\n log('wrote %s' % paths.cifar10_train_images_file())\n utils.write_hdf5(paths.cifar10_test_images_file(), test_images.astype(numpy.float32))\n log('wrote %s' % paths.cifar10_test_images_file())\n utils.write_hdf5(paths.cifar10_train_labels_file(), train_labels.reshape(-1, 1).astype(numpy.int))\n log('wrote %s' % paths.cifar10_train_labels_file())\n utils.write_hdf5(paths.cifar10_test_labels_file(), test_labels.reshape(-1, 1).astype(numpy.int))\n log('wrote %s' % paths.cifar10_test_labels_file())", "def set_data():\r\n #if not os.path.exists(filepath):\r\n #download_data()\r\n metadata = read(filepath + flist[-1])\r\n ndata = metadata['num_cases_per_batch']\r\n ndim = metadata['num_vis']\r\n\r\n data, train, test = {}, {}, {}\r\n data['labels'] = metadata['label_names']\r\n data['ntraindata'] = metadata['num_cases_per_batch'] * 
(len(flist) - 2)\r\n data['ntestdata'] = metadata['num_cases_per_batch']\r\n data['ndim'] = metadata['num_vis']\r\n\r\n train['x'], train['y'] = convert_train(data['ntraindata'], data['ndim'])\r\n\r\n testdata = read(filepath + flist[-2])\r\n test['x'] = testdata['data']\r\n test['y'] = testdata['labels']\r\n\r\n data['train'], data['test'] = train, test\r\n save_pkl(data)", "def cifar100(path, label_mode='fine'):\n def _load_batch(filepath, label_key):\n with open(filepath, 'rb') as f:\n if sys.version_info < (3,):\n d = cPickle.load(f)\n else:\n d = cPickle.load(f, encoding='bytes')\n d_decoded = {} # decode utf8\n for k, v in six.iteritems(d):\n d_decoded[k.decode('utf8')] = v\n d = d_decoded\n images = d['data']\n labels = d[label_key]\n images = images.reshape(images.shape[0], 3, 32, 32)\n labels = np.reshape(labels, len(labels,))\n return images, labels\n path = os.path.expanduser(path)\n directory = 'cifar-100-python'\n if not os.path.exists(os.path.join(path, directory)):\n url = 'http://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz'\n maybe_download_and_extract(path, url)\n\n filepath = os.path.join(path, directory, 'train')\n x_train, y_train = _load_batch(filepath, label_mode + '_labels')\n\n filepath = os.path.join(path, directory, 'test')\n x_test, y_test = _load_batch(filepath, label_mode + '_labels')\n return (x_train, y_train), (x_test, y_test)", "def get_dataloaders(logging, batch_size):\n # Load Data\n logging.info(\"Reading Train and Test data...\")\n train_df = pd.read_csv(\"C:/tmp/avila_classification/data/avila-tr.txt\", header=None)\n test_df = pd.read_csv(\"C:/tmp/avila_classification/data/avila-ts.txt\", header=None)\n\n # Fix column names\n col_names = ['col_' + str(j + 1) for j in range(train_df.shape[1] - 1)]\n indep_cols = col_names.copy()\n col_names.append('y')\n\n logging.debug(\"Assigning columns\")\n train_df.columns = col_names\n test_df.columns = col_names\n\n # Encode dependent variable column\n le = LabelEncoder()\n le.fit(train_df['y'])\n logging.debug(f\"Classes: {le.classes_}\")\n logging.debug(f\"Transformed Classes: {le.transform(le.classes_)}\")\n\n train_df['y_enc'] = le.transform(train_df['y'])\n test_df['y_enc'] = le.transform(test_df['y'])\n\n # train_df.head()\n logging.debug(f\"Shape of train data: {train_df.shape}\")\n logging.debug(f\"Shape of test data: {test_df.shape}\")\n\n # Create train and validation dataloaders\n train_ds = AvilaDataset(data_frame=train_df, indep_cols=indep_cols, dep_col='y_enc')\n valid_ds = AvilaDataset(data_frame=test_df, indep_cols=indep_cols, dep_col='y_enc')\n\n # Should be some exponent of 2 (128, 256)\n # batch_size = 256\n train_dl = DataLoader(train_ds, batch_size=batch_size, shuffle=True)\n valid_dl = DataLoader(valid_ds, batch_size=batch_size, shuffle=False)\n\n return train_dl, valid_dl, le", "def get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=1000):\n # Load the raw CIFAR-10 data\n cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'\n\n # Cleaning up variables to prevent loading data multiple times (which may cause memory issue)\n try:\n del X_train, y_train\n del X_test, y_test\n print('Clear previously loaded data.')\n except:\n pass\n\n X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)\n\n # Subsample the data\n mask = list(range(num_training, num_training + num_validation))\n X_val = X_train[mask]\n y_val = y_train[mask]\n mask = list(range(num_training))\n X_train = X_train[mask]\n y_train = y_train[mask]\n mask = list(range(num_test))\n X_test = X_test[mask]\n 
y_test = y_test[mask]\n\n # Normalize the data: subtract the mean image\n mean_image = np.mean(X_train, axis=0)\n X_train -= mean_image\n X_val -= mean_image\n X_test -= mean_image\n\n # Reshape data to rows\n X_train = X_train.reshape(num_training, -1)\n X_val = X_val.reshape(num_validation, -1)\n X_test = X_test.reshape(num_test, -1)\n\n return X_train, y_train, X_val, y_val, X_test, y_test", "def create_data(load=False):\n\n # When loading Pickle\n if load:\n class_mapping = cPickle.load(open(os.path.join(SAVE_PATH, \"class_mapping.p\"), \"rb\"))\n classes_count = cPickle.load(open(os.path.join(SAVE_PATH, \"classes_count.p\"), \"rb\"))\n train_imgs = cPickle.load(open(os.path.join(SAVE_PATH, \"train_imgs.p\"), \"rb\"))\n val_imgs = cPickle.load(open(os.path.join(SAVE_PATH, \"val_imgs.p\"), \"rb\"))\n print('loading pickles')\n return train_imgs, val_imgs, class_mapping, classes_count\n\n pascal_voc = PascalVoc()\n all_imgs, classes_count, class_mapping = pascal_voc.get_data(\"/home/roeih/PascalVoc/VOCdevkit\",\n pascal_data=['VOC2007'])\n\n # Add background class\n if 'bg' not in classes_count:\n classes_count['bg'] = 0\n class_mapping['bg'] = len(class_mapping)\n\n # Create json for the class for future use\n with open('classes.json', 'w') as class_data_json:\n json.dump(class_mapping, class_data_json)\n pprint.pprint(classes_count)\n\n # Shuffle the Data\n random.shuffle(all_imgs)\n\n train_imgs = [s for s in all_imgs if s['imageset'] == 'trainval']\n val_imgs = [s for s in all_imgs if s['imageset'] == 'test']\n print('Num train samples {}'.format(len(train_imgs)))\n print('Num val samples {}'.format(len(val_imgs)))\n return train_imgs, val_imgs, class_mapping, classes_count", "def get_dataset(self):\n\n trainset = datasets.CIFAR100('datasets/CIFAR100/train/', train=True, transform=self.train_transforms,\n target_transform=None, download=True)\n valset = datasets.CIFAR100('datasets/CIFAR100/test/', train=False, transform=self.val_transforms,\n target_transform=None, download=True)\n\n return trainset, valset", "def prepare_data(self,d):\n train_loaders, train_iters = {}, {}\n unlabeled_loaders, unlabeled_iters = {}, {}\n for domain in opt.domains:\n #CONVERT TO FLOAT32\n features, target = torch.from_numpy(d[domain].X.todense().astype('float32')), torch.from_numpy(d[domain].y)#.reshape((-1,1))\n train = data_utils.TensorDataset(features,target)\n train_loaders[domain] = DataLoader(train, opt.batch_size, shuffle = True)\n train_iters[domain] = iter(train_loaders[domain])\n for domain in opt.unlabeled_domains:\n features, target = torch.from_numpy(d[domain].X.todense().astype('float32')), torch.from_numpy(d[domain].y)#.reshape(-1,1))\n uset = data_utils.TensorDataset(features,target)\n unlabeled_loaders[domain] = DataLoader(uset,opt.batch_size, shuffle = True)\n unlabeled_iters[domain] = iter(unlabeled_loaders[domain])\n \n return train_loaders, train_iters, unlabeled_loaders, unlabeled_iters", "def train_test_loaders(dataset, validation_ratio=0.2, **kwargs):\n dataset_size = len(dataset)\n test_size = int(np.floor(validation_ratio * dataset_size))\n train_size = dataset_size - test_size\n print('TRAIN SIZE {}'.format(train_size))\n print('TEST SIZE {}'.format(test_size))\n train_dataset, test_dataset = random_split(dataset, (train_size, test_size),\n generator=torch.Generator().manual_seed(RANDOM_SEED))\n train_loader = torch.utils.data.DataLoader(train_dataset, **kwargs)\n test_loader = torch.utils.data.DataLoader(test_dataset, **kwargs)\n return train_loader, test_loader", "def 
prepare(self):\n bcolz.set_nthreads(2)\n\n # step 0: load only when not loaded yet\n if TRAINING in self.data and VALIDATION in self.data: return\n\n # step 1: load the file names\n patients = sorted(glob.glob(self.location+'/*.*/'))\n print len(patients), \"patients\"\n\n # step 1: load the file names\n # make a stratified validation set\n # note, the seed decides the validation set, but it is deterministic in the names\n random.seed(317070)\n patient_names = [self.patient_name_from_file_name(f) for f in patients]\n validation_patients = random.sample(patient_names, int(VALIDATION_SET_SIZE*len(patient_names)))\n\n labels_as_dict = defaultdict(list)\n\n with open(paths.LUNA_LABELS_PATH, 'rb') as csvfile:\n reader = csv.reader(csvfile, delimiter=',', quotechar='|')\n next(reader) # skip the header\n for row in reader:\n label = (float(row[1]), float(row[2]), float(row[3]), float(row[4]))\n labels_as_dict[str(row[0])].append(label)\n\n # make the static data empty\n for s in self.datasets:\n self.data[s] = []\n self.labels[s] = []\n self.names[s] = []\n self.spacings[s] = []\n self.origins[s] = []\n\n with gzip.open(paths.INTERMEDIATE_DATA_PATH + 'spacings.pkl.gz') as f:\n spacings = cPickle.load(f)\n\n with gzip.open(paths.INTERMEDIATE_DATA_PATH + 'origins.pkl.gz') as f:\n origins = cPickle.load(f)\n\n # load the filenames and put into the right dataset\n for i, patient_folder in enumerate(patients):\n patient_id = str(patient_folder.split(path.sep)[-2])\n if patient_id in validation_patients:\n dataset = VALIDATION\n else:\n dataset = TRAIN\n\n\n label = labels_as_dict[patient_id]\n if self.only_positive and not label:\n continue\n\n self.data[dataset].append(patient_folder)\n self.labels[dataset].append(label)\n self.names[dataset].append(patient_id)\n self.spacings[dataset].append(spacings[patient_id])\n self.origins[dataset].append(origins[patient_id])\n\n # give every patient a unique number\n last_index = -1\n for set in self.datasets:\n self.indices[set] = range(last_index+1,last_index+1+len(self.data[set]))\n if len(self.indices[set]) > 0:\n last_index = self.indices[set][-1]\n print set, len(self.indices[set]), \"samples\"", "def load_data():\r\n train_dataset = h5py.File('train_catvnoncat.h5', \"r\") # Change the directory as per your system\r\n train_set_x_orig = np.array(train_dataset[\"train_set_x\"][:]) # your train set features\r\n train_set_y_orig = np.array(train_dataset[\"train_set_y\"][:]) # your train set labels\r\n\r\n test_dataset = h5py.File('test_catvnoncat.h5', \"r\") # Change the directory as per your system\r\n test_set_x_orig = np.array(test_dataset[\"test_set_x\"][:]) # your test set features\r\n test_set_y_orig = np.array(test_dataset[\"test_set_y\"][:]) # your test set labels\r\n\r\n classes = np.array(test_dataset[\"list_classes\"][:]) # the list of classes\r\n \r\n train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))\r\n test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))\r\n \r\n return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes" ]
[ "0.7297965", "0.71935004", "0.7103292", "0.70913583", "0.6974186", "0.69686836", "0.6958308", "0.68643886", "0.6819047", "0.67896754", "0.6746235", "0.67130595", "0.67096114", "0.6674631", "0.6631123", "0.65942234", "0.6579366", "0.65782934", "0.65747774", "0.65689075", "0.65604925", "0.65158045", "0.6504113", "0.64948", "0.64812875", "0.6460749", "0.6453278", "0.64397603", "0.64286333", "0.6404992", "0.6379206", "0.6368727", "0.63607365", "0.6359681", "0.6343035", "0.6336632", "0.63363457", "0.6324379", "0.6323351", "0.63230443", "0.6318173", "0.63167524", "0.63139826", "0.63044775", "0.62913644", "0.6290019", "0.62662226", "0.62561923", "0.6254163", "0.6249203", "0.6241465", "0.6239892", "0.62374467", "0.62358934", "0.62296706", "0.6226722", "0.6226662", "0.62099457", "0.62074107", "0.62050235", "0.61828226", "0.6181983", "0.6175183", "0.6164156", "0.6144432", "0.6142538", "0.6133517", "0.61331487", "0.6130145", "0.6127777", "0.6125403", "0.6113505", "0.6109234", "0.6103872", "0.6102937", "0.6100808", "0.60833216", "0.60738105", "0.60712737", "0.6068849", "0.60621595", "0.60588014", "0.6051871", "0.6051499", "0.6041302", "0.6040342", "0.60400915", "0.60400546", "0.603877", "0.6033066", "0.60221016", "0.60218847", "0.60191643", "0.601233", "0.6007278", "0.59993225", "0.59941894", "0.59941643", "0.5991086", "0.5988384", "0.59854674" ]
0.0
-1
Return minibatch shapes for contrastive algorithms. It is especially useful when you use batch norm.
def get_shape_for_contrastive_learning(mini_batch_size: int, block_size: int, neg_size: int, dim_h: int) -> tuple:
    batch2input_shape_pos = [mini_batch_size * block_size, 3, 32, 32]
    batch2input_shape_neg = [mini_batch_size * neg_size * block_size, 3, 32, 32]
    output2emb_shape_pos = [mini_batch_size, block_size, dim_h]
    output2emb_shape_neg = [mini_batch_size, neg_size, block_size, dim_h]
    return batch2input_shape_pos, batch2input_shape_neg, output2emb_shape_pos, output2emb_shape_neg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_consistent_shape(images: Iterable):\n dim0s = []\n dim1s = []\n\n for img in images:\n dim0s.append(img.shape[0])\n dim1s.append(img.shape[1])\n\n assert len(set(dim0s)) == 1 and len(set(dim1s)) == 1, 'Inconsistent shapes.'\n\n return dim0s[0], dim1s[0]", "def input_shape(self):\n return [None, 32, 32, 1]", "def input_shape(self):\n return [None, 32, 32, 1]", "def input_shape(self):\n return [None, 32, 32, 1]", "def ggn_factor_inner_shape(self) -> Sequence[int]:\n pass", "def get_img_shape(img):\n if K.image_dim_ordering() == 'th':\n return K.int_shape(img)\n else:\n samples, w, h, c = K.int_shape(img)\n return samples, c, w, h", "def fisher_factor_inner_shape(self) -> Sequence[int]:\n pass", "def _ExtractInputShapes(inputs):\n if context.executing_eagerly():\n return array_ops.shape_n(inputs)\n sizes = []\n fully_known = True\n for x in inputs:\n input_shape = array_ops.shape(x)\n if not isinstance(input_shape,\n tensor.Tensor) or input_shape.op.type != \"Const\":\n fully_known = False\n break\n sizes.append(input_shape)\n\n if fully_known:\n return sizes\n else:\n return array_ops.shape_n(inputs)", "def kernel_shape(self):\n\t\treturn self.weights_shape()", "def first_layer_shape(self):\n if K.BACKEND == 'pytorch':\n if self.lookback == 1:\n return [-1, self.num_ins]\n else:\n return [-1, self.lookback, self.num_ins]\n\n if self.num_input_layers > 1:\n shapes = {}\n for lyr in self.inputs:\n shapes[lyr.name] = lyr.shape\n return shapes\n shape = []\n for idx, d in enumerate(self.nn_layers()[0].input.shape):\n if int(tf.__version__[0]) == 1:\n if isinstance(d, tf.Dimension): # for tf 1.x\n d = d.value\n\n if idx == 0: # the first dimension must remain undefined so that the user may define batch_size\n d = -1\n shape.append(d)\n return shape", "def get_shape_wb(self):\n return self.weights.shape, self.bias.shape", "def input_type_shapes(self):\n return self._input_type_shapes", "def get_shapes(imshp=(1, 1), kshp=(1, 1), subsample=(1, 1),\r\n img_stride=(1, 1), kern_stride=(1, 1)):\r\n return [\r\n #stack only\r\n ((1, 2) + imshp, (1, 2) + kshp, subsample, img_stride, kern_stride),\r\n #batch only\r\n ((3, 1) + imshp, (1, 1) + kshp, subsample, img_stride, kern_stride),\r\n #nkern only\r\n ((1, 1) + imshp, (2, 1) + kshp, subsample, img_stride, kern_stride),\r\n #batch and nkern\r\n ((3, 1) + imshp, (2, 1) + kshp, subsample, img_stride, kern_stride),\r\n #batch and stack\r\n ((3, 2) + imshp, (1, 2) + kshp, subsample, img_stride, kern_stride),\r\n #stack and nkern\r\n ((1, 2) + imshp, (2, 2) + kshp, subsample, img_stride, kern_stride),\r\n #batch, nkern and stack\r\n ((2, 2) + imshp, (2, 2) + kshp, subsample, img_stride, kern_stride),\r\n #batch, nkern and stack\r\n ((3, 2) + imshp, (4, 2) + kshp, subsample, img_stride, kern_stride)\r\n ]", "def get_shapes(imshp=(1, 1), kshp=(1, 1), subsample=(1, 1),\r\n img_stride=(1, 1), kern_stride=(1, 1)):\r\n return [\r\n #stack only\r\n ((1, 2) + imshp, (1, 2) + kshp, subsample, img_stride, kern_stride),\r\n #batch only\r\n ((3, 1) + imshp, (1, 1) + kshp, subsample, img_stride, kern_stride),\r\n #nkern only\r\n ((1, 1) + imshp, (2, 1) + kshp, subsample, img_stride, kern_stride),\r\n #batch and nkern\r\n ((3, 1) + imshp, (2, 1) + kshp, subsample, img_stride, kern_stride),\r\n #batch and stack\r\n ((3, 2) + imshp, (1, 2) + kshp, subsample, img_stride, kern_stride),\r\n #stack and nkern\r\n ((1, 2) + imshp, (2, 2) + kshp, subsample, img_stride, kern_stride),\r\n #batch, nkern and stack\r\n ((2, 2) + imshp, (2, 2) + kshp, subsample, img_stride, 
kern_stride),\r\n #batch, nkern and stack\r\n ((3, 2) + imshp, (4, 2) + kshp, subsample, img_stride, kern_stride)\r\n ]", "def shape_for_keras(data):\n raise NotImplementedError", "def _infer_shape(schema):\n for feature in schema.feature:\n # Currently we infer shape only for features with valency 1.\n if (feature.presence.min_fraction == 1 and\n feature.value_count.min == feature.value_count.max == 1):\n feature.shape.dim.add().size = 1", "def get_target_shape(self):\n if not self.channels_first:\n return [None] + self.w + [self.nclasses]\n else:\n return [None] + [self.nclasses] + self.w", "def minibatch(input_, input_dim, num_kernels=32, kernel_dim=15, name='', reuse=False):\n with tf.variable_scope(name, reuse=reuse):\n W = tf.get_variable('{}/Wmb'.format(name), [input_dim, num_kernels * kernel_dim])\n b = tf.get_variable('{}/bmb'.format(name), [num_kernels * kernel_dim])\n\n x = tf.matmul(input_, W) + b\n activation = tf.reshape(x, (-1, num_kernels, kernel_dim))\n diffs = tf.expand_dims(activation, 3) - tf.expand_dims(tf.transpose(activation, [1, 2, 0]), 0)\n abs_diffs = tf.reduce_mean(tf.abs(diffs), 2)\n minibatch_features = tf.reduce_mean(tf.exp(-abs_diffs), 2)\n\n return minibatch_features", "def show_shape_metrics(shape):", "def compute_shape(image_shape, pyramid_levels):\n image_shape = np.array(image_shape[:2])\n image_shapes = [(image_shape + 2 ** x - 1) // (2 ** x) for x in pyramid_levels]\n return image_shapes", "def infer_shape(self, node, input_shapes):\r\n \"\"\"TODO: Your code here\"\"\"\r\n assert len(input_shapes) == 1\r\n N, C, H, W = input_shapes[0]\r\n p_H = (H + 2 * self.padding - self.kernel_H) / self.stride + 1\r\n p_W = (W + 2 * self.padding - self.kernel_W) / self.stride + 1\r\n return (N, C, p_H, p_W)", "def basisShape(bases):\n \n shape = []\n size = 1 \n for b in bases:\n if np.isscalar(b):\n shape.append(1) # singleton dimension\n else:\n shape.append(b.nb) # the basis dimension\n size = size * b.nb \n \n return tuple(shape), size", "def standard_discriminator(images):\n activation = tf.nn.leaky_relu\n outputs = 2 * images - 1\n outputs = tf.layers.conv2d(outputs, 64, 4, strides=2, activation=activation)\n for num_filters in [128, 256, 512]:\n strides = 2\n if num_filters == 512:\n strides = 1\n outputs = tf.layers.conv2d(outputs, num_filters, 4, strides=strides, use_bias=False)\n outputs = activation(instance_norm(outputs))\n return tf.layers.conv2d(outputs, 1, 1)", "def minibatcher(inputs, targets, batchsize, shuffle=False):", "def get_minibatch(roidb, num_classes):\n num_images = len(roidb)\n # Sample random scales to use for each image in this batch\n random_scale_inds = npr.randint(0, high=len(cfg.TRAIN.SCALES),\n size=num_images)\n assert(cfg.TRAIN.BATCH_SIZE % num_images == 0), \\\n 'num_images ({}) must divide BATCH_SIZE ({})'. 
\\\n format(num_images, cfg.TRAIN.BATCH_SIZE)\n rois_per_image = cfg.TRAIN.BATCH_SIZE / num_images\n fg_rois_per_image = np.round(cfg.TRAIN.FG_FRACTION * rois_per_image)\n\n # Get the input image blob, formatted for caffe\n im_blob, im_scales, gt_boxes = _get_image_blob(roidb, random_scale_inds)\n\n blobs = {'data': im_blob}\n\n assert len(roidb) == 1, \"Single batch only\"\n # print 'gt_boxes when getting minibatch:', gt_boxes\n blobs['gt_boxes'] = gt_boxes\n blobs['im_info'] = np.array(\n [[im_blob.shape[2], im_blob.shape[3], im_scales[0]]],\n dtype=np.float32)\n\n return blobs", "def image_shape(self):\n return tuple(self._img_shape)", "def output_shape(self) ->torch.Size:\n input_shape = self.input_shape\n if self._reduce_mode in {None, 'none', 'None'}:\n return input_shape\n elif self._reduce_mode == 'concat':\n if len(input_shape) > 1:\n return input_shape[:-2] + (input_shape[-1] * input_shape[-2],)\n return input_shape\n else:\n return input_shape[1:]", "def gencastshapes():\n for n in range(32):\n yield [n]\n ndim = randrange(4, 6)\n minshape = 1 if randrange(100) > 80 else 2\n yield [randrange(minshape, 5) for _ in range(ndim)]\n ndim = randrange(2, 4)\n minshape = 1 if randrange(100) > 80 else 2\n yield [randrange(minshape, 5) for _ in range(ndim)]", "def gencastshapes():\n for n in range(32):\n yield [n]\n ndim = randrange(4, 6)\n minshape = 1 if randrange(100) > 80 else 2\n yield [randrange(minshape, 5) for _ in range(ndim)]\n ndim = randrange(2, 4)\n minshape = 1 if randrange(100) > 80 else 2\n yield [randrange(minshape, 5) for _ in range(ndim)]", "def shape(self):\n\n result = list(self.data.shape)\n included = np.count_nonzero(self.mask.to_array(dtype=bool))\n #iteration stops when producer or mask runs out\n result[self.axis] = min(self.data.shape[self.axis], included)\n\n return tuple(result)", "def g_factor_shape(self) -> tuple[int, int]:\n dim0_size = self.module.weight.size(0) # type: ignore\n if self.parallelism == 'output':\n x = dim0_size * self.model_parallel_world_size\n else:\n x = dim0_size\n return (x, x)", "def a_factor_shape(self) -> tuple[int, int]:\n dim1_size = self.module.weight.size(1) # type: ignore\n if self.parallelism == 'input':\n x = (dim1_size * self.model_parallel_world_size) + int(\n self.has_bias(),\n )\n else:\n x = dim1_size + int(self.has_bias())\n return (x, x)", "def compute_backbone_shapes(config, image_shape):\n # Currently supports ResNet only\n assert config.BACKBONE in [\"resnet50\", \"resnet101\"]\n return np.array(\n [[int(math.ceil(image_shape[0] / stride)),\n int(math.ceil(image_shape[1] / stride))]\n for stride in config.BACKBONE_STRIDES])", "def get_style_predict_executor_shape(self):\n return self.style_predict_executor.get_shape()", "def getstartingshape(vals):\n \n return 1", "def estimate_input_shape(input_sample):\n if len(input_sample) == 1: # ligand-only\n if len(input_sample[0].shape[1:]) == 1: # for fingerprint: vector\n return input_sample[0].shape[1]\n else:\n return input_sample[0].shape[1:] # for smiles: matrix\n else: # kinase-informed\n if len(input_sample[0].shape[1:]) == 1: # for hash + composition: vectors\n return (input_sample[0].shape[1], input_sample[1].shape[1])\n else:\n return (\n input_sample[0].shape[1:],\n input_sample[1].shape[1:],\n ) # for seq.: matrix", "def batch_size(self):\n return self._first_rgb.shape[0]", "def get_args_shape(self):\n if not self.is_built:\n raise RuntimeError(\"A plan needs to be built before input shapes can be known.\")\n\n return [ph.expected_shape for ph in 
self.role.input_placeholders()]", "def concrete_shape(self):\n return tuple(int(x) for x in self.shape)", "def FoldBatchNorms(graph):\n _FoldFusedBatchNorms(graph)\n _FoldUnfusedBatchNorms(graph)", "def obtain_training_set_shape(para, alg):\n \n \n # Preliminaries\n z = os.listdir('Images/shapeset') # image directory\n box_how = [] # the ratio of box's height over its width\n omega = np.load('omega' + alg + '.npy') # load parameters\n \n # Establish a typical bounding box shape\n for i in range(len(z)):\n tu = img.imread('Images/shapeset/' + z[i])\n tu_b = obtain_testing_y(tu, omega, alg)\n tu_b = tu_b.astype(np.uint8) # convert binary image to a format that @findContours can process\n \n # find contours of objects with wanted color\n contours, hierachy = cv2.findContours(tu_b, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n # the binary image will be replaced by this binary contour image\n cv2.drawContours(tu_b, contours, -1, (255, 0, 0), 3) # -1 = draw all contours, (color), thickness of contour lines\n \n # get contours edges, namely bounding box\n tu_b = label(tu_b) # label connected regions of an integer array, so that unconnected contours will considered as separate regions\n region = regionprops(tu_b) # identify regions of the labeled image\n rc = [] # region's centroids\n\n # get rid of tiny regions\n for prop in region.copy():\n if prop.bbox_area < para.bbox_area:\n region.remove(prop)\n else:\n rc.append(prop.centroid)\n \n # get rid of repeated regions\n ind = sorted(range(len(rc)), key = rc.__getitem__) # store element indices of local_centroid tuples before sorting\n rs = sorted(rc) # sorted region\n\n rdel = [] # repeated regions to be deleted\n for i in range(0, len(rs) - 1):\n if abs(rs[i+1][0] - rs[i][0]) < para.cent_dif and abs(rs[i+1][1] - rs[i+1][1]) < para.cent_dif:\n rdel.append(region.copy().pop(ind[i+1]))\n \n for i in range(len(rdel)):\n region.remove(rdel[i])\n \n # since only 1 object, only 1 region should be identified\n if len(region) > 1:\n for i in range(len(region)):\n print(region[i].centroid, region[i].bbox_area)\n plt.imshow(tu_b, cmap = 'gray')\n fig = plt.get_current_fig_manager()\n fig.window.setGeometry(400, 100, 3000, 2000)\n plt.title('You found more than 1 contour on this image!!!', fontsize = 66)\n else:\n minr, minc, maxr, maxc = region[0].bbox # max/min row/column coordinates\n box_how.append((maxr-minr)/(maxc-minc))\n \n # Store extreme values\n max_ratio = max(box_how)\n min_ratio = min(box_how) \n \n return max_ratio, min_ratio", "def get_blocks_shape(big_array, small_array):\n return tuple([int(b/s) for b, s in zip(big_array, small_array)])", "def get_model_shape(self):\n return self.nlay, self.nrow, self.ncol", "def get_shape(self):\n if not self.channels_first:\n return [None] + self.w + [self.numoffeatures]\n else:\n return [None] + [self.numoffeatures] + self.w", "def get_sample_shape(inputs):\n return tuple(inputs.size())[1:]", "def batch_size(self):\n self.validate_shape_and_dtype()\n return self.rgb.shape[0]", "def __extract_graph_shape(self):\n circuit = UbqcClient.pb_to_circuit(self.program)\n bw_pattern = transpile_to_brickwork(circuit)\n\n # Get shape\n input_ = bw_pattern.input_\n c_out, q_out = bw_pattern.output_\n output_ = c_out + q_out\n width = len(input_)\n depth = output_[0][1] - input_[0][1] + 1\n\n return width, depth", "def get_valid_anchor_weights_in_flattened_image(true_image_shapes, height,\n width):\n\n indices = tf.reshape(tf.range(height * width), [1, -1])\n batch_size = tf.shape(true_image_shapes)[0]\n 
batch_indices = tf.ones((batch_size, 1), dtype=tf.int32) * indices\n\n y_coords, x_coords, _ = row_col_channel_indices_from_flattened_indices(\n batch_indices, width, 1)\n\n max_y, max_x = true_image_shapes[:, 0], true_image_shapes[:, 1]\n max_x = _to_float32(tf.expand_dims(max_x, 1))\n max_y = _to_float32(tf.expand_dims(max_y, 1))\n\n x_coords = _to_float32(x_coords)\n y_coords = _to_float32(y_coords)\n\n valid_mask = tf.math.logical_and(x_coords < max_x, y_coords < max_y)\n\n return _to_float32(valid_mask)", "def get_minibatch(roidb):\n # We collect blobs from each image onto a list and then concat them into a\n # single tensor, hence we initialize each blob to an empty list\n blobs = {k: [] for k in get_minibatch_blob_names()}\n\n # Get the input image blob\n im_blob, im_scales = _get_image_blob(roidb)\n blobs['data'] = im_blob\n\n if cfg.RPN.RPN_ON:\n # RPN-only or end-to-end Faster/Mask R-CNN\n valid = roi_data.rpn.add_rpn_blobs(blobs, im_scales, roidb)\n elif cfg.RETINANET.RETINANET_ON:\n raise NotImplementedError\n else:\n # Fast R-CNN like models trained on precomputed proposals\n valid = roi_data.fast_rcnn.add_fast_rcnn_blobs(blobs, im_scales, roidb)\n return blobs, valid", "def preprocess_image(self, batched_inputs):\n images = [x.to(self.device) for x in batched_inputs]\n norms = [self.normalizer(x) for x in images]\n size = (norms[0].shape[1],norms[0].shape[2])\n images = ImageList.from_tensors(norms, self.backbone.size_divisibility)\n return images, size", "def compute_backbone_shapes(config, image_shape):\n if callable(config.BACKBONE):\n return config.COMPUTE_BACKBONE_SHAPE(image_shape)\n\n # Currently supports ResNet only\n assert config.BACKBONE in [\"resnet50\", \"resnet101\"]\n return np.array(\n [[int(math.ceil(image_shape[0] / stride)),\n int(math.ceil(image_shape[1] / stride))]\n for stride in config.BACKBONE_STRIDES])", "def get_output_shape(self) -> List[int]:\n if -1 not in self.output_shape:\n return self.output_shape\n\n total_input_dims = np.prod(self.input_shape)\n\n dim = 1\n for i in self.output_shape:\n if i != -1:\n dim *= i\n missing_dim = int(total_input_dims / dim)\n\n output_shape = self.output_shape\n for ix, dim in enumerate(output_shape):\n if dim == -1:\n output_shape[ix] = missing_dim\n\n return output_shape", "def compute_prototypes(self):\n # [num train images, 1, embedding size].\n train_embeddings = tf.expand_dims(self.train_embeddings, 1)\n\n # [num train labels, num classes] where each row is a one-hot-encoded label.\n one_hot_train_labels = tf.one_hot(self.episode.train_labels, self.way)\n # [num train labels, num classes, 1].\n one_hot_train_labels = tf.expand_dims(one_hot_train_labels, 2)\n\n # Sums each class' embeddings. 
[num classes, embedding size].\n class_sums = tf.reduce_sum(one_hot_train_labels * train_embeddings, 0)\n\n # The prototype of each class is the average embedding of its train points.\n class_num_images = tf.reduce_sum(one_hot_train_labels, 0) # [way].\n prototypes = class_sums / class_num_images\n return prototypes", "def conver_testing_shape(args):\n testing_shape = [int(args.testing_shape), int(args.testing_shape)]\n return testing_shape", "def get_output_shape(self):\n return []", "def get_embeddings_shape(self):\n return [4 * self.max_tree_len + 2]", "def compute_level_output_shape(n_filters, depth, pool_size, image_shape):\n output_image_shape = np.asarray(np.divide(image_shape, np.power(pool_size, depth)), dtype=np.int32).tolist()\n return tuple([None] + output_image_shape + [n_filters])", "def compute_level_output_shape(n_filters, depth, pool_size, image_shape):\n output_image_shape = np.asarray(np.divide(image_shape, np.power(pool_size, depth)), dtype=np.int32).tolist()\n return tuple([None, n_filters] + output_image_shape)", "def compute_level_output_shape(n_filters, depth, pool_size, image_shape):\n output_image_shape = np.asarray(np.divide(image_shape, np.power(pool_size, depth)), dtype=np.int32).tolist()\n return tuple([None, n_filters] + output_image_shape)", "def __find_net_dims(self):\n\n input_params = INPUT_CHANNELS * INPUT_SIZE ** 2\n net_dims = [input_params]\n for w in self._conv_weights + self._lin_weights:\n net_dims.append(w.shape[0])", "def calculate_image_possibilities():\n\n # Reordering the color ramps in the palette yields 3! combinations\n palette_reorder_possibilities = 6\n\n return len(palettes) * palette_reorder_possibilities * len(grips) * len(pommels) * len(crossguards) * len(blades)", "def get_grid_shape(num_examples):\n height = int(numpy.floor(numpy.sqrt(num_examples)))\n width = int(numpy.ceil(num_examples * 1. 
/ height))\n\n return (height, width)", "def local_thickness(im):\n from skimage.morphology import cube\n if im.ndim == 2:\n from skimage.morphology import square as cube\n dt = spim.distance_transform_edt(im)\n sizes = sp.unique(sp.around(dt, decimals=0))\n im_new = sp.zeros_like(im, dtype=float)\n for r in tqdm(sizes):\n im_temp = dt >= r\n im_temp = spim.distance_transform_edt(~im_temp) <= r\n im_new[im_temp] = r\n # Trim outer edge of features to remove noise\n im_new = spim.binary_erosion(input=im, structure=cube(1))*im_new\n return im_new", "def shapeCompare(*args, **kwargs)->int:\n pass", "def layers_in_shapes(self) -> dict:\n shapes = {}\n\n for lyr in self.layers:\n shapes[lyr.name] = lyr.input_shape\n\n return shapes", "def _collect_input_shape(input_tensors):\n input_tensors = to_list(input_tensors)\n shapes = []\n for x in input_tensors:\n try:\n shapes.append(K.int_shape(x))\n except TypeError:\n shapes.append(None)\n return unpack_singleton(shapes)", "def wavelet_decoded_subbands_shapes(min_shape, max_shape):\n\n levels = int(np.sqrt(max_shape[0] // min_shape[0]))\n shapes = [(min_shape[0] * (np.power(2, i)), min_shape[1] * (np.power(2, i))) for i in range(0, levels + 1)]\n return shapes", "def get_output_shape(self):\n return [s if isinstance(s, int) and s >= 0 else -1 for s in self.incoming_shape[:-1]] + [self.n_units]", "def input_shape(self) ->torch.Size:\n pass", "def image_shape(self) -> np.ndarray:\n return self.__image_shape", "def get_embeddings_shape(self):\n raise NotImplementedError", "def sample_features_shape(self):\n return ()", "def _FindFusedBatchNorms(graph):\n input_pattern = graph_matcher.OpTypePattern('*')\n weight_pattern = graph_matcher.OpTypePattern('*')\n gamma_pattern = graph_matcher.OpTypePattern('*')\n beta_pattern = graph_matcher.OpTypePattern('*')\n mean_pattern = graph_matcher.OpTypePattern('*')\n variance_pattern = graph_matcher.OpTypePattern('*')\n\n conv_pattern = graph_matcher.OpTypePattern(\n 'Conv2D|DepthwiseConv2dNative', inputs=[input_pattern, weight_pattern])\n # MatMul has a Reshape between it and FusedBatchNorm.\n matmul_pattern = graph_matcher.OpTypePattern(\n 'MatMul', inputs=[input_pattern, weight_pattern])\n matmul_reshape_pattern = graph_matcher.OpTypePattern(\n 'Reshape', inputs=[matmul_pattern,\n graph_matcher.OpTypePattern('*')])\n\n conv_batch_norm_pattern = graph_matcher.OpTypePattern(\n 'FusedBatchNorm',\n inputs=[\n conv_pattern, gamma_pattern, beta_pattern, mean_pattern,\n variance_pattern\n ])\n matmul_batch_norm_pattern = graph_matcher.OpTypePattern(\n 'FusedBatchNorm',\n inputs=[\n matmul_reshape_pattern, gamma_pattern, beta_pattern, mean_pattern,\n variance_pattern\n ])\n matmul_bn_output_reshape_pattern = graph_matcher.OpTypePattern(\n 'Reshape',\n inputs=[matmul_batch_norm_pattern,\n graph_matcher.OpTypePattern('*')])\n\n conv_matcher = graph_matcher.GraphMatcher(conv_batch_norm_pattern)\n matmul_matcher = graph_matcher.GraphMatcher(matmul_bn_output_reshape_pattern)\n\n def _GetCommonTensors(match_result, bn_op, bn_input_tensor):\n \"\"\"Gets tensors needed for FusedBatchNormMatch from match_result.\"\"\"\n input_tensor = match_result.get_tensor(input_pattern)\n weight_tensor = match_result.get_tensor(weight_pattern)\n gamma_tensor = match_result.get_tensor(gamma_pattern)\n beta_tensor = match_result.get_tensor(beta_pattern)\n # FusedBatchNorm in training is different from that in inference. It takes\n # empty 'mean' and empty 'variance', and produces the mean and the variance\n # of the batch. 
Therefore, when is_training is true, mean_tensor and\n # variance_tensor point to 1st and 2nd (0-based) output of bn_op,\n # respectively; when is_training is false, they point to bn_op's inputs.\n is_training = bn_op.get_attr('is_training')\n if is_training:\n # FusedBatchNormGrad doesn't compute gradients of the batch_mean and\n # batch_variance outputs, so we need to substitute our own custom\n # gradient.\n # TODO(suharshs, raghuramank): Find a way to avoid needing this hack.\n # pylint: disable=protected-access\n bn_op._set_attr(\n '_gradient_op_type',\n attr_value_pb2.AttrValue(s=compat.as_bytes('FoldFusedBatchNormGrad')))\n # pylint: enable=protected-access\n mean_tensor = bn_op.outputs[1]\n # The batch variance used during forward and backward prop is biased,\n # i.e it is calculated as: V=sum(x(k)-mu)^2/N. For the moving average\n # calculation, the variance is corrected by the term N/N-1 (Bessel's\n # correction). The variance tensor read from FuseBatchNorm has bessel's\n # correction applied, so we undo it here.\n n = math_ops.cast(\n array_ops.size(bn_input_tensor) / array_ops.size(mean_tensor),\n dtypes.float32)\n variance_tensor = bn_op.outputs[2] * (n - 1) / n\n else:\n mean_tensor = match_result.get_tensor(mean_pattern)\n variance_tensor = match_result.get_tensor(variance_pattern)\n return (input_tensor, weight_tensor, gamma_tensor, beta_tensor, mean_tensor,\n variance_tensor)\n\n for match_result in conv_matcher.match_graph(graph):\n layer_op = match_result.get_op(conv_pattern)\n layer_tensor = match_result.get_tensor(conv_pattern)\n bn_op = match_result.get_op(conv_batch_norm_pattern)\n # In the case of convolution the output_tensor is the output of bn_op.\n output_tensor = bn_op.outputs[0]\n\n (input_tensor, weight_tensor, gamma_tensor, beta_tensor, mean_tensor,\n variance_tensor) = _GetCommonTensors(match_result, bn_op, layer_tensor)\n yield _FusedBatchNormMatch(\n layer_op=layer_op,\n bn_op=bn_op,\n output_tensor=output_tensor,\n input_tensor=input_tensor,\n weight_tensor=weight_tensor,\n gamma_tensor=gamma_tensor,\n beta_tensor=beta_tensor,\n mean_tensor=mean_tensor,\n variance_tensor=variance_tensor)\n\n for match_result in matmul_matcher.match_graph(graph):\n layer_op = match_result.get_op(matmul_pattern)\n layer_tensor = match_result.get_tensor(matmul_pattern)\n bn_op = match_result.get_op(matmul_batch_norm_pattern)\n # In the MatMul case, the output of batch norm is reshaped back into a\n # 2D tensor, so the output_tensor is the output of the Reshape op.\n output_reshape_op = match_result.get_op(matmul_bn_output_reshape_pattern)\n output_tensor = output_reshape_op.outputs[0]\n\n (input_tensor, weight_tensor, gamma_tensor, beta_tensor, mean_tensor,\n variance_tensor) = _GetCommonTensors(match_result, bn_op, layer_tensor)\n yield _FusedBatchNormMatch(\n layer_op=layer_op,\n bn_op=bn_op,\n output_tensor=output_tensor,\n input_tensor=input_tensor,\n weight_tensor=weight_tensor,\n gamma_tensor=gamma_tensor,\n beta_tensor=beta_tensor,\n mean_tensor=mean_tensor,\n variance_tensor=variance_tensor)", "def test_reduce_dimensionality(embeddings, shape):\n model = BERTopic()\n umap_embeddings = model._reduce_dimensionality(embeddings)\n assert umap_embeddings.shape == (shape, 5)", "def n_spectra(self):\n return np.product(self.image_shape)", "def createDiscriminator(imgShape):\n model = tf.keras.Sequential()\n\n model.add(layers.Conv2D(32, kernel_size = 3, strides = 2, input_shape = imgShape, padding = \"same\"))\n model.add(layers.LeakyReLU(alpha = 0.2))\n\n 
model.add(layers.Dropout(0.25))\n model.add(layers.Conv2D(64, kernel_size = 3, strides = 2, padding = \"same\"))\n model.add(layers.ZeroPadding2D(padding = ((0,1), (0,1))))\n model.add(layers.BatchNormalization(momentum = 0.8))\n model.add(layers.LeakyReLU(alpha = 0.2))\n\n model.add(layers.Dropout(0.25))\n model.add(layers.Conv2D(128, kernel_size = 3, strides = 2, padding = \"same\"))\n model.add(layers.BatchNormalization(momentum = 0.8))\n model.add(layers.LeakyReLU(alpha = 0.2))\n\n model.add(layers.Dropout(0.25))\n model.add(layers.Conv2D(256, kernel_size = 3, strides = 1, padding = \"same\"))\n model.add(layers.BatchNormalization(momentum = 0.8))\n model.add(layers.LeakyReLU(alpha = 0.2))\n\n model.add(layers.Dropout(0.25))\n model.add(layers.Conv2D(512, kernel_size = 3, strides = 1, padding = \"same\"))\n model.add(layers.BatchNormalization(momentum = 0.8))\n model.add(layers.LeakyReLU(alpha = 0.2))\n\n model.add(layers.Dropout(0.25))\n model.add(layers.Flatten())\n model.add(layers.Dense(1, activation = \"sigmoid\"))\n\n return model", "def input_shape(self):\n return self._ipt_shape", "def shape(self):\n return np.array([self.w, self.h])", "def shape(self):\n for component in ('x', 'y', 'z', 'r', 't'):\n arr = getattr(self, component)\n if arr is not None:\n return arr.shape\n return ()", "def _compute_kratios_multilayers(self):\n for i, layer in enumerate(self._layers.keys()):\n if not layer.is_thickness_known():\n raise ValueError(\"Thickness of layer %i is unknown\" % i)\n\n # Compute\n layer = list(self._layers.keys())[0]\n thickness_low_m = layer.thickness_m\n thickness_high_m = layer.thickness_m * 10\n step = 1\n\n _thicknesses, kratios = \\\n self.compute_kratio_vs_thickness(layer, thickness_low_m,\n thickness_high_m, step)\n\n # Reorganize results\n output = {}\n for experiment, kratio in kratios.items():\n output.setdefault(experiment, kratio[0])\n\n return output", "def inputShape(self):\n return self.input_shape", "def _pixel_shape(self):\n return np.array([1., 1.], dtype=np.float64) * self.pixel_size", "def test_min_matrix_shape(self):\n\n\t\tdetails = self.watcher.describe(min_evals=30)\n\t\tprint(details)\n\n\t\tfor nev in details.num_evals:\n\t\t\tself.assertGreaterEqual(nev, 30)", "def minibatch(self, size):\n indexes = self.sample(size)\n\n pre_states = np.array([self.get_state(index) for index in indexes], dtype=np.float32)\n post_states = np.array([self.get_state(index + 1) for index in indexes], dtype=np.float32)\n actions = self._actions[indexes]\n rewards = self._rewards[indexes]\n dones = self._terminals[indexes]\n\n return pre_states, actions, post_states, rewards, dones", "def test_rasterizer_return_correct_batch_shapes(self, shapes, dtypes,\n enable_cull_face):\n placeholders = self._create_placeholders(shapes, dtypes)\n frame_buffer = rasterization_backend.rasterize(\n placeholders[0], placeholders[1], placeholders[2],\n (self.IMAGE_WIDTH, self.IMAGE_HEIGHT), enable_cull_face,\n self._num_layers, self._backend).layer(0)\n batch_size = shapes[0][0]\n self.assertEqual([batch_size],\n frame_buffer.triangle_id.get_shape().as_list()[:-3])\n self.assertEqual([batch_size],\n frame_buffer.foreground_mask.get_shape().as_list()[:-3])", "def get_raw_image_sizes() -> set:\n sizes = set()\n data = SUNRGBDTrainDataset(True, augment=False)\n for i in range(len(data)):\n sizes.add(data[i][0].shape)\n return sizes", "def resnet101_base(freeze_blocks=[1,2,3], weight_regularizer=None, bias_regularizer=None):\n img_input = Input(shape=(None, None, 3))\n bn_axis = 3\n train1 
= 1 not in freeze_blocks\n x = Conv2D(64, (7, 7), strides=(2, 2), padding='same', name='conv1', trainable=train1, use_bias=False,\n kernel_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)(img_input)\n x = BatchNormalization(axis=bn_axis, name='bn_conv1', trainable=False)(x, training=False)\n x = Scale(axis=bn_axis, name='scale_conv1', trainable=False)(x)\n x = Activation('relu')(x)\n x = MaxPooling2D((3, 3), strides=(2, 2))(x)\n\n train2 = 2 not in freeze_blocks\n x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1), trainable=train2,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer,\n use_conv_bias=False, separate_scale=True)\n x = identity_block(x, 3, [64, 64, 256], stage=2, block='b', trainable=train2,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer,\n use_conv_bias=False, separate_scale=True)\n x = identity_block(x, 3, [64, 64, 256], stage=2, block='c', trainable=train2,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer,\n use_conv_bias=False, separate_scale=True)\n\n train3 = 3 not in freeze_blocks\n x = conv_block(x, 3, [128, 128, 512], stage=3, block='a', trainable=train3,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer,\n use_conv_bias=False, separate_scale=True)\n for i in range(1, 4):\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='b' + str(i), trainable=train3,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer,\n use_conv_bias=False, separate_scale=True)\n\n train4 = 4 not in freeze_blocks\n x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a', trainable=train4,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer,\n use_conv_bias=False, separate_scale=True)\n for i in range(1, 23):\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b' + str(i), trainable=train4,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer,\n use_conv_bias=False, separate_scale=True)\n\n base_model = Model(img_input, x, name='resnet101')\n\n return base_model", "def batch_shape(self) -> torch.Size:\n self._check_if_fitted()\n return torch.Size([self.num_mcmc_samples])", "def test_shape_property():\n atom = ATOMClassifier(X_bin, y_bin, random_state=1)\n atom.run(\"LR\")\n assert atom.lr.shape == atom.shape", "def assemble_softmax_3d(\n patches, spacing, orig_shape, n_classes, method=\"crop\", border=(16, 16, 16)\n):\n\n initial_array = np.zeros([orig_shape[0], orig_shape[1], orig_shape[2], n_classes])\n x_spacing = spacing[0]\n y_spacing = spacing[1]\n z_spacing = spacing[2]\n x_spacing[1:] += 1\n y_spacing[1:] += 1\n z_spacing[1:] += 1\n\n idx = 0\n\n if method == \"sum\":\n for x in x_spacing:\n for y in y_spacing:\n for z in z_spacing:\n patch = patches[idx]\n idx += 1\n\n if x == x_spacing[-1]:\n patch = patch[:-1, :, :, :]\n if y == y_spacing[-1]:\n patch = patch[:, :-1, :, :]\n if z == z_spacing[-1]:\n patch = patch[:, :, :-1, :]\n\n initial_array[\n x : x + patch.shape[0],\n y : y + patch.shape[1],\n z : z + patch.shape[2],\n ] += patch\n if method == \"crop\":\n assert border\n\n for x in x_spacing:\n for y in y_spacing:\n for z in z_spacing:\n patch = patches[idx]\n idx += 1\n patch = patch[\n border[0] : -border[0],\n border[1] : -border[1],\n border[2] : -border[2],\n ]\n\n if x == x_spacing[-1]:\n patch = patch[:-1, :, :, :]\n if y == y_spacing[-1]:\n patch = patch[:, :-1, :, :]\n if z == z_spacing[-1]:\n patch = patch[:, :, :-1, :]\n\n initial_array[\n x : x 
+ patch.shape[0],\n y : y + patch.shape[1],\n z : z + patch.shape[2],\n ] += patch\n\n return initial_array", "def _sample_frcnn_minibatch_per_image(model,\n proposal_boxlist,\n gt_boxlist):\n (loc_targets, loc_weights, cls_targets, cls_weights, msk_targets, _\n ) = model._frcnn_target_assigner.assign(proposal_boxlist, gt_boxlist)\n\n # `cls_weights` is set to ones of shape [max_num_proposals] if all proposals\n # have classification weight being 0.\n cls_weights += tf.to_float(tf.equal(tf.reduce_sum(cls_weights), 0))\n positive_indicator = tf.greater(tf.argmax(cls_targets, axis=1), 0)\n\n # [max_num_proposals], indicator matrix sum to val <= `frcnn_minibatch_size`\n sampled_indicator = model._frcnn_minibatch_sampler_fn(\n tf.cast(cls_weights, tf.bool),\n model._frcnn_minibatch_size,\n positive_indicator)\n\n sampled_indices = tf.reshape(tf.where(sampled_indicator), [-1])\n\n proposal_boxlist.set_field('cls_targets', cls_targets)\n proposal_boxlist.set_field('cls_weights', cls_weights)\n proposal_boxlist.set_field('loc_targets', loc_targets)\n proposal_boxlist.set_field('loc_weights', loc_weights)\n if msk_targets is not None:\n proposal_boxlist.set_field('msk_targets', msk_targets)\n\n return box_list_ops.gather(proposal_boxlist, sampled_indices)", "def compute_kratios(self):\n if len(self._layers) == 0:\n return self._compute_kratios_substrate()\n else:\n return self._compute_kratios_multilayers()", "def getRectangularKernel(size = (5,5)):\n\treturn cv2.getStructuringElement(cv2.MORPH_RECT, size)", "def compute_approx_vram_consumption(patch_size, num_pool_per_axis, base_num_features, max_num_features,\n num_modalities, num_classes, pool_op_kernel_sizes, deep_supervision=False,\n conv_per_stage=2):\n if not isinstance(num_pool_per_axis, np.ndarray):\n num_pool_per_axis = np.array(num_pool_per_axis)\n\n npool = len(pool_op_kernel_sizes)\n\n map_size = np.array(patch_size)\n tmp = np.int64((conv_per_stage * 2 + 1) * np.prod(map_size, dtype=np.int64) * base_num_features +\n num_modalities * np.prod(map_size, dtype=np.int64) +\n num_classes * np.prod(map_size, dtype=np.int64))\n\n num_feat = base_num_features\n\n for p in range(npool):\n for pi in range(len(num_pool_per_axis)):\n map_size[pi] /= pool_op_kernel_sizes[p][pi]\n num_feat = min(num_feat * 2, max_num_features)\n num_blocks = 10 # conv_per_stage + conv_per_stage for the convs of encode/decode and 1 for transposed conv\n tmp += num_blocks * np.prod(map_size, dtype=np.int64) * num_feat\n if deep_supervision and p < (npool - 2):\n tmp += np.prod(map_size, dtype=np.int64) * num_classes\n # ##print(p, map_size, num_feat, tmp)\n return tmp", "def test_invalid_input_shape(self):\r\n for unroll_batch in [None, 1, 3]:\r\n for unroll_kern in [None, 2, 4]:\r\n for unroll_patch in [None, True, False]:\r\n for mode in ['valid', 'full']:\r\n self.assertRaises(ValueError, self.validate,\r\n (3, 2, 8, 8), (4, 2, 5, 5),\r\n mode, N_image_shape=(2, 2, 8, 8),\r\n unroll_batch=unroll_batch,\r\n unroll_kern=unroll_kern,\r\n unroll_patch=unroll_patch)\r\n self.assertRaises(ValueError, self.validate,\r\n (3, 2, 8, 8), (4, 2, 5, 5),\r\n mode, N_image_shape=(3, 1, 8, 8),\r\n unroll_batch=unroll_batch,\r\n unroll_kern=unroll_kern,\r\n unroll_patch=unroll_patch)\r\n self.assertRaises(ValueError, self.validate,\r\n (3, 2, 8, 8), (4, 2, 5, 5),\r\n mode, N_image_shape=(3, 2, 7, 8),\r\n unroll_batch=unroll_batch,\r\n unroll_kern=unroll_kern,\r\n unroll_patch=unroll_patch)\r\n self.assertRaises(ValueError, self.validate,\r\n (3, 2, 8, 8), (4, 2, 5, 5),\r\n mode, 
N_image_shape=(3, 2, 8, 7),\r\n unroll_batch=unroll_batch,\r\n unroll_kern=unroll_kern,\r\n unroll_patch=unroll_patch)\r\n\r\n self.assertRaises(ValueError, self.validate,\r\n (3, 2, 8, 8), (4, 2, 5, 5),\r\n mode, N_filter_shape=(3, 2, 5, 5),\r\n unroll_batch=unroll_batch,\r\n unroll_kern=unroll_kern,\r\n unroll_patch=unroll_patch)\r\n self.assertRaises(ValueError, self.validate,\r\n (3, 2, 8, 8), (4, 2, 5, 5),\r\n mode, N_filter_shape=(4, 1, 5, 5),\r\n unroll_batch=unroll_batch,\r\n unroll_kern=unroll_kern,\r\n unroll_patch=unroll_patch)\r\n self.assertRaises(ValueError, self.validate,\r\n (3, 2, 8, 8), (4, 2, 5, 5),\r\n mode, N_filter_shape=(4, 2, 6, 5),\r\n unroll_batch=unroll_batch,\r\n unroll_kern=unroll_kern,\r\n unroll_patch=unroll_patch)\r\n self.assertRaises(ValueError, self.validate,\r\n (3, 2, 8, 8), (4, 2, 5, 5),\r\n mode, N_filter_shape=(4, 2, 5, 6),\r\n unroll_batch=unroll_batch,\r\n unroll_kern=unroll_kern,\r\n unroll_patch=unroll_patch)", "def preprocess_train_keep_aspect_ratio(im, boxes, classes, inst_masks, mask, min_side, max_side,\n canvas_height, canvas_width,\n use_augment=False, training_scale=[0.3, 0.5, 0.7, 1.0]):\n im, inst_masks, mask, boxes, classes, im_scale = resize_as_min_side(im, inst_masks, mask, boxes, classes,\n min_side=min_side, max_side=max_side)\n\n im, inst_masks, mask, boxes, classes = random_flip(im, inst_masks, mask, boxes, classes)\n if use_augment:\n if np.random.choice([0, 1]) != 0:\n scale = np.random.choice(training_scale) # adding more small objects\n im, inst_masks, mask, boxes, classes = random_scale(im, inst_masks, mask, boxes, classes, scale=scale)\n\n im, inst_masks, mask, boxes, classes = pad_to_canvas(im, inst_masks, mask, boxes, classes,\n canvas_width=canvas_width,\n canvas_height=canvas_height)\n\n # im = distort_color(im)\n im = imcv2_recolor(im)\n\n boxes = np.asarray(boxes, dtype=np.float32)\n inst_masks = np.zeros([1, im.shape[0], im.shape[1]], dtype=inst_masks.dtype) if inst_masks.size == 0 else inst_masks\n return im, boxes, classes, inst_masks, mask, im_scale", "def view_shapes(xm_train, xm_dev, xm_test, ym_train, ym_dev, ym_test):\n\n print('X_train shape: ' + str(xm_train.shape))\n print('Y_train shape: ' + str(ym_train.shape))\n print('X_dev shape: ' + str(xm_dev.shape))\n print('Y_dev shape: ' + str(ym_dev.shape))\n print('X_test shape: ' + str(xm_test.shape))\n print('Y_test shape: ' + str(ym_test.shape))\n print('\\n')", "def conv_mlp_layer_shape(input_shape: tuple, conv_feature_sizes: list, kernel: int, \n stride: int, padding: int, max_pool: tuple):\n input_shape_list = list(input_shape)\n for layer_number in range(len(conv_feature_sizes)):\n input_shape_list[0] = int((((input_shape_list[0] - kernel + 2*padding)/stride) + 1) // max_pool[0])\n input_shape_list[1] = int((((input_shape_list[1] - kernel + 2*padding)/stride) + 1) // max_pool[1])\n \n input_shape_list.insert(0, int(conv_feature_sizes[-1]))\n return tuple(input_shape_list)", "def get_state_shape(s):\n c = _concat(batch_size, s)\n c_static = _concat(batch_size, s, static=True)\n size = array_ops.zeros(c, dtype=dtype)\n size.set_shape(c_static)\n return size", "def testShapes(self, use_bias):\n\n batch_size = random.randint(1, 100)\n in_length = random.randint(10, 288)\n in_channels = random.randint(1, 10)\n out_channels = random.randint(1, 32)\n\n kernel_shape = random.randint(1, 10)\n\n inputs = tf.placeholder(\n tf.float32,\n shape=[batch_size, in_length, in_channels])\n\n conv1 = snt.Conv1D(\n output_channels=out_channels,\n kernel_shape=kernel_shape,\n 
padding=snt.SAME,\n stride=1,\n name=\"conv1\",\n use_bias=use_bias)\n\n output1 = conv1(inputs)\n\n self.assertTrue(\n output1.get_shape().is_compatible_with(\n [batch_size, in_length, out_channels]))\n\n self.assertTrue(\n conv1.w.get_shape().is_compatible_with(\n [kernel_shape, in_channels, out_channels]))\n\n if use_bias:\n self.assertTrue(\n conv1.b.get_shape().is_compatible_with(\n [out_channels]))\n\n conv2 = snt.Conv1D(\n output_channels=out_channels,\n kernel_shape=kernel_shape,\n padding=snt.VALID,\n stride=1,\n name=\"conv2\",\n use_bias=use_bias)\n\n output2 = conv2(inputs)\n\n self.assertTrue(\n output2.get_shape().is_compatible_with(\n [batch_size, in_length - kernel_shape + 1, out_channels]))\n\n self.assertTrue(\n conv2.w.get_shape().is_compatible_with(\n [kernel_shape, in_channels, out_channels]))\n\n if use_bias:\n self.assertTrue(\n conv2.b.get_shape().is_compatible_with(\n [out_channels]))" ]
[ "0.6113688", "0.59597874", "0.59597874", "0.59597874", "0.59297377", "0.5638191", "0.55854297", "0.55849034", "0.55610806", "0.5559144", "0.5558414", "0.5542325", "0.54853", "0.54853", "0.5475481", "0.545066", "0.54470015", "0.54394406", "0.5434174", "0.5428862", "0.5427932", "0.54251105", "0.54242826", "0.5412443", "0.53987986", "0.5370135", "0.5362492", "0.53557646", "0.53557646", "0.53549695", "0.53488463", "0.5348494", "0.53167486", "0.53148127", "0.5312046", "0.5306727", "0.5301681", "0.52963954", "0.5287777", "0.52803195", "0.52788335", "0.52656543", "0.52655476", "0.5253779", "0.5244813", "0.52393013", "0.5229748", "0.52192074", "0.5188516", "0.51884097", "0.5186127", "0.51821285", "0.5180606", "0.5176652", "0.51572055", "0.5154152", "0.515024", "0.5146969", "0.5146969", "0.5144095", "0.51425475", "0.51418024", "0.514091", "0.51392645", "0.513733", "0.51354706", "0.5127826", "0.5120285", "0.511648", "0.51126254", "0.5108637", "0.5108346", "0.5106739", "0.5101228", "0.50846136", "0.50834197", "0.5081635", "0.5080585", "0.5072493", "0.50715065", "0.5071106", "0.5067771", "0.5058199", "0.50580627", "0.50539315", "0.5051062", "0.5050302", "0.5049744", "0.5043649", "0.5035776", "0.50338674", "0.5031306", "0.50243795", "0.501962", "0.5012651", "0.5006135", "0.50032526", "0.49993676", "0.49988517", "0.49968454" ]
0.5596443
6
Selects the action with the highest Q-value with probability 1-eps_threshold, and a random action otherwise.
def select_action(policy_net, state, eps, n_actions, device, steps_done):
    sample = random.random()
    if sample > eps:
        with torch.no_grad():
            # t.max(1) will return largest column value of each row
            # second column on max result is index of where max element was
            # found, so we pick action with the larger expected reward
            return torch.tensor([[policy_net.forward(state.float()).argmax()]], device=device, dtype=torch.long)
    else:
        return torch.tensor([[random.randrange(n_actions)]], device=device, dtype=torch.long)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _select_action(self):\n if self.eval_mode:\n self._log_values()\n epsilon = self.epsilon_eval\n else:\n epsilon = self.epsilon_fn(\n self.epsilon_decay_period,\n self.training_steps,\n self.min_replay_history,\n self.epsilon_train)\n if random.random() <= epsilon:\n # Choose a random action with probability epsilon.\n return random.randint(0, self.num_actions - 1)\n else:\n # Choose the action with highest Q-value at the current state according\n # to the current head.\n return self._compute_q_argmax()", "def select_action(self):\n estimated_q_a = self._action_value_estimator.get_estimated_q_a()\n\n if np.random.rand() < self._epsilon:\n chosen_action = random.choice(list(estimated_q_a.keys()))\n else:\n chosen_action = max(estimated_q_a, key=estimated_q_a.get)\n\n return chosen_action", "def chooseAction(self,state):\r\n #generate float btwn 0-1\r\n choice = random.random()\r\n feat = self.feat_funct(state)\r\n #choose according to that number\r\n if choice > self.epsilon:\r\n return(self.maxQ(feat)[1])\r\n else:\r\n #choose randomly\r\n return(self.actions[random.randrange(0,len(self.actions))])", "def chooseAction(self,state):\r\n #generate float btwn 0-1\r\n choice = random.random()\r\n \r\n #choose according to that number\r\n if choice > self.epsilon:\r\n return(self.maxQ(state)[1])\r\n else:\r\n #choose randomly\r\n return(self.actions[random.randrange(0,len(self.actions))])", "def chooseAction(self,state):\r\n #generate float btwn 0-1\r\n choice = random.random()\r\n \r\n #choose according to that number\r\n if choice > self.epsilon:\r\n return(self.maxQ(state)[1])\r\n else:\r\n #choose randomly\r\n return(self.actions[random.randrange(0,len(self.actions))])", "def choose_action(self, state):\n if random.random() < self.epsilon:\n self.epsilon -= self.epsilon_annealing_rate\n return random.choice(self.valid_actions)\n \n #initialize search variables\n opt_action = self.valid_actions[0]\n opt_value = 0\n\n #performs a search across all valid actions for highest q-value.\n for action in self.valid_actions:\n cur_value = self.q_value(state, action)\n if cur_value > opt_value:\n opt_action = action\n opt_value = cur_value\n elif cur_value == opt_value:\n opt_action = random.choice([opt_action, action])\n return opt_action", "def select_action(self, q_values):\n assert q_values.ndim == 1\n nb_actions = q_values.shape[0]\n if np.random.uniform() < self.eps:\n copy_q_values = np.copy(q_values)\n idx = np.argmax(q_values)\n copy_q_values[idx] = 0\n for i in range(0, nb_actions):\n val = copy_q_values[i]\n copy_q_values[i] = -1e8 if val == 0 else val * np.random.uniform()\n action = np.argmax(copy_q_values)\n else:\n action = np.argmax(q_values)\n return action", "def choose_action(env, Q, observation, epsilon):\n if np.random.uniform(0, 1) < epsilon:\n action = env.action_space.sample()\n else:\n action = np.argmax(Q[observation, :])\n return action", "def pick_action(self, available_actions, epsilon=.05):\n if np.random.uniform(0, 1) < epsilon:\n action = available_actions[np.random.randint(\n 0, len(available_actions))]\n else:\n q_values_of_state = self.q_table[self.environment.current_location]\n maxValue = max(q_values_of_state.values())\n action = np.random.choice(\n [k for k, v in q_values_of_state.items() if v == maxValue]\n )\n\n return action", "def choose_action(self, state):\n prob = [] # Probability distribution\n for i in range(len(ACTIONS)):\n prob.append(self.epsilon/4)\n Q_func = self.policy.predict(process_state(state))\n Q_vals = Q_func[0]\n max_index = []\n Qmax 
= np.amax(Q_vals)\n for i in range(len(prob)):\n if Q_vals[i] == Qmax:\n # max_index.append(i)\n prob[i] = 1 - self.epsilon + self.epsilon/4\n break\n # ind = np.random.choice(max_index)\n # prob[ind] = 1 - self.epsilon + self.epsilon/4\n action = np.random.choice(ACTIONS, p = prob)\n return action", "def select_action(self, state):\n \n ##create lists and string to save relative action information\n actions = []\n action = ''\n all_actions = []\n \n ##get the action with the maximum value\n temp = {}\n for (s, a), value in self.Q.iteritems():\n if s == state:\n temp[(s, a)] = value\n all_actions.append(a) \n max_value = max(temp.values())\n for (s, a) , value in temp.iteritems():\n if value == max_value:\n actions.append(a)\n\n ##if we have more than one action with max_values, random return one\n if len(actions) > 1:\n index = random.randint(0,len(actions) - 1)\n action = str(actions[index])\n else:\n for item in actions:\n action = item\n \n ##when the random number less than epsilon, then return one action randomly \n if random.random() < self.epsilon:\n index = random.randint(0, len(all_actions) - 1)\n action = str(all_actions[index])\n \n ##if the random number not less than epsilon, then return the action with max value\n return action", "def select_action(self, state):\n \n ##create lists and string to save relative action information\n actions = []\n action = ''\n all_actions = []\n \n ##get the action with the maximum value\n temp = {}\n for (s, a), value in self.Q.iteritems():\n if s == state:\n temp[(s, a)] = value\n all_actions.append(a)\n \n max_value = max(temp.values())\n for (s, a) , value in temp.iteritems():\n if value == max_value:\n actions.append(a)\n\n ##if we have more than one action with max_values, random return one\n if len(actions) > 1:\n index = random.randint(0,len(actions) - 1)\n action = str(actions[index])\n else:\n for item in actions:\n action = item\n \n ##when the random number less than epsilon, then return one action randomly \n if random.random() < self.epsilon:\n index = random.randint(0, len(all_actions) - 1)\n action = str(all_actions[index])\n \n ##if the random number not less than epsilon, then return the action with max value\n return action", "def act(self,observation):\n maximum_actions = np.argwhere(self.q_table[observation] == np.amax(self.q_table[observation])).flatten()\n return(np.random.choice(maximum_actions))", "def select_action(self, state):\n # print(\"agent.select_action() - state: {}\".format(state))\n\n self.step_counter += 1\n # self.epsilon = max(0.1, 1.0-self.step_counter/self.epsilon_decay_steps)\n epsilon_min = .01\n epsilon_max = .8\n epsilon_step = epsilon_max - (epsilon_max - epsilon_min) * self.step_counter / self.epsilon_decay_steps\n self.epsilon = max(epsilon_min, epsilon_step)\n # self.epsilon = max(0.1, 1.0/self.step_counter)\n\n rand = random.uniform(0, 1)\n if rand < self.epsilon:\n # choose random action\n return np.random.choice(self.nA)\n else:\n # choose greedy action\n return np.argmax(self.Q[state])", "def choose_action(self, state):\n if random.random() < self.e_greedy_prob:\n # randomly select action from state\n action = np.random.choice(len(self.q_val_table[state]))\n else:\n # greedily select action from state\n action = np.argmax(self.q_val_table[state])\n return action", "def chooseAction(self, epsilon, state):\n if random.uniform(0, 1) < epsilon:\n return random.randrange(9)\n\n cur_best_val = -float('inf')\n cur_best_action = 0\n\n data = env.getAllNextStates(state)\n\n with torch.no_grad():\n 
for action, next_state, done in data:\n if next_state != state:\n value = self.NN(self.RBF[next_state]).item() if not done else 0\n if value > cur_best_val:\n cur_best_val = value\n cur_best_action = action\n #print(data)\n return cur_best_action", "def select_action(self, q_values, **kwargs):\n _rand = np.random.rand(1, 1)\n rand_action = np.random.randint(self.num_actions, size=(5, 5))\n q_values = np.reshape(q_values, [10, 5, 5])\n max_action = np.argmax(q_values, axis=0)\n # max_action = np.reshape(max_action, (-1,1))\n action_map = np.reshape(max_action, (5, 5))\n # print(action_map)\n _mask = _rand < self.epsilon\n res = _mask * rand_action + (1 - _mask) * max_action\n return res", "def epsilonGreedyChooser(normalAction, state, stepsDone):\n epsThreshold = EPS_END + (EPS_START - EPS_END) * math.exp(-1. * stepsDone / EPS_DECAY)\n randomSample = random.random()\n if randomSample > epsThreshold:\n action = normalAction(state).max(1)[1].view(1, 1)[0].item()\n #print(action)\n return action\n else:\n return ENVIRONMENT.action_space.sample()", "def _select_action(self):\n if self.eval_mode:\n epsilon = self.epsilon_eval\n else:\n epsilon = self.epsilon_fn(\n self.epsilon_decay_period,\n self.training_steps,\n self.min_replay_history,\n self.epsilon_train)\n if random.random() <= epsilon:\n # Choose a random action with probability epsilon.\n return random.randint(0, self.num_actions - 1)\n else:\n # Choose the action with highest Q-value at the current state.\n if self._interact == 'stochastic':\n selected_action = self._stochastic_action\n elif self._interact == 'greedy':\n selected_action = self._q_argmax\n else:\n raise ValueError('Undefined interaction')\n return self._sess.run(selected_action,\n {self.state_ph: self.state})", "def __call__(self, state, q_values):\n\n if self.policy_type == \"greedy\":\n is_greedy = True\n else:\n is_greedy = random.uniform(0, 1) > self.epsilon\n\n if is_greedy :\n # choose greedy action\n index_action = np.argmax(q_values[state])\n else:\n # get a random action\n index_action = random.randint(0,3)\n\n return actions_dict[index_action]", "def select_action(self, state):\r\n policy_s = self.epsilon_greedy_probs(self.nA, self.Q[state], self.count, self.epsilon)\r\n return np.random.choice(np.arange(self.nA), p=policy_s)", "def select_action(self, state: str) -> Action:\n rnd_num = self._random.random()\n p = 1.0 - self.epsilon\n if rnd_num > p:\n action = self._random.random_choice() \n else:\n action = max(self.Qs[state], key=lambda x: self.Qs[state][x])\n if self.epsilon_decay == True:\n self.turns += 1\n if self.turns < self.end_epsilon_decay:\n self.epsilon -= self.decay_value \n return action", "def pick_action(self, observation):\n if np.random.rand() < self.epsilon:\n action = np.random.randint(self.n_arm) # 从n个arm中随机选择一个\n else: # 1-epsilon greedy\n # 所谓reward, 就是success平均值\n posterior_means = self.get_posterior_mean() # shape:[arm, 1], 从中选择一个reward最大的arm\n action = random_argmax(posterior_means)\n\n return action", "def get_action(self,state):\n \n q_values = self.__network.predict(state[None])[0]\n \n ###YOUR CODE\n if np.random.rand()<self.epsilon:\n return np.random.choice(self.n_actions)\n return np.argmax(q_values)", "def bestAction(self):\n get_q = self.getQFunction()\n maxq = -5000\n best_actions = []\n for (state, action), q in get_q.items():\n if q > maxq:\n maxq = q\n best_actions = [action]\n elif q == maxq:\n best_actions.append(action)\n return self.tuple_to_dictionary(random.choice(best_actions))", "def eps_greedy(Q, epsilon, 
num_actions):\n if np.random.uniform(0,1,1) > epsilon:\n action = np.argmax(Q)\n else:\n action = np.random.randint(low=0, high=num_actions)\n \n Q_value = Q[action]\n return action, Q_value", "def select_action(self) -> int:\n # simulation loop\n for i in range(self.iterations):\n self.__simulate(self.root, self.iterations)\n\n # action choice\n max_q = 0\n best_action = 0\n for action in actions:\n new_node = self.root.children[action]\n value = new_node.Q\n if value > max_q:\n max_q = value\n best_action = action\n return best_action", "def select_action(state, policy, model, num_actions,\n EPS_START, EPS_END, EPS_DECAY, steps_done, alpha, beta):\n # sample = random.random()\n # eps_threshold = EPS_END + (EPS_START - EPS_END) * \\\n # math.exp(-1. * steps_done / EPS_DECAY)\n # .data.max(1)[1].view(1, 1)\n # if sample <= eps_threshold:\n # return LongTensor([[random.randrange(num_actions)]])\n\n\n \n Q = model(Variable(state, volatile=True).type(FloatTensor))\n pi0 = policy(Variable(state, volatile=True).type(FloatTensor))\n # print(pi0.data.numpy())\n V = torch.log((torch.pow(pi0, alpha) * torch.exp(beta * Q)).sum(1) ) / beta\n \n #### FOUND ERROR: ( Q ) returns a tensor of nan at some point\n if np.isnan( Q.sum(1).data[0]) :\n print(\"Q = \", Q)\n print(\"state = \", state)\n\n pi_i = torch.pow(pi0, alpha) * torch.exp(beta * (Q - V))\n m = Categorical(pi_i)\n action = m.sample().data.view(1, 1)\n return action\n # numpy.random.choice(numpy.arange(0, num_actions), p=probabilities)", "def choose_action(self, observation):\r\n observation = T.unsqueeze(T.FloatTensor(observation), 0)\r\n # Epsilon-greedy policy\r\n if np.random.uniform() < self.epsilon: \r\n # Get all of the Q values for the current state (forward prop)\r\n actions_value = self.Q_eval.forward(observation)\r\n\r\n # Take the optimal action \r\n action = T.max(actions_value, 1)[1].data.numpy()\r\n action = action[0] if self.action_space == 0 else action.reshape(self.action_space) # return the argmax index\r\n else: \r\n # Choose a random action in the action space list\r\n action = np.random.randint(0, self.num_actions)\r\n action = action if self.action_space == 0 else action.reshape(self.action_space)\r\n\r\n return action", "def pick_action(self, observation):\n # 注意: 只有此处不一样, 即TS里是从后验分布中采样,而epsilon-greedy是计算期望\n sampled_means = self.get_posterior_sample() # 每个arm都采样一个reward均值, [arm, 1]\n action = random_argmax(sampled_means) # 选择产生最大的均值的action\n return action", "def select_action(self, q_values):\n assert q_values.ndim == 1\n q_values = q_values.astype('float64')\n nb_actions = q_values.shape[0]\n\n exp_values = np.exp(np.clip(q_values / self.tau, self.clip[0], self.clip[1]))\n probs = exp_values / np.sum(exp_values)\n action = np.random.choice(range(nb_actions), p=probs)\n log.info(f\"Chosen action by keras-rl {action} - probabilities: {probs}\")\n return action", "def choose_action(self, state, epsilon_greedy=False):\n chosen_action = None\n if epsilon_greedy:\n if np.random.rand() <= self.epsilon:\n print('random actions')\n\n # choose random action\n chosen_action = random.choice(self.actions)\n\n else:\n print('argmax')\n\n # find the action with greatest Q value\n maxQ = -float(\"inf\")\n for action in self.actions:\n input_data = np.asarray(state + action).reshape(self.OUTPUT_DIM, self.INPUT_DIM)\n Q = self.model.predict(input_data)\n if Q > maxQ:\n maxQ = Q\n chosen_action = action\n\n else:\n\n # policy rollout\n maxQ = -float(\"inf\")\n for action in self.actions:\n input_data = np.asarray(state + 
action).reshape(self.OUTPUT_DIM, self.INPUT_DIM)\n Q = self.model.predict(input_data)\n if Q > maxQ:\n maxQ = Q\n chosen_action = action\n\n return chosen_action", "def _choose_action(self):\n return random.randint(0,self.num_bandits-1)", "def computeActionFromQValues(self, state):\n \"*** YOUR CODE HERE ***\"\n legal_actions = self.getLegalActions(state)\n if len(legal_actions) == 0: return None\n values = [self.getQValue(state, action) for action in legal_actions]\n max_value = max(values)\n best_indices = [index for index in range(len(values)) if values[index] == max_value]\n return legal_actions[random.choice(best_indices)]", "def _select_action(self, state):\n if random.random() < self.epsilon:\n action = random.randrange(self.num_actions)\n return torch.tensor([[action]], device=device, dtype=torch.long)\n else:\n with torch.no_grad():\n return self.policy_net(state).max(1)[1].view(1, 1)", "def choose_action( self):\n \"\"\"greedy, random, e-greedy, boltzmann, bayesian\"\"\"\n\tif self.exploration == \"greedy\":\n #Choose an action with the maximum expected value.\n a,allQ = sess.run([q_net.predict,q_net.Q_out],feed_dict={q_net.inputs:[s],q_net.keep_per:1.0})\n a = a[0]\n return a\n if self.exploration == \"random\":\n #Choose an action randomly.\n a = env.action_space.sample()\n if self.exploration == \"e-greedy\":\n #Choose an action by greedily (with e chance of random action) from the Q-network\n if np.random.rand(1) < e or total_steps < pre_train_steps:\n a = env.action_space.sample()\n else:\n a,allQ = sess.run([q_net.predict,q_net.Q_out],feed_dict={q_net.inputs:[s],q_net.keep_per:1.0})\n a = a[0]\n return a\n if self.exploration == \"boltzmann\":\n #Choose an action probabilistically, with weights relative to the Q-values.\n Q_d,allQ = sess.run([q_net.Q_dist,q_net.Q_out],feed_dict={q_net.inputs:[s],q_net.Temp:e,q_net.keep_per:1.0})\n a = np.random.choice(Q_d[0],p=Q_d[0])\n a = np.argmax(Q_d[0] == a)\n return a\n if self.exploration == \"bayesian\":\n #Choose an action using a sample from a dropout approximation of a bayesian q-network.\n a,allQ = sess.run([q_net.predict,q_net.Q_out],feed_dict={q_net.inputs:[s],q_net.keep_per:(1-e)+0.1})\n a = a[0]\n return a", "def act(self, state):\r\n self.state_info, actions= self.env.generatePossibleAction(state)\r\n # import pdb; pdb.set_trace()\r\n # print(actions)\r\n if self.eps > 0. 
and np.random.rand() < self.eps:\r\n # select the action randomly\r\n return random.choice(actions)\r\n # import pdb; pdb.set_trace()\r\n qvals = {action: self.Q_value[self.state_info, action] for action in actions}\r\n max_q = max(qvals.values())\r\n\r\n # in case of multiple actions having the same Q values\r\n actions_with_max_q = [a for a,q in qvals.items() if q == max_q]\r\n return random.choice(actions_with_max_q)", "def choose_action(state, qmatrix, numb_actions):\n\n qmatrix = check_state_exist(state, qmatrix, numb_actions)\n if np.random.uniform() < epsilon:\n # choose best action from qmatrix\n state_action = qmatrix.loc[state, :]\n state_action = state_action.reindex(np.random.permutation(state_action.index)) # some actions have same value\n action = state_action.idxmax()\n else:\n # choose random action from qmatrix\n action = np.random.choice(list(range(numb_actions))) \n return action", "def choose_action(Q_table, state, epsilon):\n if random.uniform(0, 1) < epsilon:\n return random.choice(ACTIONS)\n else:\n actions = Q_table.get(state.__str__())\n if actions is None:\n return random.choice(ACTIONS)\n else:\n return PlayerAction(np.argmax(actions))", "def select_action(self, q_values):\n \n action = Sc2Action()\n\n # Epsilon-Greedy\n # pdb.set_trace()\n egran=np.random.uniform()\n if egran < self.eps and not self.testing:\n action.action = np.random.random_integers(0, self.nb_actions-1)\n action.coords = (np.random.random_integers(0, self.nb_pixels-1), np.random.random_integers(0, self.nb_pixels-1))\n if self.eps <0.05:\n print('eps:',self.eps)\n\n else:\n # greedy.\n action.action = np.argmax(q_values[0]) \n #pdb.set_trace()\n action.coords = np.unravel_index(q_values[1].argmax(), q_values[1].shape)[1:3]\n \n # action.coords = np.unravel_index(np.reshape(q_values[1][0][:][:], (16, 16)).argmax(), np.reshape(\n\n assert len(action.coords) == 2\n\n return action", "def computeActionFromQValues(self, state):\n actions = self.getLegalActions(state)\n if len(actions) == 0:\n return None\n qVals = [self.getQValue(state, a) for a in actions]\n bestActions = []\n bestVal = max(qVals)\n for i in range(len(actions)):\n if qVals[i] == bestVal:\n bestActions.append(actions[i])\n return random.choice(bestActions) #Break ties randomly", "def chooseAction(self, state, use_epsilon=True):\n epsilon = self.epsilon if use_epsilon else 0\n choose_random = True if random.random() < epsilon else False\n # assume there is always an available action\n available_actions = Nim.availableActions(state)\n if choose_random:\n return random.choice(list(available_actions))\n \n best_q = -math.inf\n best_action = None\n for action in available_actions:\n q_val = self.getQValue(state, action)\n if q_val > best_q:\n best_q = q_val\n best_action = action\n return best_action", "def select_action(self, state):\n\n if state in self.Q:\n prob = self.get_probs(self.Q[state])\n else:\n prob = np.ones(self.nA) / self.nA\n return np.random.choice(np.arange(self.nA), p = prob)", "def act(self, state, epsilon):\n if random.random() > epsilon:\n state = torch.FloatTensor(state)\n q_values = self.dqn(state)\n action = q_values.argmax().item()\n else:\n action = self.env.action_space.sample()\n return action", "def select_action(self, state, epsilon=None):\n if epsilon == None:\n epsilon = self.epsilon\n \n if np.random.random() > epsilon:\n # greedy action selection\n return self.get_optimal_action(state)\n \n else:\n # random action selection\n return np.random.randint(0, self.num_actions)", "def select_action(self, 
state):\n if state in self.Q:\n action = np.random.choice(np.arange(self.nA), p=self.get_probs(self.Q[state], self.epsilon, self.nA))\n else :\n action = np.random.choice(self.nA)\n\n return action", "def actionSelector(self): \n if self.Temp!=0:\n if len(self.lessons) > 60 and self.var_T: \n # if the agent haven't already gotten food since a certain time \n # we increase the temperature by 0.001 \n if self.count_without_food>12:\n self.Temp += 0.01 \n if self.Temp>=(self.var_T[0]): \n self.Temp = self.var_T[0] \n # otherwise we decrease the temperatur by 0.001 \n else: \n self.Temp -= 0.001\n if self.Temp <= (self.var_T[-1]):\n self.Temp = self.var_T[-1]\n \n s = np.sum([np.exp(float(k)/self.Temp) for k in self.U_list])\n\n self.action_proba =[np.exp(float(m)/self.Temp)/s for m in self.U_list]\n action = np.random.choice(np.arange(4),p=self.action_proba) # choice a random choice relating to the probability distribution given by the softmax algorith \n else:\n action = np.argmax(self.U_list)\n return action", "def select_action(engine, observation):\n with torch.no_grad():\n dqn.eval()\n if torch.rand(1).item() < epsilon:\n return random_action(observation)\n else:\n return dqn(observation).greedy()", "def __call__(self, state):\n if random.random() > self._epsilon:\n return self._max_policy(state)\n return random.choice(np.arange(self._action_size))", "def best_action(self, state):\n return random.choice(self.possible_actions)", "def choose_action(self, board):\n options = board.empty_cells\n # to allow exploration, have a small probability of a random move\n p_random = random.random()\n # if the state is not in the table add it\n if (self.sign, board.state) not in self.Q_table.keys() or p_random < self.epsilon:\n values = {}\n for option in options:\n values[option] = random.random()\n self.Q_table[(self.sign, board.state)] = values\n self.action = random.choice(options)\n else:\n values = self.Q_table[(self.sign, board.state)]\n action = max(values, key=values.get)\n self.action = action\n\n # decrease exploration after each action\n if self.epsilon > 0:\n self.epsilon -= 0.0001\n\n return self.action", "def select_action(self, env, state, **kwargs):\n\n self.epsilon = 1 - (len(self.memory.memory)*1.0)/self.memory.max_size + 0.05\n if np.random.uniform() <= self.epsilon:\n index = env.action_space.sample()\n else:\n index = np.argmax(self.Q.predict(state))\n\n return index", "def choose_action(q_table: np.ndarray, state: int,\n exploration_rate: float) -> int:\n random_value = random.uniform(0, 1)\n if random_value > exploration_rate:\n action = best_action(q_table, state)\n else:\n num_actions = q_table.shape[1]\n action = random.randint(0, num_actions-1)\n return action", "def chooseAction(self, gameState):\n probabilities = self.assignProbablities(gameState)\n #print probabilities\n prob, bestProbabilityAction = max(probabilities)\n return bestProbabilityAction", "def computeActionFromQValues(self, state):\n \"*** YOUR CODE HERE ***\"\n\n legal_actions = self.getLegalActions(state)\n\n if len(legal_actions) == 0:\n return None\n\n max_value = self.computeValueFromQValues(state)\n\n actions = [action for action in legal_actions if self.values[(str(state), action)] == max_value]\n\n return random.choice(actions)", "def choose_action(self):\n\n def is_random_exploration():\n\n # 5. 
Return whether do random choice\n # hint: generate a random number, and compare\n # it with epsilon\n if random.random() < self.epsilon:\n return True\n else:\n return False\n\n final_action = ''\n if self.learning:\n if is_random_exploration():\n # 6. Return random choose aciton\n final_action = self.valid_actions[random.randint(0, 3)]\n else:\n # 7. Return action with highest q value\n final_action = max(\n self.Qtable[self.state].items(),\n key=operator.itemgetter(1))[0]\n elif self.testing:\n # 7. choose action with highest q value\n final_action = max(\n self.Qtable[self.state].items(),\n key=operator.itemgetter(1))[0]\n else:\n # 6. Return random choose aciton\n final_action = self.valid_actions[random.randint(0, 3)]\n\n return final_action", "def get_action_choice(self, state: str, epsilon: float):\n\n # e-greedy\n if random.random() < epsilon:\n return {0: random.choice(Actions.actions), 1: random.choice(Actions.actions)}\n else:\n # Get the Q-values for the actions in this state\n Qs_t = self.Q_t[state]\n\n max_Qs_t = max(Qs_t.values())\n\n # find index of the max Q-values\n max_index = [a for a, q in Qs_t.items()\n if q == max_Qs_t]\n\n # choose one of the max-index with uniform distribution\n selected = random.choice(max_index)\n return {0: selected[0], 1: selected[1]}", "def computeActionFromQValues(self, state):\n \"*** YOUR CODE HERE ***\"\n\n position = state.getPacmanPosition()\n\n legal_actions = self.getLegalActions(state)\n\n if len(legal_actions) == 0:\n return None\n\n max_value = self.computeValueFromQValues(state)\n\n actions = [action for action in legal_actions if self.values[(str(position), action)] == max_value]\n\n return random.choice(actions)", "def computeActionFromQValues(self, state):\n \"*** YOUR CODE HERE ***\"\n\n position = state.getPacmanPosition()\n\n legal_actions = self.getLegalActions(state)\n\n if len(legal_actions) == 0:\n return None\n\n max_value = self.computeValueFromQValues(state)\n\n actions = [action for action in legal_actions if self.values[(str(position), action)] == max_value]\n\n return random.choice(actions)", "def computeActionFromQValues(self, state):\n \"*** YOUR CODE HERE ***\"\n\n position = state.getPacmanPosition()\n\n legal_actions = self.getLegalActions(state)\n\n if len(legal_actions) == 0:\n return None\n\n max_value = self.computeValueFromQValues(state)\n\n actions = [action for action in legal_actions if self.values[(str(position), action)] == max_value]\n\n return random.choice(actions)", "def computeActionFromQValues(self, state):\n \"*** YOUR CODE HERE ***\"\n\n position = state.getPacmanPosition()\n\n legal_actions = self.getLegalActions(state)\n\n if len(legal_actions) == 0:\n return None\n\n max_value = self.computeValueFromQValues(state)\n\n actions = [action for action in legal_actions if self.values[(str(position), action)] == max_value]\n\n return random.choice(actions)", "def __sample_policy_action(probs):\n # Subtract a tiny value from probabilities in order to avoid\n # \"ValueError: sum(pvals[:-1]) > 1.0\" in numpy.multinomial\n probs = probs - np.finfo(np.float32).epsneg\n\n action_indexes = [int(np.nonzero(np.random.multinomial(1, p))[0]) for p in probs]\n############################################################################################\n # action_indexes = [np.argmax(p) for p in probs] #select the action with the highest probability instead of randomly sampling\n # print(action_indexes)\n # 
print('++++++++++++++++++++++++')\n############################################################################################\n return action_indexes", "def select_action(self, state, evaluate):\n random_number = np.random.uniform()\n if random_number < self.epsilon and evaluate==False:\n # Random action\n return torch.tensor(random.randint(0,self.env.nA-1))\n\n else:\n # Greedy action\n state = [state]\n state = torch.stack(state)\n state = state.to(self.device, dtype=torch.float)\n q_values = self.main_dqn(state)\n argmax = torch.argmax(q_values).item()\n\n if evaluate:\n if self.env.freely_moving:\n self.list_evaluation_values.append(q_values.reshape(self.env.number_of_rows, self.env.number_of_columns))\n else:\n self.list_evaluation_values.append(q_values.reshape(1, self.env.nA))\n\n return torch.tensor(argmax)", "def select_action(self, state):\n return np.argmax(self.Q[state])", "def better_action(tip_speed):\n possible_actions_in_state = Q[get_state(tip_speed)]\n action_of_choice = np.argmax(possible_actions_in_state)\n return action_of_choice", "def choose_action(self, agent_data):\r\n action_value_estimates = agent_data[\"action_value_estimates\"]\r\n roll = random.uniform(0,1)\r\n if roll <= self.epsilon:\r\n action = random.choice( list( range(0,len(action_value_estimates))))\r\n else:\r\n action = self.argmax_with_random_tiebreaker(action_value_estimates)\r\n return action", "def select_action(images, n_actions, device, eps_threshold=-1):\n actions = []\n\n for i in images:\n if eps_threshold == -1:\n actions.append(torch.tensor([[random.randrange(n_actions)]], device=device, dtype=torch.long))\n else:\n sample = random.random()\n if sample > eps_threshold:\n with torch.no_grad():\n # t.min(1) will return smallest column value of each row.\n # second column on min result is index of where min element was\n # found, so we pick action with the lower expected reward.\n actions.append(policy_net(i.unsqueeze(0)).min(1)[1].view(1, 1))\n else:\n actions.append(torch.tensor([[random.randrange(n_actions)]], device=device, dtype=torch.long))\n\n return torch.tensor(actions, device=device)", "def sample(self, action):\n selector = random.random()\n return 1 if selector <= self.pay_offs[action] else 0", "def obtain_action(self, timestep):\r\n\t\t# Generates a random number for deciding between performing a random or\r\n\t\t# deterministic action.\r\n\t\trandom_num = random.random()\r\n\r\n\t\tif random_num < self.epsilon:\r\n\t\t\t# Random action taken.\r\n\t\t\treturn random.randint(0, self.num_actions-1)\r\n\r\n\t\telse:\r\n\t\t\t# Deterministic action taken, where a random action index is\r\n\t\t\t# selected among the action indexes with the maximum Q value\r\n\t\t\t# estimate.\r\n\t\t\treturn np.random.choice(np.argwhere(self.player_Q == self.player_Q.max()).flatten()).item()", "def choose_random_action(env):\n return env.action_space.sample()", "def obtain_action(self, timestep):\r\n\t\t# Finds all actions which have not been selected before.\r\n\t\tzero_action = np.argwhere(self.player_selected_actions == 0).flatten()\r\n\r\n\t\t# Checks if there are any actions which have not been selected before.\r\n\t\tif zero_action.size:\r\n\t\t\t# Returns a random action index that has not been selected before.\r\n\t\t\treturn np.random.choice(zero_action).item()\r\n\r\n\t\telse:\r\n\t\t\t# Calculates the sum of the Q value estimate and the upper\r\n\t\t\t# confidence bound term\r\n\t\t\tvalue_list = self.player_Q + self.confidence_level * (np.log(timestep+1) / self.player_selected_actions) 
** 0.5\r\n\r\n\t\t\t# Returns a random action index selected among the action indexes\r\n\t\t\t# with the maximum sum.\r\n\t\t\treturn np.random.choice(np.argwhere(value_list == value_list.max()).flatten()).item()", "def best_action(self, actions, state):\n\n maxQvalue = self.valueFromQvalues(state, actions)\n\n if GameEnds13(state):\n return None\n else:\n maxAction = [action for action in actions if self.getQvalue(state, action) == maxQvalue]\n best_action = random.choice(maxAction)\n return best_action", "def get_optimal_action(self, state):\n # check if there are multiple equivalent optimal actions\n if sum(self.Q_values[state] == np.amax(self.Q_values[state])) > 1:\n # select one of the optimal actions randomly\n idxs = np.where(self.Q_values[state] == np.amax(self.Q_values[state]))[0]\n return idxs[np.random.randint(0, idxs.size)]\n else:\n # return the unique optimal action\n return np.argmax(self.Q_values[state])", "def __selection(self, node: TreeNode) -> int:\n max_q = 0\n best_action = 0\n\n # update the value of N(s)\n node.N = 0\n for action in actions:\n node.N += node.children[action].N\n\n # choose action using UCT\n shuffled_actions = actions[:]\n random.shuffle(shuffled_actions)\n for action in shuffled_actions:\n new_node = node.children[action]\n value = new_node.Q + self.c * math.sqrt(math.log(node.N) / new_node.N)\n if value > max_q:\n max_q = value\n best_action = action\n return best_action", "def choose_random_action(self):\r\n return Action.HIT if random.random() <= 0.5 else Action.STICK", "def choose_action(self, observation):\n if np.random.random() < self.epsilon:\n action = np.random.choice(self.action_space)\n else:\n state = T.tensor(observation, dtype=T.float).to(self.Q.device)\n actions = self.Q.forward(state)\n\n # item() converts pytorch tensor to numpy array\n action = T.argmax(actions).item()\n\n return action", "def getPolicy(self, state):\n \"\"\"Description:\n Find all of q-values of current state, and choose the action \n with the hight q-value as optimal policy\n \"\"\"\n \"\"\" YOUR CODE HERE \"\"\"\n legalActions = self.getLegalActions(state)\n action = None\n policy = util.Counter() # use counter to store action and its q-value\n \n if len(legalActions) == 0:\n return action\n \n for a in legalActions:\n policy[a] = self.getQValue(state, a)\n action = policy.argMax()\n return action\n\n \"\"\" END CODE \"\"\"", "def select_action(self, t):\n \n theta_hat = self.generate_parameter_sample(t)\n #print('theta_hat:', theta_hat)\n \n a = aux.argmax_of_array(theta_hat)\n #print('Actual Action:', a)\n \n return a", "def randomAction():\n return np.random.randint(0, POSSIBLE_ACTIONS)", "def sample_action(self, state):\n sample = random.random()\n eps_threshold = self.eps_min + (self.eps - self.eps_min) * \\\n math.exp(-1. 
* self.n_steps / self.eps_decay)\n self.n_steps += 1\n state = torch.from_numpy(state).float().view(1,-1)\n self.working_q.eval()\n if sample > eps_threshold:\n with torch.no_grad():\n action = self.working_q(state).max(1)[1].view(1, 1).to(device=device)\n else:\n action = torch.tensor([[random.randrange(2)]], device=device, dtype=torch.long)\n self.working_q.train()\n return action", "def takeAction(self, state):\n # go greedy or not?\n if random.uniform(0, 1) < self.epsilon:\n # greedy selection\n # find best action\n allActions = torch.stack(\n tuple(torch.cat((state.strengths, state.focus, changes)) for changes in self.actionSet))\n evaluation = self.q.evaluateBunch(allActions)\n action = Action(state, self.actionSet[evaluation.argmax()])\n return action\n else:\n # random selection\n return Action(state, random.choice(self.actionSet))", "def EpsGreedy(self, actions, game_state):\n if random.random() < self.epsilon:\n return random.choice(actions)\n else:\n return self.best_action(actions, game_state)", "def select_action(self, state):\n\t\treturn sample(range(0, self.action_space), 1)[0]", "def get_greedy_actions(self, state):\n state_action_values = self.get_action_values(state) # What are the value that we could get from current state\n\n max_action_value = max(state_action_values) # What is the higher value\n max_value_indices = [i for i, value in enumerate(state_action_values) if\n value == max_action_value] # Gets their indices\n\n # Prepares action probabilites for the ones with the higher value\n action_probs = np.zeros((4,))\n action_probs[max_value_indices] = 1 / (len(max_value_indices) if type(max_value_indices) is list else 1)\n\n return action_probs", "def greedy_next_action(self, state):\n max_val = float('-inf')\n if self.verbose:\n cells = []\n max_candidates = {}\n for i in range(3):\n for j in range(3):\n if state[i][j] == VALUES.EMPTY:\n val = self.q_value((state, (i, j)))\n if val >= max_val:\n max_val = val\n max_move = (i, j)\n max_candidates[max_move] = val\n if self.verbose:\n cells.append('{0:.3f}'.format(val).center(6))\n elif self.verbose:\n cells.append(state[i][j].center(6))\n if self.verbose:\n self.logger.info(BOARD.format(*cells))\n possible_actions = [k for k, v in max_candidates.items() if v == max_val]\n action = random.choice(possible_actions) if len(possible_actions) > 0 else None\n return action", "def act(self, state, epsilon):\n if random.random() > epsilon:\n state = torch.FloatTensor(state)\n q_values = self.dqn.forward(state)\n action = int(q_values.argmax())\n else:\n action = self.env.action_space.sample()\n\n return action", "def act(self, state):\n\t\trand_val = np.random.rand()\n\t\tif not self.is_eval and rand_val <= self.epsilon: # Do a random action only in train phase\n\t\t\treturn random.randrange(self.action_size)\n\n\t\tif self.firstIter: # If this is the first iteration, just do a \"hold\" action\n\t\t\tself.firstIter = False\n\t\t\treturn 2 # 2 = \"Hold action\"\n\n\t\toptions = self.model.predict(state) # Do a prediction based on a specific observation\n\t\t#print(options)\n\n\t\ttot = np.sum(options[0])\n\t\toptions[0] = options[0] / tot\n\t\t#print(options)\n\n\t\trand = random.random()\n\n\t\t#print(\"randm:\" + str(rand))\n\t\tif rand <= options[0][0]:\n\t\t\t#print(\"max:\" + str(np.argmax(options[0])) + \"ma 0\")\n\t\t\treturn 0\n\n\t\telif options[0][0] < rand <= (options[0][0] + options[0][1]):\n\t\t\t#print(\"max:\" + str(np.argmax(options[0])) + \"ma 1\")\n\t\t\treturn 1\n\t\telif (options[0][0] + options[0][1]) < rand 
<= (options[0][0] + options[0][1] + options[0][2]):\n\t\t\t#print(\"max:\" + str(np.argmax(options[0])) + \"ma 2\")\n\t\t\treturn 2\n\t\telse:\n\t\t\t#print(\"max:\" + str(np.argmax(options[0])) + \"ma 3\")\n\t\t\treturn 3\n\n\t\t#return np.argmax(options[0])'''", "def act(self, observation):\n if np.random.random() < self.epsilon:\n return np.random.randint(0,9)\n else:\n return np.argmax(self.values)", "def act(self, observation):\n if np.random.random() < self.epsilon:\n return np.random.randint(0,9)\n else:\n return np.argmax(self.values)", "def epsilon_greedy(Q, epsilon, n_actions, s, train=False):\n if train or np.random.rand() < epsilon:\n action = np.argmax(Q[s, :])\n else:\n action = np.random.randint(0, n_actions)\n return action", "def computeActionFromQValues(self, state):\n \"*** YOUR CODE HERE ***\"\n\n max_qvalue = None\n for action in self.legalActions:\n qvalue = self.getQValue(state, action)\n if max_qvalue is None or max_qvalue < qvalue:\n max_qvalue = qvalue\n\n if max_qvalue is None:\n return None\n\n actions = []\n for action in self.legalActions:\n qvalue = self.getQValue(state, action)\n if qvalue == max_qvalue:\n actions.append(action)\n\n if max_qvalue is not None and len(actions) == 0:\n return self.legalActions[0]\n if len(actions) > 1:\n return Const.DO_NOTHING\n return random.choice(actions)", "def computeActionFromQValues(self, state):\n \"*** YOUR CODE HERE ***\"\n\n max_qvalue = None\n for action in self.legalActions:\n qvalue = self.getQValue(state, action)\n if max_qvalue is None or max_qvalue < qvalue:\n max_qvalue = qvalue\n\n if max_qvalue is None:\n return None\n\n actions = []\n for action in self.legalActions:\n qvalue = self.getQValue(state, action)\n if qvalue == max_qvalue:\n actions.append(action)\n\n if max_qvalue is not None and len(actions) == 0:\n return self.legalActions[0]\n if len(actions) > 1:\n return Const.DO_NOTHING\n return random.choice(actions)", "def act(self, a_state):\n if np.random.rand() <= self.epsilon:\n return random.randrange(self.n_actions)\n else:\n action_values = self.model.predict(a_state)\n\n return np.argmax(action_values[0])", "def selectAction(self, state, require_q=False):\n e = self.exploration.value(self.steps_done)\n self.steps_done += 1\n q_values = self.forwardPolicyNet(state)\n if random.random() > e:\n action = q_values.max(1)[1].view(1, 1)\n else:\n if hasattr(self.env, 'nA'):\n action_space = self.env.nA\n else:\n action_space = self.env.action_space.n\n action = torch.tensor([[random.randrange(action_space)]], device=self.device, dtype=torch.long)\n q_value = q_values.gather(1, action).item()\n if require_q:\n return action, q_value\n return action", "def act(self, q_values, *args, **kwargs):\n if np.random.binomial(1, p=self.epsilon_updater.cur_value):\n action = np.array([np.random.choice(range(len(q_values)))])\n else:\n action = np.array([np.argmax(q_values)])\n self.epsilon_updater.update()\n return action", "def choose_action(s, epsilon):\n # Update PI(s, a) for all actions a for that state s:\n # action probabilities = epsilon/(|A|-1) for all actions by default\n # over |A|-1 because 1 of them will be optimal and have proba 1-epsilon\n global PI\n PI[s, :] = [epsilon / (len(ACTIONS)-1.)] * len(ACTIONS)\n\n # Get the best action for that state (greedy w.r.t. Q):\n best_a = 0\n best_q_val = -np.inf\n for i, q_val in enumerate(Q[s,:]):\n if q_val > best_q_val:\n best_q_val = q_val\n best_a = i\n\n # Change default proba of best action to be 1-epsilon\n PI[s, best_a] = 1. 
- epsilon\n # print \"best action:\", best_a\n assert np.isclose(np.sum(PI[s, :]), 1.)\n\n # sample from ACTIONS with proba distribution PI[s, :]\n return np.random.choice(ACTIONS, p=PI[s, :])", "def select_action(self, **kwargs):\n return np.random.randint(0, self.num_actions)", "def chooseAction(self, gameState):\n\n ####print \"chooseAction Called\"\n\n #self.lastEatenFood = None\n\n\n actions = gameState.getLegalActions(self.index)\n\n ##print \"\\nNEW ACTION\\n--------\"\n\n # You can profile your evaluation time by uncommenting these lines\n # start = time.time()\n values = [self.evaluate(gameState, a) for a in actions]\n # ###print 'eval time for agent %d: %.4f' % (self.index, time.time() - start)\n\n maxValue = max(values)\n bestActions = [a for a, v in zip(actions, values) if v == maxValue]\n\n \n\n return random.choice(bestActions)", "def get_action(self, history):\n history = np.float32(history / 255.0)\n if np.random.rand() <= self.epsilon:\n return random.randrange(3)\n\n else:\n q_values = self.q_duelling_part.predict(history)\n\n return np.argmax(q_values[0])", "def __act__(\n self,\n t: int\n ) -> Action:\n\n if self.random_state.random_sample() < self.epsilon:\n a = self.random_state.choice(self.most_recent_state.AA)\n self.epsilon *= (1 - self.epsilon_reduction_rate)\n else:\n a = self.greedy_action\n\n return a" ]
[ "0.79709095", "0.78705657", "0.7817313", "0.7803791", "0.7803791", "0.76855636", "0.7661042", "0.759864", "0.7582084", "0.75690514", "0.75047153", "0.7487146", "0.7473155", "0.74240166", "0.74202406", "0.7387183", "0.7359474", "0.7358641", "0.73480034", "0.73178715", "0.72899866", "0.727806", "0.72729003", "0.7265818", "0.72582126", "0.7251893", "0.7242832", "0.72105384", "0.72031623", "0.7201035", "0.71997863", "0.7184729", "0.7172662", "0.7171903", "0.71658707", "0.71618664", "0.7109972", "0.71062714", "0.71007687", "0.70927143", "0.70862883", "0.7059277", "0.70297307", "0.7027544", "0.70071125", "0.7000017", "0.69895434", "0.69626975", "0.6942636", "0.6938694", "0.6933843", "0.69315696", "0.6925297", "0.69119424", "0.69051796", "0.68936044", "0.6882847", "0.68802214", "0.68802214", "0.68802214", "0.68802214", "0.68775123", "0.6869547", "0.6847025", "0.68454236", "0.6845136", "0.68438333", "0.68230313", "0.6766024", "0.6764586", "0.67600083", "0.6754048", "0.6742063", "0.6736167", "0.6718651", "0.67174655", "0.67159224", "0.6710991", "0.67064947", "0.6703097", "0.67027575", "0.67025036", "0.6697655", "0.66906273", "0.66742235", "0.66739106", "0.66723764", "0.6671529", "0.6671529", "0.66692466", "0.6661025", "0.6661025", "0.6659995", "0.66529226", "0.6649635", "0.6648521", "0.66434604", "0.6643041", "0.66205525", "0.66116387" ]
0.74595815
13
Performs one update of the optimizer for the policy_net
def optimize_model(memory, BATCH_SIZE, device, policy_net, Transition, n_actions, target_net, gamma, optimizer, double_q_learning=True, gradient_clipping=True, initial_replay_size=0): if len(memory) < BATCH_SIZE or len(memory) < initial_replay_size: return transitions = memory.sample(BATCH_SIZE) # Transpose the batch (see https://stackoverflow.com/a/19343/3343043 for # detailed explanation). This converts batch-array of Transitions # to Transition of batch-arrays. batch = Transition(*zip(*transitions)) # Compute a mask of non-final states and concatenate the batch elements # (a final state would've been the one after which simulation ended) non_final_mask = torch.tensor(tuple(map(lambda s: s is not None, batch.next_state)), device=device, dtype=torch.bool) # use stack if the input state has only one dimension (is a vector) if batch.state[0].dim() == 1: non_final_next_states = torch.stack([s.float() for s in batch.next_state if s is not None]) state_batch = torch.stack([s.float() for s in batch.state]) # use this if the input state has several dimensions else: non_final_next_states = torch.cat([s.float() for s in batch.next_state if s is not None]) state_batch = torch.cat([s.float() for s in batch.state]) action_batch = torch.cat(batch.action) reward_batch = torch.cat(batch.reward) # Compute Q(s_t, a) - the model computes Q(s_t), then we select the columns of actions taken. # These are the actions which would've been taken for each batch state according to policy_net state_action_values = policy_net.forward(state_batch).gather(1, action_batch) if not double_q_learning: # Compute V(s_{t+1}) for all next states. # Expected values of actions for non_final_next_states are computed based # on the "older" target_net; selecting their best reward with max(1)[0]. # This is merged based on the mask, such that we'll have either the expected # state value or 0 in case the state was final. next_state_values = torch.zeros(BATCH_SIZE, device=device) next_state_values[non_final_mask] = target_net.forward(non_final_next_states).max(1)[0].detach() else: # Double Q Learning next_state_values = torch.zeros(BATCH_SIZE, device=device) # We use Double Q Learning, that is the best action is chosen with the policy net, but it's # value is calculated with the target net next_state_best_actions = torch.zeros([BATCH_SIZE,n_actions], device=device) next_state_best_actions[non_final_mask] = policy_net.forward(non_final_next_states) best_action_mask = torch.zeros_like(next_state_best_actions) best_action_mask[torch.arange(len(next_state_best_actions)), next_state_best_actions.argmax(1)] = 1 next_state_values[non_final_mask] = target_net.forward(non_final_next_states).masked_select( best_action_mask[non_final_mask].bool()) # Compute the expected Q values expected_state_action_values = (next_state_values*gamma) + reward_batch # Compute mse loss loss = F.mse_loss(state_action_values, expected_state_action_values.unsqueeze(1)) # Optimize the model optimizer.zero_grad() loss.backward() if gradient_clipping: for param in policy_net.parameters(): param.grad.data.clamp_(-1, 1) optimizer.step() return optimizer, policy_net
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_optimizer(self, context, optimizer, host):\n pass", "def update_policy(self):\n self.optimizer.step()\n self.optimizer.zero_grad()", "def defineUpdateOperations(self):\n self.updated_value = tf.placeholder(shape=[1, self.network.action_size], dtype=tf.float32)\n self.loss = tf.reduce_sum(tf.square(self.updated_value - self.network.policyLayer))\n self.trainer = tf.train.GradientDescentOptimizer(learning_rate=self.learning_rate)\n\n self.updateModel = self.trainer.minimize(self.loss)", "def update_op(self, loss, learning_rate,var):\n #train_op = None\n ####### Implementation Here ######\n #pass\n train_op = tf.train.AdamOptimizer(learning_rate = learning_rate).minimize(loss = loss,var_list = var )\n return train_op", "def update_policy(self):\n self._sess.run(self._hard_copy_to_target_op);", "def _update_target_net(self):\n self.target_net.load_state_dict(self.policy_net.state_dict())\n self.target_net.eval()", "def update_policy(self):\n pass", "def initialize_optimization(self):\n\n if self.FLAGS.optimizer == \"Adam\" :\n self.solver = tf.train.AdamOptimizer(\n learning_rate = self.learning_rate,\n beta1 = self.FLAGS.beta1,\n beta2 = self.FLAGS.beta2)\n else:\n print(\"ERROR: Cannot handle optimizer type {}!!!\".format(self.FLAGS.optimizer))\n raise RuntimeError\n \n # batch normalization in tensorflow requires this extra dependency\n # this is required to update the moving mean and moving variance variables\n extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(extra_update_ops):\n self.update = self.solver.minimize(self.loss, global_step=self.global_step)", "def add_optimizers_to_graph(self):\n with tf.device(self.params.device):\n with self.graph.as_default():\n with tf.compat.v1.variable_scope(\"optimizers\") as scope:\n self.grads_and_vars = list() # [sch_idx][weight_idx]\n self.apply_grads = list() # [sch_idx][weight_idx]\n self.learning_rates = list() # [sch_idx][weight_idx]\n if self.params.optimizer == \"lbfgsb\":\n self.minimizer = None\n #self.minimizer = tfp.optimizer.lbfgs_minimize(\n # value_and_gradients_function=self.loss_value_and_grad,#self.total_loss,\n # initial_position=self.w_init,#self.trainable_variables,\n # max_iterations=self.params.maxiter)\n #self.minimizer = tf.contrib.opt.ScipyOptimizerInterface(self.total_loss,\n # options={\"maxiter\":self.params.maxiter}) # Default method is L-BFGSB\n for schedule_idx, sch in enumerate(self.params.schedule):\n sch_grads_and_vars = list() # [weight_idx]\n sch_apply_grads = list() # [weight_idx]\n sch_lrs = list() # [weight_idx]\n #Construct weight ops\n weight_ops = [self.trainable_variables[weight] for weight in sch[\"weights\"]]\n for w_idx, weight in enumerate(sch[\"weights\"]):\n weight_name = weight.split(\"/\")[-1].split(\":\")[0]\n learning_rates = tf.compat.v1.train.exponential_decay(\n learning_rate=sch[\"weight_lr\"][w_idx],\n global_step=self.global_step,\n decay_steps=sch[\"decay_steps\"][w_idx],\n decay_rate=sch[\"decay_rate\"][w_idx],\n staircase=sch[\"staircase\"][w_idx],\n name=\"annealing_schedule_\"+weight_name)\n sch_lrs.append(learning_rates)\n if self.params.optimizer == \"sgd\":\n optimizer = tf.compat.v1.train.GradientDescentOptimizer(learning_rates,\n name=\"grad_optimizer_\"+weight_name)\n elif self.params.optimizer == \"adam\":\n optimizer = tf.compat.v1.train.AdamOptimizer(learning_rates, beta1=0.9, beta2=0.99,\n epsilon=1e-07, name=\"adam_optimizer_\"+weight_name)\n elif self.params.optimizer == \"adadelta\":\n optimizer = 
tf.compat.v1.train.AdadeltaOptimizer(learning_rates, epsilon=1e-07,\n name=\"adadelta_optimizer_\"+weight_name)\n elif self.params.optimizer == \"lbfgsb\":\n optimizer = None\n else:\n assert False, (\"Optimizer \"+self.params.optimizer+\" is not supported.\")\n weight_op = self.trainable_variables[weight]\n sch_grads_and_vars.append(self.compute_weight_gradients(optimizer, weight_op))\n gstep = self.global_step if w_idx == 0 else None # Only increment once\n if self.params.optimizer == \"lbfgsb\": # BFGS doesn't actually need the update op\n if w_idx == 0:\n sch_apply_grads.append(tf.compat.v1.assign_add(self.global_step, 1))\n else:\n sch_apply_grads.append(None)\n else:\n sch_apply_grads.append(optimizer.apply_gradients(sch_grads_and_vars[w_idx],\n global_step=gstep))\n self.learning_rates.append(sch_lrs)\n self.grads_and_vars.append(sch_grads_and_vars)\n self.apply_grads.append(sch_apply_grads)\n self.optimizers_added = True", "def update_target_network(self):\n self.target.set_weights(self.policy.get_weights()) # Update weights of target network with weights of policy network", "def update_policy(self):\n self.trainer_metrics.start_policy_update_timer(\n number_experiences=len(self.training_buffer.update_buffer[\"actions\"]),\n mean_return=float(np.mean(self.cumulative_returns_since_policy_update)),\n )\n self.cumulative_returns_since_policy_update = []\n n_sequences = max(\n int(self.trainer_parameters[\"batch_size\"] / self.policy.sequence_length), 1\n )\n value_total, policy_total = [], []\n advantages = self.training_buffer.update_buffer[\"advantages\"].get_batch()\n self.training_buffer.update_buffer[\"advantages\"].set(\n (advantages - advantages.mean()) / (advantages.std() + 1e-10)\n )\n num_epoch = self.trainer_parameters[\"num_epoch\"]\n for _ in range(num_epoch):\n self.training_buffer.update_buffer.shuffle()\n buffer = self.training_buffer.update_buffer\n for l in range(\n len(self.training_buffer.update_buffer[\"actions\"]) // n_sequences\n ):\n start = l * n_sequences\n end = (l + 1) * n_sequences\n run_out = self.policy.update(\n buffer.make_mini_batch(start, end), n_sequences\n )\n value_total.append(run_out[\"value_loss\"])\n policy_total.append(np.abs(run_out[\"policy_loss\"]))\n self.stats[\"Losses/Value Loss\"].append(np.mean(value_total))\n self.stats[\"Losses/Policy Loss\"].append(np.mean(policy_total))\n for _, reward_signal in self.policy.reward_signals.items():\n update_stats = reward_signal.update(\n self.training_buffer.update_buffer, n_sequences\n )\n for stat, val in update_stats.items():\n self.stats[stat].append(val)\n if self.policy.bc_module:\n update_stats = self.policy.bc_module.update()\n for stat, val in update_stats.items():\n self.stats[stat].append(val)\n self.training_buffer.reset_update_buffer()\n self.trainer_metrics.end_policy_update()", "def update_policy(self, minibatch_size):\n \n steps = self.rewards.shape[0]\n batch_size = self.rewards.shape[0] * self.rewards.shape[1]\n #steps = 500\n #batch_size = 500\n #print(steps)\n #print(batch_size)\n \n # Compute advantages\n '''\n with torch.no_grad():\n if self.gae:\n advantages = torch.zeros_like(self.rewards).to(self.training_device)\n lastgaelam = 0\n for t in reversed(range(steps)):\n if t == steps - 1:\n nextnonterminal = 1.0 - self.dones[t]\n nextvalues = self.state_values[t]\n else:\n nextnonterminal = 1.0 - self.dones[t + 1]\n nextvalues = self.state_values[t + 1]\n delta = self.rewards[t] + self.gamma * nextvalues * nextnonterminal - self.state_values[t]\n advantages[t] = lastgaelam = 
delta + self.gamma * self.gae_lambda * nextnonterminal * lastgaelam\n returns = advantages + self.state_values\n else:\n returns = torch.zeros_like(self.rewards).to(self.training_device)\n for t in reversed(range(steps)):\n if t == steps - 1:\n nextnonterminal = 1.0 - self.dones[t]\n next_return = self.state_values[t]\n else:\n nextnonterminal = 1.0 - self.dones[t+1]\n next_return = returns[t+1]\n returns[t] = self.rewards[t] + self.gamma * nextnonterminal * next_return\n advantages = returns - self.state_values\n ''' \n returns = torch.zeros_like(self.rewards).to(self.training_device)\n for t in reversed(range(steps)):\n if t == steps - 1:\n nextnonterminal = 1.0 - self.dones[t]\n next_return = self.state_values[t]\n else:\n nextnonterminal = 1.0 - self.dones[t+1]\n next_return = returns[t+1]\n returns[t] = self.rewards[t] + self.gamma * nextnonterminal * next_return\n advantages = returns - self.state_values\n \n\n # flatten the batch\n #b_obs = self.states.reshape((-1,) + self.state_space)\n #print(self.states.shape)\n b_obs = self.states.reshape((-1,4)).detach()\n b_logprobs = self.action_probs.reshape(-1,1).detach()\n b_actions = self.actions.reshape((-1,)).detach()\n b_advantages = advantages.reshape(-1,1)\n b_returns = returns.reshape(-1,1)\n b_values = self.state_values.reshape(-1,1)\n \n # Optimize policy and value network for K epochs, run optimization in minibatches\n \n inds = np.arange(batch_size)\n for i_epoch_pi in range(self.epochs):\n np.random.shuffle(inds)\n for start in range(0, batch_size, minibatch_size):\n end = start + minibatch_size\n minibatch_ind = inds[start:end]\n mb_advantages = b_advantages[minibatch_ind]\n if self.norm_adv:\n mb_advantages = (mb_advantages - mb_advantages.mean()) / (mb_advantages.std() + 1e-8)\n \n #_, newlogproba, entropy = self.get_action(b_obs[minibatch_ind], b_actions[minibatch_ind])\n newlogproba, entropy = self.evaluate(b_obs[minibatch_ind], b_actions[minibatch_ind])\n #ratio = (newlogproba - b_logprobs[minibatch_ind]).exp()\n ratio = torch.exp((newlogproba - b_logprobs[minibatch_ind].detach()))\n \n # Stats\n approx_kl = (b_logprobs[minibatch_ind] - newlogproba).mean()\n\n # Policy loss\n pg_loss1 = -mb_advantages * ratio\n pg_loss2 = -mb_advantages * torch.clamp(ratio, 1 - self.clip_epsilon, 1 + self.clip_epsilon)\n pg_loss = torch.max(pg_loss1, pg_loss2).mean()\n entropy_loss = entropy.mean()\n\n # Value loss\n _, new_values = self.policy.forward(b_obs[minibatch_ind])\n if self.clip_vloss:\n \n v_loss_unclipped = self.MseLoss(new_values,b_returns[minibatch_ind])\n #v_loss_unclipped = ((new_values - b_returns[minibatch_ind]) ** 2)\n v_clipped = b_values[minibatch_ind] + torch.clamp(new_values - b_values[minibatch_ind],\n -self.clip_epsilon, self.clip_epsilon)\n #v_loss_clipped = (v_clipped - b_returns[minibatch_ind]) ** 2\n v_loss_clipped = self.MseLoss(v_clipped,b_returns[minibatch_ind])\n v_loss_max = torch.max(v_loss_unclipped, v_loss_clipped)\n #v_loss = 0.5 * v_loss_max.mean()\n v_loss = 0.5 * v_loss_max\n else:\n #v_loss = 0.5 * ((new_values - b_returns[minibatch_ind]) ** 2).mean()\n v_loss = self.MseLoss(new_values,b_returns[minibatch_ind])\n\n loss = pg_loss + v_loss * self.vf_coeff - self.ent_coeff * entropy_loss\n\n self.optimizer.zero_grad()\n loss.backward()\n torch.nn.utils.clip_grad_norm_(self.policy.parameters(), self.max_grad_norm)\n self.optimizer.step()\n # Copy new weights into old policy:\n self.old_policy.load_state_dict(self.policy.state_dict())", "def update_net(optimizer):\n assert kl_train_dataset.bp_mode\n 
frames_gen, frame_cnt, rel_props, prop_ticks, prop_scaling = kl_train_dataset[index]\n\n optimizer.zero_grad()\n \n num_crop = 1\n length = 3\n if args.modality == 'Flow':\n length = 10\n elif args.modality == 'RGBDiff':\n length = 18\n \n for frames in frames_gen:\n # frames.shape == [frame_batch_size * num_crop * 3, 224, 224]\n assert len(frames) == length * frame_cnt\n input_var = torch.autograd.Variable(frames.view(-1, length, frames.size(-2), frames.size(-1)).cuda())\n base_out = net(input_var, None, None, None, None)\n assert base_out.size(0) == frame_cnt and base_out.size(1) == base_out_dim\n step_features = base_out.mean(dim=0).unsqueeze(0)\n gate, glcu_task_pred = net.glcu(step_features)\n gate = gate.repeat(1, frame_cnt).view(frame_cnt, base_out_dim)\n assert glcu_task_pred.size(0) == 1\n glcu_task_pred = F.softmax(glcu_task_pred.squeeze(), dim=0)\n if net.additive_glcu:\n base_out = base_out + gate\n else:\n base_out = base_out * gate\n\n output = net.test_fc(base_out)\n assert output.size(0) == frame_cnt and output.size(1) == output_dim\n act_scores, comp_scores, reg_scores = reorg_stpp.forward(output, prop_ticks, prop_scaling, bp_mode=True)\n\n # Task Head\n combined_scores = F.softmax(act_scores[:, 1:], dim=1) * torch.exp(comp_scores)\n combined_scores = combined_scores.mean(dim=0).unsqueeze(0)\n task_pred = net.task_head(combined_scores)\n assert task_pred.size(0) == 1\n task_pred = F.softmax(net.task_head(combined_scores).squeeze(), dim=0)\n\n loss = KL(task_pred, glcu_task_pred)\n loss.backward()\n torch.cuda.empty_cache() # To empty the cache from previous iterations\n break\n\n optimizer.step()\n optimizer.zero_grad()\n torch.cuda.empty_cache()\n\n return float(loss.data), frame_cnt", "def optimizer(self, model: nn.Module) -> torch.optim.Optimizer: # type: ignore\n pass", "def update(self, sample, oppo_target_policy, oppo_policy, parallel=False, logger=None,iter=5):\n obs, acs, rews, next_obs, dones = sample\n\n self.critic_optimizer.zero_grad()\n # if self.alg_types[agent_i] == 'MADDPG':\n if self.discrete_action: # one-hot encode action\n if self.agent_i ==0:\n all_trgt_acs = [onehot_from_logits(pi(nobs)) for pi, nobs in\n zip([self.target_policy,oppo_target_policy], next_obs)]\n else:\n all_trgt_acs = [onehot_from_logits(pi(nobs)) for pi, nobs in\n zip([oppo_target_policy,self.target_policy], next_obs)]\n # all_trgt_acs = [onehot_from_logits(pi(nobs)) for pi, nobs in\n # zip([self.target_policy,oppo_target_policy], next_obs)]\n else:\n if self.agent_i ==0:\n all_trgt_acs = [pi(nobs) for pi, nobs in\n zip([self.target_policy,oppo_target_policy], next_obs)]\n else:\n all_trgt_acs = [pi(nobs) for pi, nobs in\n zip([oppo_target_policy,self.target_policy], next_obs)]\n # all_trgt_acs = [pi(nobs) for pi, nobs in zip(self.target_policy,\n # next_obs)]\n trgt_vf_in = torch.cat((*next_obs, *all_trgt_acs), dim=1)\n\n if self.discrete_action:\n target_value = (rews[self.agent_i].view(-1, 1) + self.gamma *\n self.target_critic(trgt_vf_in) *\n (1 - dones[self.agent_i].view(-1, 1))) #change after\n else:\n target_value = (rews[self.agent_i].view(-1, 1) + self.gamma *self.target_critic(trgt_vf_in)*(dones.view(-1, 1)))\n\n vf_in = torch.cat((*obs, *acs), dim=1)\n actual_value = self.critic(vf_in)\n vf_loss = MSELoss(actual_value, target_value.detach())\n vf_loss.backward()\n\n torch.nn.utils.clip_grad_norm(self.critic.parameters(), 0.5)\n self.critic_optimizer.step()\n\n self.policy_optimizer.zero_grad()\n\n if self.discrete_action:\n curr_pol_out = self.policy(obs[self.agent_i])\n 
curr_pol_vf_in = gumbel_softmax(curr_pol_out, hard=True)\n else:\n curr_pol_out = self.policy(obs[self.agent_i])\n curr_pol_vf_in = curr_pol_out\n\n all_pol_acs = []\n if self.discrete_action:\n if self.agent_i == 0:\n all_pol_acs.append(curr_pol_vf_in)\n all_pol_acs.append(onehot_from_logits(oppo_policy(obs[1])))\n else:\n all_pol_acs.append(onehot_from_logits(oppo_policy(obs[0])))\n all_pol_acs.append(curr_pol_vf_in)\n else:\n if self.agent_i == 0:\n all_pol_acs.append(curr_pol_vf_in)\n all_pol_acs.append(oppo_policy(obs[1]))\n else:\n all_pol_acs.append(oppo_policy(obs[0]))\n all_pol_acs.append(curr_pol_vf_in)\n\n #\n # for i, ob in zip(range(self.nagents), obs):\n # if i == self.agent_i-1:\n # all_pol_acs.append(curr_pol_vf_in)\n # elif self.discrete_action:\n # all_pol_acs.append(onehot_from_logits(self.policy(ob)))\n # else:\n # all_pol_acs.append(self.policy(ob))\n\n vf_in = torch.cat((*obs, *all_pol_acs), dim=1)\n\n pol_loss = -self.critic(vf_in).mean()\n pol_loss += (curr_pol_out**2).mean() * 1e-3\n pol_loss.backward()\n total_norm=0\n for p in self.policy.parameters():\n param_norm = p.grad.data.norm(2)\n total_norm += param_norm.item() ** 2\n total_norm = total_norm ** (1. / 2)\n torch.nn.utils.clip_grad_norm(self.policy.parameters(), 0.5)\n self.policy_optimizer.step()", "def _update_objective(self):\n # rewrap the cost if the solver has been run\n self.Finalize()\n return", "def _soft_update_target_network(self):\n\n # Update the target network\n for target_param, param in zip(self.actor_target_network.parameters(), self.actor_network.parameters()):\n target_param.data.copy_((1-self.args.tau) * target_param.data + self.args.tau * param.data)\n\n # Update the critic network\n for target_param, param in zip(self.critic_target_network.parameters(), self.critic_network.parameters()):\n target_param.data.copy_((1-self.args.tau) * target_param.data + self.args.tau * param.data)", "def update_policy(self):\n n_sequences = max(int(self.trainer_parameters['batch_size'] / self.policy.sequence_length), 1)\n value_total, policy_total, forward_total, inverse_total = [], [], [], []\n advantages = self.training_buffer.update_buffer['advantages'].get_batch()\n self.training_buffer.update_buffer['advantages'].set(\n (advantages - advantages.mean()) / (advantages.std() + 1e-10))\n num_epoch = self.trainer_parameters['num_epoch']\n for k in range(num_epoch):\n self.training_buffer.update_buffer.shuffle()\n buffer = self.training_buffer.update_buffer\n for l in range(len(self.training_buffer.update_buffer['actions']) // n_sequences):\n start = l * n_sequences\n end = (l + 1) * n_sequences\n run_out = self.policy.update(buffer.make_mini_batch(start, end), n_sequences)\n value_total.append(run_out['value_loss'])\n policy_total.append(np.abs(run_out['policy_loss']))\n if self.use_curiosity:\n inverse_total.append(run_out['inverse_loss'])\n forward_total.append(run_out['forward_loss'])\n self.stats['value_loss'].append(np.mean(value_total))\n self.stats['policy_loss'].append(np.mean(policy_total))\n if self.use_curiosity:\n self.stats['forward_loss'].append(np.mean(forward_total))\n self.stats['inverse_loss'].append(np.mean(inverse_total))\n self.training_buffer.reset_update_buffer()", "def run_optimization(self):\n # Get batch\n (obs, action, old_logp, old_value, return_, advantage) = self.buffer.eject()\n\n # Train pi\n print(\"-\" * 20 + \"\\nPi Update\" + \"\\n\" + \"-\" * 20)\n (policy_loss, entropy,\n kl_divergence, clipping_fraction, steps) = self.update_actor(obs, action, old_logp, 
advantage)\n\n # Train value function\n print(\"-\" * 20 + \"\\nValue Function Update\" + \"\\n\" + \"-\" * 20)\n (value_loss,\n explained_variance) = self.update_critic(obs, old_value, return_)\n\n # Logging\n self.update_counter += 1\n self.log_update(policy_loss, entropy, kl_divergence, clipping_fraction,\n value_loss, explained_variance, steps)\n\n # Update learning rate\n self.decay_lr()\n\n # Save current weights (overwrites previous weights)\n self.save_weights()\n\n # Empty scenario counter\n self.scenario_counter = dict.fromkeys(self.scenario_counter, 0)", "def update_policy(self, *args, **kwargs):\r\n pass", "def updates(loss: Tensor, var_list, options):\n with tf.name_scope(\"optimization\"):\n if options['update'] == 'momentum':\n optimizer = tf.train.MomentumOptimizer(learning_rate=options['learning_rate'],\n momentum=options['momentum'])\n elif options['update'] == 'adam':\n optimizer = tf.train.AdamOptimizer(learning_rate=options['learning_rate'])\n elif options['update'] == 'rmsprop':\n optimizer = tf.train.RMSPropOptimizer(learning_rate=options['learning_rate'])\n else:\n assert False, \"Unknown loss minimizer\"\n update_step = optimizer.minimize(loss, var_list=var_list)\n return optimizer, update_step", "def update(self):\n\n # get states, actions, rewards and total timesteps from memory\n states, actions, R, T = self.memory.get()\n n_ep = len(R)\n\n # compute value estimates for the states\n v = self.critic(states)\n\n # compute advantages (using GAE) and rewards to go\n A, rtg = utils.gae_rtg((R, v, T), self.gam, self.lam)\n\n # store the initial version of both the policy and the log probs of the\n # actions for later comparison with the future versions (needed for PPO)\n policy_old = copy.deepcopy(self.policy)\n log_probs_old = policy_old(states).log_prob(actions)\n\n # sample from a batch of experiences\n # (\"_\" subscript indicates \"sampled from\")\n for (v_, A_, rtg_, log_probs_old_), i in utils.sample_batch((v, A, rtg, log_probs_old), self.batch_size, self.policy_updates):\n log_probs_ = self.policy(states).log_prob(actions)[i]\n\n # estimate ratio between the new log probs and the old ones\n r_ = torch.exp(log_probs_ - log_probs_old_)\n\n l_1 = r_ * A_\n l_2 = torch.clamp(r_, 1-self.eps, 1+self.eps) * A_\n\n # TODO: implement entropy\n # TODO: merge policy and critic\n\n # surragate loss function for PPO\n l_clip = -torch.mean(torch.min(l_1, l_2))\n\n # update the policy\n self.policy_optimizer.zero_grad()\n l_clip.backward(retain_graph=True)\n self.policy_optimizer.step()\n\n # sample a batch of value estimates and the corresponding rewards to go\n # to update the value function.\n for (v_, rtg_), _ in utils.sample_batch((v, rtg), self.batch_size, self.v_updates):\n # compute the loss\n critic_loss = F.mse_loss(v_, rtg_)\n\n # update the critic\n self.critic_optimizer.zero_grad()\n critic_loss.backward(retain_graph=True)\n self.critic_optimizer.step()\n\n # clear the memory. 
PPO is an On-Policy method so we don't need these\n # memories anymore\n self.memory.clear()\n\n # return the loss of the value function for display\n return F.mse_loss(v, rtg)", "def update(self, ex):\r\n if not self.optimizer:\r\n raise RuntimeError('No optimizer set.')\r\n\r\n # Train mode\r\n self.network.train()\r\n\r\n source_ids = ex['source_ids']\r\n source_pos_ids = ex['source_pos_ids']\r\n source_type_ids = ex['source_type_ids']\r\n source_mask = ex['source_mask']\r\n label = ex['label']\r\n\r\n if self.use_cuda:\r\n label = label.cuda(non_blocking=True)\r\n source_ids = source_ids.cuda(non_blocking=True)\r\n source_pos_ids = source_pos_ids.cuda(non_blocking=True) \\\r\n if source_pos_ids is not None else None\r\n source_type_ids = source_type_ids.cuda(non_blocking=True) \\\r\n if source_type_ids is not None else None\r\n source_mask = source_mask.cuda(non_blocking=True) \\\r\n if source_mask is not None else None\r\n\r\n # Run forward\r\n score = self.network(source_ids=source_ids,\r\n source_pos_ids=source_pos_ids,\r\n source_type_ids=source_type_ids,\r\n source_mask=source_mask)\r\n\r\n # Compute loss and accuracies\r\n loss = self.criterion(score, label)\r\n\r\n if self.args.gradient_accumulation_steps > 1:\r\n loss = loss / self.args.gradient_accumulation_steps\r\n\r\n if self.args.fp16:\r\n with amp.scale_loss(loss, self.optimizer) as scaled_loss:\r\n scaled_loss.backward()\r\n else:\r\n loss.backward()\r\n\r\n if (self.updates + 1) % self.args.gradient_accumulation_steps == 0:\r\n if self.args.fp16:\r\n torch.nn.utils.clip_grad_norm_(amp.master_params(self.optimizer), self.args.grad_clipping)\r\n else:\r\n torch.nn.utils.clip_grad_norm_(self.network.parameters(), self.args.grad_clipping)\r\n\r\n self.optimizer.step()\r\n self.scheduler.step() # Update learning rate schedule\r\n self.optimizer.zero_grad()\r\n\r\n self.updates += 1\r\n\r\n return loss.item()", "def update_estimator(self):\n self.optimizer.step()\n self.optimizer.zero_grad()", "def update_estimator(self):\n self.optimizer.step()\n self.optimizer.zero_grad()", "def update_policy(self, req):\n\n with self.mutex:\n\n # Apply all gradients\n for gradient in req.gradients:\n\n self.global_policy.zero_grad()\n\n # Transfer gradient to global policy\n self.transfer_gradient(gradient)\n\n # Clip gradient\n torch.nn.utils.clip_grad_norm_(self.global_policy.parameters(),\n 1.0)\n # torch.nn.utils.clip_grad_value_(self.global_policy.parameters(),\n # 1.0)\n\n # Apply gradient\n self.optimizer.step()\n self.iteration_counter += 1\n\n self._log_iteration(req)\n\n if (self.save_path and\n self.save_steps > 0 and\n self.iteration_counter >= self.save_steps * self.i):\n self.save_model()\n self.i += 1\n\n weights = self.encode_weights()\n cov = max(1.0 - self.iteration_counter /\n float(self.exploration), 0.05)\n covariance = [cov, cov]\n\n return UpdateGlobalPolicyResponse(weights, covariance)", "def update_policy(self):\n # this is update_policy \n # sample batch of 32 from the memory\n batch_of_samples = self.replay_memory.sample(batch_size=32)\n current_state_samples = batch_of_samples['current_state_samples']\n next_state_samples = batch_of_samples['next_state_samples']\n #print type(current_state_samples[0])\n #print current_state_samples\n\n # fetch stuff we need from samples 32*84*84*4\n current_state_images = np.zeros([1, 84, 84, 4])\n #print current_state_samples\n current_state_images[0,...] 
= np.dstack([sample.state for sample in current_state_samples])\n\n next_state_images = np.zeros([1, 84, 84, 4])\n next_state_images[0,...] = np.dstack([sample.state for sample in next_state_samples])\n\n # preprocess\n current_state_images = self.preprocessor.process_batch(current_state_images)\n next_state_images = self.preprocessor.process_batch(next_state_images)\n # print \"current_state_images {} max {} \".format(current_state_images.shape, np.max(current_state_images))\n #print current_state_images.shape\n q_current = self.q_network.predict(current_state_images,batch_size=self.batch_size) # 32*num_actions\n q_next = self.q_network.predict(next_state_images,batch_size=self.batch_size)\n\n # targets\n y_targets_all = q_current #1*num_actions\n #print y_targets_all.shape # [1,6]\n idx = 0 \n last_sample = current_state_samples[-1]\n if last_sample.is_terminal:\n y_targets_all[idx, last_sample.action] = last_sample.reward\n else:\n if self.mode == 'vanilla':\n y_targets_all[idx, last_sample.action] = np.float32(last_sample.reward) + self.gamma*np.max(q_next[idx])\n if self.mode == 'double': \n y_targets_all[idx, last_sample.action] = np.float32(last_sample.reward) + self.gamma*q_next[idx, np.argmax(q_current[idx])] \n\n loss = self.q_network.train_on_batch(current_state_images, np.float32(y_targets_all))\n\n with tf.name_scope('summaries'):\n self.tf_log_scaler(tag='train_loss', value=loss, step=self.iter_ctr)\n\n if not (self.iter_ctr % self.log_loss_every_nth):\n self.dump_train_loss(loss)\n\n # if (self.iter_ctr > (self.num_burn_in+1)) and not(self.iter_ctr%self.target_update_freq):\n # # copy weights\n # print \"Iter {} Updating target Q network\".format(self.iter_ctr)\n # self.target_q_network.set_weights(self.q_network.get_weights())\n # [self.target_q_network.trainable_weights[i].assign(self.q_network.trainable_weights[i]) \\\n # for i in range(len(self.target_q_network.trainable_weights))]", "def optimizer(self):\n return 'sgd'", "def _update_target_model(self):\n self.target_network.model.set_weights(self.policy_network.model.get_weights())", "def optimizer(self):\n \n # taken from https://github.com/germain-hug/Deep-RL-Keras/blob/master/DDPG/actor.py\n # I believe this is a work around to get keras to learn **given a gradient**\n # As opposed to bunch of x_train, y_trains?\n \n #Inputs\n state_pl = self.model.input\n action_grads_pl = K.placeholder(shape=(None,1)) \n \n #Find grad_(pars) mu(state)\n mu_pl = self.model.output\n pars = self.model.trainable_weights\n pars_grad_mu = tf.gradients(mu_pl, pars, -action_grads_pl)\n \n #grads_and_pars = zip(pars_grad_mu, pars) #keras needs this form\n #updates = tf.train.AdamOptimizer(self.lr).apply_gradients(grads_and_pars)\n\n # The gradients as defined above work on my mac, but not ubuntu.\n # Below I am trying a workaround. I changed the keras source code \n # To get this working. Specifically, I make the optimizer.get_updates()\n # function accept custom gradients. 
It was easy to do.\n \n opt = Adam(self.lr)\n loss = pars_grad_mu #placeholder, I won't use it\n updates = opt.get_updates(loss = loss, params = pars, grads = pars_grad_mu)\n\n return K.function(inputs = [state_pl, action_grads_pl], outputs = [], updates = updates)\n #return K.function(inputs = [state_pl, action_grads_pl], outputs = [updates])", "def _optimize(self) -> None:\n\n for i, agent in enumerate(self.agents):\n states, actions, rewards, next_states, dones = self.memory.sample()\n\n actor_next_state = self._agent_states(i, next_states)\n next_actions = torch.cat(\n [a.actor_target(actor_next_state) for a in self.agents], 1\n )\n next_q = agent.critic_target(next_states, next_actions).detach()\n target_q = rewards[:, i].view(-1, 1) + self.gamma * next_q * (\n 1 - dones[:, i].view(-1, 1)\n )\n local_q = agent.critic_local(states, actions)\n\n value_loss = agent.loss_fn(local_q, target_q)\n agent.value_optimizer.zero_grad()\n value_loss.backward()\n agent.value_optimizer.step()\n\n local_actions = []\n for i, a in enumerate(self.agents):\n local_states = self._agent_states(i, states)\n local_actions.append(\n a.actor_local(local_states)\n if a == agent\n else a.actor_local(local_states).detach()\n )\n local_actions = torch.cat(local_actions, 1)\n policy_loss = -agent.critic_local(states, local_actions).mean()\n\n agent.policy_optimizer.zero_grad()\n policy_loss.backward()\n agent.policy_optimizer.step()\n\n self._update_target_model(agent.critic_local, agent.critic_target)\n self._update_target_model(agent.actor_local, agent.actor_target)", "def _update_optimizer(self, hyperparameters, score, fit=True):\n if self.do_maximize:\n score = -score\n self.optimizer_result = self.optimizer.tell(hyperparameters, score, fit=fit)", "def updatelearningrate(self, epoch):\n self.lr = getlearningrate(epoch=epoch, opt=self.opt)\n # update learning rate of model optimizer\n if isinstance(self.model, list):\n count = 0\n for param_group in self.optimzer.param_groups:\n # if type(model) is <list> then update modules with different learning rate\n param_group['lr'] = self.lr\n count += 1\n # print \">>> count is:\", count-1\n else:\n for param_group in self.optimzer.param_groups:\n param_group['lr'] = self.lr", "def check_and_reset_optimizer(self):\n current_epoch = self.hparams.epoch_counter.current\n if not hasattr(self, \"switched\"):\n self.switched = False\n if isinstance(self.optimizer, torch.optim.SGD):\n self.switched = True\n\n if self.switched is True:\n return\n\n if current_epoch > self.hparams.stage_one_epochs:\n self.optimizer = self.hparams.SGD(self.modules.parameters())\n\n if self.checkpointer is not None:\n self.checkpointer.add_recoverable(\"optimizer\", self.optimizer)\n\n self.switched = True", "def _cook_optimizer(self, \n lr = 0.01, \n optimizer = 'sgd',\n l1_coeff = 0.00001,\n l2_coeff = 0.00001):\n with tf.variable_scope (self.name + '_train') as scope:\n apply_regularizer (name = self.name, var_list = tf.get_collection(\n self.name + '_regularizer_worthy_params'), \n l1_coeff = l1_coeff,\n l2_coeff = l2_coeff )\n self.obj = tf.add_n(tf.get_collection( self.name + '_objectives'), name='objective')\n tf.summary.scalar('total_objective', self.obj)\n\n # Change (supply as arguments) parameters here directly in the code.\n if optimizer == 'sgd': \n self.back_prop = apply_gradient_descent(var_list = tf.get_collection(\n self.name + '_trainable_params'),\n obj = self.obj, learning_rate = lr )\n elif optimizer == 'adagrad': \n self.back_prop = apply_adagrad(var_list = 
tf.get_collection(\n self.name + '_trainable_params'),\n obj = self.obj, learning_rate = lr ) \n elif optimizer == 'rmsprop':\n self.back_prop = apply_rmsprop(var_list = tf.get_collection(\n self.name + '_trainable_params') ,\n obj = self.obj, learning_rate = lr)\n elif optimizer == 'adam':\n self.back_prop = apply_adam (var_list = tf.get_collection(\n self.name + '_trainable_params') ,\n obj = self.obj, learning_rate = lr )\n else:\n raise Error('Invalid entry to optimizer')", "def update_parameters(self, ob_no, hidden, ac_na, fixed_log_probs, q_n, adv_n):\n self.update_critic(ob_no, hidden, q_n)\n self.update_policy(ob_no, hidden, ac_na, fixed_log_probs, adv_n)", "def make_optimizer(self, train_var_filter):\n # According from the prototxt in Caffe implement, learning rate must multiply by 10.0 in pyramid module\n fc_list = ['conv5_3_pool1_conv', 'conv5_3_pool2_conv', 'conv5_3_pool3_conv', 'conv5_3_pool6_conv', 'conv6',\n 'conv5_4']\n all_trainable = [v for v in tf.trainable_variables() if\n ('beta' not in v.name and 'gamma' not in v.name) or True]\n fc_trainable = [v for v in all_trainable if v.name.split('/')[0] in fc_list]\n conv_trainable = [v for v in all_trainable if v.name.split('/')[0] not in fc_list] # lr * 1.0\n fc_w_trainable = [v for v in fc_trainable if 'weights' in v.name] # lr * 10.0\n fc_b_trainable = [v for v in fc_trainable if 'biases' in v.name] # lr * 20.0\n assert (len(all_trainable) == len(fc_trainable) + len(conv_trainable))\n assert (len(fc_trainable) == len(fc_w_trainable) + len(fc_b_trainable))\n\n with tf.control_dependencies(self.update_ops):\n opt_conv = tf.train.MomentumOptimizer(self.lr_op, self.momentum)\n opt_fc_w = tf.train.MomentumOptimizer(self.lr_op * 10.0, self.momentum)\n opt_fc_b = tf.train.MomentumOptimizer(self.lr_op * 20.0, self.momentum)\n\n grads = tf.gradients(self.loss, conv_trainable + fc_w_trainable + fc_b_trainable)\n grads_conv = grads[:len(conv_trainable)]\n grads_fc_w = grads[len(conv_trainable): (len(conv_trainable) + len(fc_w_trainable))]\n grads_fc_b = grads[(len(conv_trainable) + len(fc_w_trainable)):]\n\n train_op_conv = opt_conv.apply_gradients(zip(grads_conv, conv_trainable), global_step=self.global_step)\n train_op_fc_w = opt_fc_w.apply_gradients(zip(grads_fc_w, fc_w_trainable))\n train_op_fc_b = opt_fc_b.apply_gradients(zip(grads_fc_b, fc_b_trainable))\n\n self.optimizer = tf.group(train_op_conv, train_op_fc_w, train_op_fc_b)", "def update_target_net(self, sess):\n sess.run(self.update_target_net_op)", "def optimize(self):\n\n self.logger.info(\"Solving with Dynamic Slope Scaling Procedure in Julia :\")\n optimization_start = time.time()\n\n # 1. 
Preprocess for old network graph\n if self.old_network_graph is not None:\n\n # DSSP on old network\n old_network_obj = sum(list(nx.get_node_attributes(self.old_network_graph, config.BUILDING_CONSUMPTION_KEY).values()))-1e-5\n try:\n self.check_infeasibility(self.old_network_graph, old_network_obj)\n except DHCOptimizerException as e:\n e.data = \"Invalid existing network: \" + e.data\n raise e\n\n flows, obj_val = self.optimize_with_dssp_julia(self.old_network_graph, old_network_obj, set())\n self.logger.info(\"Optimization phase time: %.2fs\" % (time.time() - optimization_start))\n solution_old_graph = self.build_solution_graph(self.old_network_graph, flows)\n\n if self.modify_old_network:\n\n # Add max capacity on old edges\n self.old_capacity = deepcopy(flows)\n old_buildings = list(nx.get_node_attributes(self.old_network_graph, config.BUILDING_CONSUMPTION_KEY).values())\n for key in flows:\n if (key[1],key[0],0) not in self.old_capacity and key[1] not in old_buildings:\n self.old_capacity[(key[1],key[0],0)] = self.old_capacity[key]\n\n # Add Imaginary edges\n for edge in self.old_capacity:\n if self.optimization_graph.has_edge(*edge):\n\n # add nodes\n if not self.optimization_graph.has_node(config.IM_PREFIX+edge[0]):\n self.optimization_graph.add_node(config.IM_PREFIX+edge[0])\n self.optimization_graph.nodes[config.IM_PREFIX+edge[0]][config.GPD_GEO_KEY] = \\\n self.optimization_graph.nodes[edge[0]][config.GPD_GEO_KEY]\n if not self.optimization_graph.has_node(config.IM_PREFIX+edge[1]):\n self.optimization_graph.add_node(config.IM_PREFIX+edge[1])\n self.optimization_graph.nodes[config.IM_PREFIX+edge[1]][config.GPD_GEO_KEY] = \\\n self.optimization_graph.nodes[edge[1]][config.GPD_GEO_KEY]\n # add edges\n if not self.optimization_graph.has_edge(edge[0],config.IM_PREFIX+edge[0]):\n self.optimization_graph.add_edge(edge[0],config.IM_PREFIX+edge[0])\n if not self.optimization_graph.has_edge(config.IM_PREFIX+edge[0],config.IM_PREFIX+edge[1]):\n self.optimization_graph.add_edge(config.IM_PREFIX+edge[0],config.IM_PREFIX+edge[1])\n if not self.optimization_graph.has_edge(config.IM_PREFIX+edge[1],edge[1]):\n self.optimization_graph.add_edge(config.IM_PREFIX+edge[1],edge[1])\n\n # put cost\n self.optimization_graph.edges[(config.IM_PREFIX+edge[0],config.IM_PREFIX+edge[1],0)][config.EDGE_COST_KEY] = \\\n self.optimization_graph.edges[(edge[0],edge[1],0)][config.EDGE_COST_KEY]\n self.optimization_graph.edges[(edge[0],edge[1],0)][config.EDGE_COST_KEY] = 1e-5\n self.optimization_graph.edges[(edge[0],config.IM_PREFIX+edge[0],0)][config.EDGE_COST_KEY] = 1e-5\n self.optimization_graph.edges[(config.IM_PREFIX+edge[1],edge[1],0)][config.EDGE_COST_KEY] = 1e-5\n\n else:\n # if we don't modify the old network, we have to change the capacity of the supplies\n already_consummed = {}\n for edge in solution_old_graph.edges():\n if solution_old_graph.nodes[edge[0]].get(config.NODE_TYPE_KEY) == config.SUPPLY_NODE_TYPE:\n already_consummed[edge[0]] = already_consummed.get(edge[0], 0) + \\\n solution_old_graph.edges[edge][config.SOLUTION_POWER_FLOW_KEY]\n for source in already_consummed:\n if already_consummed[source] <= self.optimization_graph.nodes[source][config.SUPPLY_POWER_CAPACITY_KEY]:\n self.optimization_graph.nodes[source][config.SUPPLY_POWER_CAPACITY_KEY] -= already_consummed[source]\n self.network_objective -= already_consummed[source]\n else:\n self.network_objective -= self.optimization_graph.nodes[source][config.SUPPLY_POWER_CAPACITY_KEY]\n 
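# A minimal sketch of the per-parameter-group learning rates that
# make_optimizer above assigns (conv layers at the base lr, fc weights at
# 10x, fc biases at 20x), reduced to two groups. The layer shapes and SGD
# hyperparameters are assumptions chosen for illustration.
import torch
import torch.nn as nn

base_lr = 0.01
backbone = nn.Linear(16, 8)  # stands in for the conv trunk
head = nn.Linear(8, 2)       # stands in for the fc layers that learn faster

optimizer = torch.optim.SGD(
    [
        {"params": backbone.parameters()},                    # default lr below
        {"params": head.parameters(), "lr": base_lr * 10.0},  # 10x multiplier
    ],
    lr=base_lr,
    momentum=0.9,
    weight_decay=5e-4,
)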
self.optimization_graph.nodes[source][config.SUPPLY_POWER_CAPACITY_KEY] = 0\n\n # Remove edges from old network\n edges_to_remove = set()\n for e in self.optimization_graph.edges():\n if self.old_network_graph.has_edge(*e) or self.old_network_graph.has_edge(e[1],e[0]):\n edges_to_remove.add(e)\n self.optimization_graph.remove_edges_from(edges_to_remove)\n\n # Remove isolated buildings of optimization graph\n isolated_to_remove = set()\n for e in self.old_network_graph.edges():\n if e[0] in self.old_network_graph.nodes() and \\\n self.optimization_graph.nodes[e[1]].get(config.NODE_TYPE_KEY) == config.BUILDING_NODE_TYPE:\n isolated_to_remove.add(e)\n self.optimization_graph.remove_edges_from(isolated_to_remove)\n\n # Remove buildings from old network\n for n, data in self.old_network_graph.nodes(data=True):\n if data.get(config.NODE_TYPE_KEY) == config.BUILDING_NODE_TYPE:\n self.optimization_graph.remove_node(n)\n\n # Re-link sources\n sources = set()\n for n, data in self.optimization_graph.nodes(data=True):\n if data.get(config.NODE_TYPE_KEY) == config.SUPPLY_NODE_TYPE:\n sources.add(n)\n source_graph = self.optimization_graph.subgraph(sources).copy()\n self.optimization_graph.remove_nodes_from(sources)\n gnx.remove_isolates(self.optimization_graph)\n node_filter = lambda n: self.optimization_graph.nodes.get(n,{}).get(config.NODE_TYPE_KEY) != config.BUILDING_NODE_TYPE\n gnx.spatial_points_merge(self.optimization_graph, source_graph.nodes_to_gdf(), node_filter=node_filter, inplace=True)\n\n # fill missing information\n gnx.fill_edges_missing_geometry_attributes(self.optimization_graph)\n gnx.fill_length_attribute(self.optimization_graph, config.EDGE_LENGTH_KEY, only_missing=True)\n gnx.fill_length_attribute(self.optimization_graph, config.EDGE_COST_KEY, only_missing=True)\n for e in self.optimization_graph.edges(keys=True):\n self.optimization_graph.edges[e][config.LEASTCOST_COEF_KEY] = \\\n self.optimization_graph.edges[e].get(config.LEASTCOST_COEF_KEY,0)\n\n\n\n # 2. Process the DSSP on optimization graph\n self.check_is_ready()\n self.check_infeasibility(self.optimization_graph, self.network_objective)\n\n if self.old_network_graph is not None and self.modify_old_network:\n old_buildings = set(nx.get_node_attributes(self.old_network_graph, config.BUILDING_CONSUMPTION_KEY).keys())\n else:\n old_buildings = set()\n flows, obj_val = self.optimize_with_dssp_julia(self.optimization_graph, self.network_objective, old_buildings,postprocess= (not self.modify_old_network))\n self.logger.info(\"Optimization phase time: %.2fs\" % (time.time() - optimization_start))\n self.solution_graph = self.build_solution_graph(self.optimization_graph, flows, self.connected)\n\n # 3. 
Postprocess for old network graph\n if self.old_network_graph is not None:\n \n if self.modify_old_network:\n # Put the right supply capacity and cost\n for edge in self.old_capacity:\n if self.solution_graph.has_edge(edge[0],edge[1]):\n self.solution_graph.edges[(edge[0],edge[1])][config.EDGE_COST_KEY] = \\\n self.optimization_graph.edges[(config.IM_PREFIX+edge[0],config.IM_PREFIX+edge[1],0)][config.EDGE_COST_KEY]\n \n # Remove imaginary edges\n imaginary_nodes_to_remove = set()\n nodes_to_relabel = {}\n for edge in self.solution_graph.edges():\n if str(edge[0]).startswith(config.IM_PREFIX) and str(edge[1]).startswith(config.IM_PREFIX):\n real_edge = edge[0][len(config.IM_PREFIX):],edge[1][len(config.IM_PREFIX):]\n self.old_capacity[(real_edge[0], real_edge[1], 0)] = pd.np.inf\n self.old_capacity[(real_edge[1], real_edge[0], 0)] = pd.np.inf\n if not self.solution_graph.has_edge(*real_edge):\n for i in range(2):\n nodes_to_relabel[edge[i]] = real_edge[i]\n else:\n self.solution_graph.edges[real_edge[0],real_edge[1]][config.SOLUTION_POWER_FLOW_KEY] += \\\n self.solution_graph.edges[edge].get(config.SOLUTION_POWER_FLOW_KEY,0)\n imaginary_nodes_to_remove.add(edge[0])\n imaginary_nodes_to_remove.add(edge[1])\n elif str(edge[0]).startswith(config.IM_PREFIX):\n imaginary_nodes_to_remove.add(edge[0])\n elif str(edge[1]).startswith(config.IM_PREFIX):\n imaginary_nodes_to_remove.add(edge[1])\n\n nx.relabel_nodes(self.solution_graph, nodes_to_relabel, copy=False)\n self.solution_graph.remove_nodes_from(list(imaginary_nodes_to_remove))\n for node in nodes_to_relabel.values():\n if self.solution_graph.has_edge(node, node):\n self.solution_graph.remove_edge(node, node)\n\n else:\n for source in nx.get_node_attributes(self.solution_graph, config.SUPPLY_POWER_CAPACITY_KEY):\n self.solution_graph.nodes[source][config.SUPPLY_POWER_CAPACITY_KEY] += already_consummed.get(source,0)\n self.optimization_graph.nodes[source][config.SUPPLY_POWER_CAPACITY_KEY] += already_consummed.get(source,0)\n\n return flows, obj_val", "def update(self, ex):\n if not self.optimizer:\n raise RuntimeError('No optimizer set.')\n\n # Train mode\n self.network.train()\n\n if self.use_cuda:\n for key in ex:\n #if isinstance(ex[key], torch.Tensor):\n try:\n ex[key] = ex[key].cuda(non_blocking=True)\n except:\n pass\n\n # Run forward\n net_loss = self.network(ex)\n\n loss = net_loss[\"total_loss\"]\n\n loss.backward()\n\n clip_grad_norm_(self.network.parameters(), self.args.grad_clipping)\n self.optimizer.step()\n self.optimizer.zero_grad()\n\n self.updates += 1\n return {\n 'loss': loss,\n \"loc_loss\": net_loss[\"loc_loss\"],\n \"fix_loss\": net_loss[\"target_loss\"],\n }", "def update(self, batch, batch_index):\n super(BayesianOptimization, self).update(batch, batch_index)\n self.state['n_evidence'] += self.batch_size\n\n params = batch_to_arr2d(batch, self.target_model.parameter_names)\n self._report_batch(batch_index, params, batch[self.target_name])\n\n optimize = self._should_optimize()\n self.target_model.update(params, batch[self.target_name], optimize)\n if optimize:\n self.state['last_GP_update'] = self.target_model.n_evidence", "def ppo_update(policy_model, policy_optimizer, baseline_model, baseline_optimizer, baseline_criterion,\n ppo_epoch, minibatch_size, obs_n_tensor, log_prob_n_old_tensor, action_n_tensor, rewards_n, mask_n,\n gamma, lam, update_policy=True, update_baseline=True):\n\n ## get number of data\n n_data = obs_n_tensor.shape[0]\n ## get baseline estimations\n baseline_n = baseline_model(obs_n_tensor).detach() 
## between -1 and 1\n ## get q values, used for scaling baseline values\n q_n = get_reward_to_go(rewards_n, mask_n, gamma)\n q_n = Tensor(q_n).reshape(-1, 1)\n q_n_mean = q_n.mean()\n q_n_std = q_n.std() + 1e-2\n ## get scaled baseline values\n baseline_n_scaled = baseline_n * q_n_std + q_n_mean\n\n ## get advantage and scaled baseline targets\n ## the adv returned is normalized, baseline_target_scaled is not normalized\n adv_n, baseline_target_n_scaled = get_gae_advantage(rewards_n, baseline_n_scaled, mask_n, gamma, lam)\n adv_n = adv_n.detach()\n\n ## get baseline targets\n baseline_target_n = ((baseline_target_n_scaled - q_n_mean) / q_n_std).detach() ## target now is normalized\n\n ## NOTE MAKE SURE YOU DETACH THINGS OTHERWISE YOU WILL HAVE VERY STRANGE ERRORS\n\n ## for each training iteration, do some epoches\n for i_epoch in range(ppo_epoch):\n ## ppo: shuffle data\n shuffle_indexes = torch.randperm(n_data)\n obs_n_tensor = obs_n_tensor[shuffle_indexes]\n adv_n = adv_n[shuffle_indexes]\n log_prob_n_old_tensor = log_prob_n_old_tensor[shuffle_indexes]\n action_n_tensor = action_n_tensor[shuffle_indexes]\n baseline_target_n = baseline_target_n[shuffle_indexes]\n\n ## after shuffle data, we do the minibatch ppo update\n n_minibatch = int(n_data / minibatch_size)\n for i_minibatch in range(n_minibatch):\n ## get minibatch\n istart = i_minibatch * minibatch_size\n iend = (i_minibatch + 1) * minibatch_size\n obs_m = obs_n_tensor[istart:iend]\n adv_m = adv_n[istart:iend]\n log_prob_old_m = log_prob_n_old_tensor[istart:iend]\n action_m = action_n_tensor[istart:iend]\n baseline_target_m = baseline_target_n[istart:iend]\n\n ## update baseline\n if update_baseline:\n baseline_m = baseline_model(obs_m)\n baseline_optimizer.zero_grad()\n baseline_loss = baseline_criterion(baseline_m, baseline_target_m)\n baseline_loss.backward()\n baseline_optimizer.step()\n\n ## update policy\n ## we need the new policy's log probs so that we can calculate importance sampling term\n if update_policy:\n mu, log_sigma = policy_model(obs_m)\n normal_dist = Normal(mu, log_sigma.exp())\n log_prob_new_m = normal_dist.log_prob(action_m)\n log_prob_new_m = torch.sum(log_prob_new_m, dim=1).reshape(-1, 1)\n ## now we get the importance sampling weight term\n is_term_m = (log_prob_new_m - log_prob_old_m).exp()\n\n ## compute the clipped surrogate\n epsilon = 0.2\n first_term = is_term_m * adv_m\n clipped_term = is_term_m.clamp(1 - epsilon, 1 + epsilon) * adv_m\n obj_term = torch.min(first_term, clipped_term)\n obj_sum = obj_term.sum()\n policy_loss = -obj_sum / n_data\n policy_optimizer.zero_grad()\n policy_loss.backward()\n policy_optimizer.step()", "def update_policy_network(self):\r\n self.send(self.server_conn, (sys._getframe().f_code.co_name, {}))", "def add_optimizer(self):\n \n with tf.variable_scope(\"optimizer\"):\n\n # Define optimizer and minimize loss\n if self.OPTIM == \"RMSProp\":\n self.optimizer = tf.train.RMSPropOptimizer(self.LEARN_RATE).\\\n minimize(self.cost)\n \n elif self.OPTIM == \"GD\":\n self.optimizer = tf.train.GradientDescentOptimizer(self.LEARN_RATE).\\\n minimize(self.cost)\n \n elif self.OPTIM == \"Adam\":\n self.optimizer = tf.train.AdamOptimizer(self.LEARN_RATE).\\\n minimize(self.cost)\n\n # Merge all summaries for tensorboard\n #self.tbsummaries = tf.summary.merge_all()", "def step_and_update_lr(self):\r\n self._update_learning_rate()\r\n self._optimizer.step()", "def update_network(self):\n\n device = torch.device(\"cpu\")\n self.model = ProLoNet(input_dim=13,\n weights=None,\n 
comparators=None,\n leaves=32,\n output_dim=1,\n bayesian_embedding_dim=8,\n alpha=1.5,\n use_gpu=False,\n vectorized=True,\n is_value=True).to(device)\n\n self.embedding_optimizer = torch.optim.RMSprop([{'params': self.model.bayesian_embedding.parameters()}], lr=.1)\n self.embedding_list = [torch.ones(3) * 1 / 3 for i in range(2000)]\n self.opt = torch.optim.RMSprop(\n [{'params': list(self.model.parameters())[:-1]}, {'params': self.model.bayesian_embedding.parameters(), 'lr': .01}], lr=.01)\n\n criterion = torch.nn.BCELoss()\n\n n_epochs = 4000 + self.global_schedule_num * 3\n for epoch in range(n_epochs):\n which_schedule = np.random.randint(len(self.data_so_far))\n timestep_within_schedule = np.random.randint(len(self.teacher_actions[which_schedule]))\n\n index_within_network_state = timestep_within_schedule * 20\n timestep_data_from_agg = self.data_so_far[which_schedule][index_within_network_state:index_within_network_state+20]\n task = self.teacher_actions[which_schedule][timestep_within_schedule]\n # set the embedding\n self.model.set_bayesian_embedding(self.embedding_list[which_schedule].clone())\n # update loop\n\n phi_i_num = task\n phi_i = self.get_features_from_timestep_data_from_agg(timestep_data_from_agg, phi_i_num)\n phi_i_numpy = np.asarray(phi_i)\n loss_counter = 0\n # iterate over pairwise comparisons\n for counter in range(0, 0 + 20):\n if counter == phi_i_num:\n continue\n else:\n phi_j = self.get_features_from_timestep_data_from_agg(timestep_data_from_agg, counter)\n phi_j_numpy = np.asarray(phi_j)\n feature_input = phi_i_numpy - phi_j_numpy\n\n if self.use_gpu:\n feature_input = Variable(torch.Tensor(feature_input.reshape(1, 13)).cuda())\n label = Variable(torch.Tensor(torch.ones((1, 1))).cuda())\n else:\n feature_input = Variable(torch.Tensor(feature_input.reshape(1, 13)))\n label = Variable(torch.Tensor(torch.ones((1, 1))))\n sig = torch.nn.Sigmoid()\n output = sig(self.model(feature_input))\n loss = criterion(output, label)\n # prepare optimizer, compute gradient, update params\n loss_counter += loss.item()\n self.opt.zero_grad()\n loss.backward()\n # torch.nn.utils.clip_grad_norm_(self.model.parameters(), 0.5)\n self.opt.step()\n\n for counter in range(0, 0 + 20):\n if counter == phi_i_num:\n continue\n else:\n phi_j = self.get_features_from_timestep_data_from_agg(timestep_data_from_agg, counter)\n phi_j_numpy = np.asarray(phi_j)\n feature_input = phi_j_numpy - phi_i_numpy\n\n if self.use_gpu:\n feature_input = Variable(torch.Tensor(feature_input.reshape(1, 13)).cuda())\n label = Variable(torch.Tensor(torch.zeros((1, 1))).cuda())\n else:\n feature_input = Variable(torch.Tensor(feature_input.reshape(1, 13)))\n label = Variable(torch.Tensor(torch.zeros((1, 1))))\n sig = torch.nn.Sigmoid()\n output = sig(self.model.forward(feature_input))\n\n self.opt.zero_grad()\n loss = criterion(output, label)\n loss_counter += loss.item()\n\n loss.backward()\n # torch.nn.utils.clip_grad_norm_(self.model.parameters(), 0.5)\n self.opt.step()\n self.loss_array.append(loss_counter / 38)\n self.embedding_list[which_schedule] = torch.Tensor(self.model.get_bayesian_embedding().detach().cpu().numpy()).clone() # very ugly", "def update_network(self, loss_dict):\r\n loss = sum(loss_dict.values())\r\n self.optimizer.zero_grad()\r\n loss.backward()\r\n self.optimizer.step()", "def reset_optimizer(self, opt = tfk.optimizers.Adam):\n self.optimizer = opt(1e-4)\n return", "def update_weights(architecture,grad_weights,grad_bias,m,v,t,lr,optimizer=\"adam\"):\n \n for layer in 
range(len(architecture)):\n if not (grad_weights['layer{}'.format(layer+1)] is None) and grad_bias['layer{}'.format(layer+1)] is not None:\n grad_weightsi = grad_weights['layer{}'.format(layer+1)]\n grad_weightsi /= bs\n grad_biasi = grad_bias['layer{}'.format(layer+1)]\n grad_biasi /= bs\n\n \n if optimizer.lower()==\"sgd\":\n # Mini-Batch SGD\n qw = lr*grad_weightsi\n qb = lr*grad_biasi\n else:\n # Mini-Batch Adam\n mw,mb = m['layer{}'.format(layer+1)]\n vw,vb = v['layer{}'.format(layer+1)]\n qw,mw,vw = adam(grad_weightsi,beta_1,beta_2,mw,vw,t,lr) # Have obtained dw\n qb,mb,vb = adam(grad_biasi,beta_1,beta_2,mb,vb,t,lr) # Have obtained db\n\n architecture['layer{}'.format(layer+1)][2].requires_grad = False\n architecture['layer{}'.format(layer+1)][3].requires_grad = False\n # Updating weights and biases now\n try:\n architecture['layer{}'.format(layer+1)][2] -= torch.Tensor(qw)\n except:\n architecture['layer{}'.format(layer+1)][2] -= torch.t(torch.Tensor(qw))\n try:\n architecture['layer{}'.format(layer+1)][3] -= torch.Tensor(qb)\n except:\n architecture['layer{}'.format(layer+1)][3] -= torch.t(torch.Tensor(qb))\n\n m['layer{}'.format(layer+1)][0] = torch.Tensor(mw)\n m['layer{}'.format(layer+1)][1] = torch.Tensor(mb)\n v['layer{}'.format(layer+1)][0] = torch.Tensor(vw)\n v['layer{}'.format(layer+1)][1] = torch.Tensor(vb)\n grad_weights['layer{}'.format(layer+1)] = torch.zeros(grad_weightsi.shape)\n grad_bias['layer{}'.format(layer+1)] = torch.zeros(grad_biasi.shape)\n return grad_weights,grad_bias,m,v", "def PPO(parclass):\n class PPO(parclass):\n \"\"\"\n Proximal Policy Optimization algorithm (PPO).\n Requires parent class inherited from A2C.\n Based on: https://arxiv.org/abs/1707.06347\n \n Args:\n ppo_clip - clipping rate of pi_new / pi_old fraction\n epochs - number of epochs to run through rollout on each update\n batch_size - size of mini-batch to select without replacement on each gradient ascent step\n \"\"\"\n __doc__ += parclass.__doc__\n PARAMS = parclass.PARAMS | {\"ppo_clip\", \"epochs\", \"batch_size\"} \n \n def __init__(self, config):\n super().__init__(config)\n \n self.config.setdefault(\"ppo_clip\", 0.2)\n self.config.setdefault(\"epochs\", 3)\n self.config.setdefault(\"batch_size\", 32)\n \n assert self.env.num_envs * self.config.rollout >= self.config.batch_size, \"batch_size is bigger than rollout * number of threads!\"\n \n def optimized_function(self):\n # advantage is estimated using advantage function for previous policy\n # so we take advantage from original rollout\n advantages = (self.returns_b - self.old_values_b).detach() #self.advantages_b.detach() - КОСТЫЛЬ\n \n # importance sampling for making an update of current policy using samples from old policy\n # the gradients to policy will flow through the numerator.\n ratio = torch.exp(self.action_log_probs_b - self.old_action_log_probs_b.detach())\n \n # PPO clipping! 
Prevents from \"too high updates\".\n surr1 = ratio * advantages\n surr2 = torch.clamp(ratio, 1.0 - self.config.ppo_clip, 1.0 + self.config.ppo_clip) * advantages\n return -torch.min(surr1, surr2)\n \n def update(self):\n \"\"\"One step of optimization based on rollout memory\"\"\"\n with torch.no_grad():\n self.preprocess_rollout()\n \n # DEEP-RL TUTORIALS: КОСТЫЛЬ\n #self.advantages = self.returns[:-1] - self.values[:-1]\n #self.advantages = (self.advantages - self.advantages.mean()) / (self.advantages.std() + 1e-5)\n \n # going through rollout several (config.epochs) times:\n for epoch in range(self.config.epochs):\n # TODO: drop last = False? What if there is 1 sample?\n sampler = BatchSampler(SubsetRandomSampler(range(self.env.num_envs * self.config.rollout)), self.config.batch_size, drop_last=False)\n \n for indices in sampler:\n # retrieving new batch as part of rollout\n self.returns_b = self.returns.view(-1, *self.config.value_repr_shape)[indices]\n self.old_values_b = self.values.view(-1, *self.config.value_repr_shape)[indices]\n self.old_action_log_probs_b = self.action_log_probs.view(-1)[indices]\n #self.advantages_b = self.advantages.view(-1)[indices] # КОСТЫЛЬ\n \n # calculating current value, action_log_prob, entropy\n dist, self.values_b = self.policy(self.observations.view(-1, *self.config.observation_shape)[indices])\n self.values_b = self.values_b.squeeze() # IMPORTANT ([32] - [32, 1] problem)\n self.action_log_probs_b = dist.log_prob(self.actions.view(-1, *self.config.actions_shape)[indices])#.sum(dim=-1) \n self.entropy_b = dist.entropy()#.sum(dim=-1)\n \n # performing step\n self.gradient_ascent_step()\n return PPO", "def optimize_parameters(self):\n # forward\n for i in range(min(self.big_iter+1,len(self.orders_rev))):\n if(self.orders_rev):\n # compute fake images and reconstruction images.\n self.forward(i,False)\n # G_A and G_B\n # Ds require no gradients when optimizing Gs\n self.set_requires_grad(self.netD, False)\n # set G_A and G_B's gradients to zero\n self.optimizers_G[self.orders_rev[i]].zero_grad()\n # calculate gradients for G_A and G_B\n self.backward_G(i,False)\n # update G_A and G_B's weights\n self.optimizers_G[self.orders_rev[i]].step()\n # D_A and D_B\n self.set_requires_grad(self.netD, True)\n self.optimizer_D.zero_grad() \n self.backward_D(i,False) \n self.optimizer_D.step() \n else:\n self.optimizer_D.zero_grad() # set D_A and D_B's gradients to zero\n self.backward_DY() # calculate gradients for D_A\n self.optimizer_D.step()\n for i in range(min(self.big_iter+1, len(self.orders))):\n if(self.orders):\n if(i>0):\n self.real_A = self.fake_B.detach()\n self.forward(i,True) # compute fake images and reconstruction images.\n # G_A and G_B\n # Ds require no gradients when optimizing Gs\n self.set_requires_grad(self.netD, False)\n # set G_A and G_B's gradients to zero\n self.optimizers_G[self.orders[i]].zero_grad()\n self.backward_G(i,True) # calculate gradients for G_A and G_B\n # update G_A and G_B's weights\n self.optimizers_G[self.orders[i]].step()\n # D_A and D_B\n self.set_requires_grad(self.netD, True)\n self.optimizer_D.zero_grad() # set D_A and D_B's gradients to zero\n self.backward_D(i,True) # calculate gradients for D_A\n self.optimizer_D.step() \n else:\n self.optimizer_D.zero_grad() # set D_A and D_B's gradients to zero\n self.backward_DX() # calculate gradients for D_A\n self.optimizer_D.step() \n self.current_label=self.labels[0]\n self.current_order=self.orders\n self.current_pred = 
np.concatenate((self.pred_real.detach().cpu().numpy().mean(\n axis=2).mean(axis=2), self.pred_fake.detach().cpu().numpy().mean(axis=2).mean(axis=2)))", "def optimize_model(optimizer, policy_net, target_net, memory_batch):\n state_batch, action_batch, reward_batch, next_state_batch, done_batch = memory_batch\n state_batch =state_batch.to(device, torch.float32)\n action_batch = action_batch.to(device, torch.int64).view(-1,1)\n reward_batch = reward_batch.to(device, torch.float32)\n next_state_batch = next_state_batch.to(device, torch.float32)\n done_batch = done_batch.to(device, torch.float32)\n\n # Compute Q(s_t, a) - the model computes Q(s_t), then we select the\n # columns of actions taken\n state_action_values = policy_net(state_batch).gather(1, action_batch)\n\n # Compute V(s_{t+1}) for all next states.\n with torch.no_grad():\n next_state_action_values = target_net(next_state_batch)\n next_state_values = next_state_action_values.max(1)[0]\n next_state_values = next_state_values * (1 - done_batch) # no reward if this episode is done.\n # Compute the expected Q values\n expected_state_action_values = (next_state_values * gamma) + reward_batch\n expected_state_action_values = expected_state_action_values.unsqueeze(1)\n\n # Compute Huber loss\n assert expected_state_action_values.requires_grad == False\n assert state_action_values.requires_grad == True\n loss = F.smooth_l1_loss(state_action_values, expected_state_action_values)\n\n # Optimize the model\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n return loss", "def optimizer(grad, method, init_par, alpha, delta, plx_obs, mualpha_obs, mudelta_obs, vrad_obs, sigma_obs, sigma_vrad, ccoeff, N):\r\n\r\n\t\r\n\tif grad == 'NO':\r\n\t\tif method == 'Powell' :\r\n\t\t\tres = opt.minimize(Ulike,init_par, method = method,\r\n\t\t\t args = (alpha, delta, plx_obs, mualpha_obs,mudelta_obs, vrad_obs, sigma_obs, sigma_vrad, ccoeff, N))\r\n\t\t\treturn res.x, res.nit\r\n\t\telif method == 'Nelder-Mead':\r\n\t\t\tres = opt.minimize(Ulike,init_par, method = method,\r\n\t\t\t args = (alpha, delta, plx_obs, mualpha_obs,mudelta_obs, vrad_obs, sigma_obs, sigma_vrad, ccoeff, N),\r\n\t\t\t\t options = {'ftol': 0.0001})\r\n\t\t\treturn res.x, res.nit\r\n\t\telif method == 'default':\r\n\t\t\tres = opt.minimize(Ulike,init_par, \r\n\t\t\t args = (alpha, delta, plx_obs, mualpha_obs,mudelta_obs, vrad_obs, sigma_obs, sigma_vrad, ccoeff, N))\r\n\t\t\treturn res.x, res.nit\r\n\r\n\telif grad == 'YES':\r\n\t\tres = opt.minimize(Ulike, init_par, method = method, jac = stella_grad_full, \r\n \t\t\t args = (alpha, delta, plx_obs, mualpha_obs,mudelta_obs, vrad_obs, sigma_obs, sigma_vrad, ccoeff, N),\r\n\t\t\t options={'disp': True, 'maxiter': 4000, 'xtol': 1e-4})\r\n\t\treturn res.x, res.nit \r\n\t\t\t\r\n\t\t\r\n\telif grad == 'HESS':\r\n\t\tres = opt.minimize(Ulike, init_par, method = method, jac = stella_grad_full, hess = stella_hessian,\r\n\t\t\t\t\t args = (alpha, delta, plx_obs, mualpha_obs,mudelta_obs, vrad_obs, sigma_obs, sigma_vrad, ccoeff, N),\r\n\t\t\t\t\t options = {'disp': True, 'maxiter': 4000, 'xtol': 1.e-06}) \r\n\t\treturn res.x, res.nit", "def add_optimizer_op(self, scope):\r\n\r\n optimizer = tf.train.AdamOptimizer(learning_rate=self.lr)\r\n var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=scope)\r\n grads_and_vars = optimizer.compute_gradients(self.loss, var_list=var_list)\r\n if self.config.grad_clip:\r\n grads_and_vars = [(tf.clip_by_norm(gv[0], self.config.clip_val), gv[1]) for gv in grads_and_vars if gv[0] != 
None]\r\n self.train_op = optimizer.apply_gradients(grads_and_vars)\r\n self.grad_norm = tf.global_norm([gv[0] for gv in grads_and_vars])", "def _set_optimizer(self):\n\n if self.optimizer_name == 'Adam':\n self.optimizer = optim.Adam(self.net.parameters(),\n lr=self.learning_rate,\n betas=self.betas,\n eps=1e-8,\n weight_decay=self.weight_decay)\n elif self.optimizer_name == 'SGD':\n self.optimizer = optim.SGD(self.net.parameters(),\n lr=self.learning_rate,\n momentum=self.momentum,\n weight_decay=self.weight_decay)\n elif self.optimizer_name == 'SGD_Nesterov':\n self.optimizer = optim.SGD(self.net.parameters(),\n lr=self.learning_rate,\n momentum=self.momentum,\n weight_decay=self.weight_decay,\n nesterov=True)\n elif self.optimizer_name == 'RMSprop':\n self.optimizer = optim.Adagrad(self.net.parameters(),\n lr=self.learning_rate,\n momentum=self.momentum,\n weight_decay=self.weight_decay)\n elif self.optimizer_name == 'Adagrad':\n self.optimizer = optim.Adagrad(self.net.parameters(),\n lr=self.learning_rate,\n weight_decay=self.weight_decay)\n else:\n print(\"Optimizer '\" + self.optimizer_name + \"' not implemented.\")", "def _update_nn(self, bad_feats, good_feats, rate):\n # TODO: this is just adding another dimension to fit the parallelized scoring\n # (even if updates are not parallelized). Make it nicer.\n bad_feats = ([bad_feats[0]], [bad_feats[1]])\n good_feats = ([good_feats[0]], [good_feats[1]])\n\n cost_gcost = self.nn.update(*(bad_feats + good_feats + (rate,)))\n log_debug('Cost:' + str(cost_gcost[0]))\n param_vals = [param.get_value() for param in self.nn.params]\n log_debug('Param norms : ' + str(self._l2s(param_vals)))\n log_debug('Gparam norms: ' + str(self._l2s(cost_gcost[1:])))\n l1_params = param_vals[2]\n log_debug('Layer 1 parts :' + str(self._l2s([l1_params[0:100, :], l1_params[100:200, :],\n l1_params[200:350, :], l1_params[350:500, :],\n l1_params[500:, :]])))\n l1_gparams = cost_gcost[3]\n log_debug('Layer 1 gparts:' + str(self._l2s([l1_gparams[0:100, :], l1_gparams[100:200, :],\n l1_gparams[200:350, :], l1_gparams[350:500, :],\n l1_gparams[500:, :]])))", "def __update(self, learning_rate):\n for layer in self.layers:\n layer.weights.set_value((layer.weights - learning_rate * layer.dW).eval())\n layer.biases.set_value((layer.biases - learning_rate * layer.db).eval())", "def _policy_improvement(self) -> Tuple[np.ndarray, np.ndarray]:\n # Start with a (random) policy\n policy = np.zeros([self.state_dim, self.action_dim])\n V = np.zeros([self.state_dim])\n #random init the policy\n for s in range(self.state_dim):\n policy[s,0] = 0.0\n policy[s,1] = 0.0\n policy[s,2] = 1.0\n\n V = self._policy_eval(policy)\n\n policy_stable = False\n dr = 0.9\n\n while (policy_stable != True):\n policy_stable = True\n for s in self.mdp._state_dict:\n old_action = (policy[self.mdp._state_dict[s]]).tolist()\n action_dict = {}\n for a in self.mdp._action_dict:\n temp = 0.0\n for next_s in self.mdp._state_dict:\n p = self.mdp.P[self.mdp._state_dict[s],self.mdp._action_dict[a],self.mdp._state_dict[next_s]]\n r = self.mdp.R[self.mdp._state_dict[s],self.mdp._action_dict[a],self.mdp._state_dict[next_s]]\n Vs = V[self.mdp._state_dict[next_s]]\n temp = temp + p * (r + dr * Vs)\n action_dict[self.mdp._action_dict[a]]= temp \n max_act = max(action_dict.values())\n V[self.mdp._state_dict[s]] = max_act\n res = [t for t,v in action_dict.items() if v == max_act][0]\n for opt in range(self.action_dim):\n if opt == res:\n policy[self.mdp._state_dict[s],opt] = 1.0\n else:\n 
policy[self.mdp._state_dict[s],opt] = 0.0\n if (old_action - policy[self.mdp._state_dict[s]]).any() == True:\n \n policy_stable = False\n if policy_stable == False:\n V = self._policy_eval(policy)\n \n return policy, V", "def get_adv_optimizer(self, mode: str) -> torch.optim.Optimizer:\n pass", "def update(\r\n params: hk.Params,\r\n opt_state: OptState,\r\n batch, label, agreement\r\n ) -> Tuple[hk.Params, OptState]:\r\n # grads = jax.grad(loss)(params, batch, label)\r\n # grads_masked = (gradient_per_sample if use_ilc else gradient)(params, batch, label) # (gradient_per_sample)(params, batch, label)\r\n # sum_grad_masked_regularized = jax.tree_multimap(lambda x,y:x+y,grads_masked,gradient_reg(params))\r\n # grads = sum_grad_masked_regularized\r\n # updates, opt_state = opt.update(grads, opt_state)\r\n # new_params = optax.apply_updates(params, updates)\r\n\r\n grads_samples = gradient_per_sample(params, batch, label)\r\n ANDmask = and_mask(agreement)\r\n\r\n masked_grads,_ = ANDmask.update(grads_samples, opt_state)\r\n reg_grads = gradient_reg(params)\r\n\r\n sum_grad_masked_regularized = jax.tree_multimap(lambda x,y:x+y,masked_grads,reg_grads)\r\n \r\n updates,_ = opt.update(sum_grad_masked_regularized, opt_state)\r\n\r\n new_params = optax.apply_updates(params, updates)\r\n\r\n return new_params, opt_state", "def get_optimizer(args, net):\n if args.backbone_lr > 0.0:\n base_params = []\n resnet_params = []\n resnet_name = []\n resnet_name.append('layer0')\n resnet_name.append('layer1')\n #resnet_name.append('layer2')\n #resnet_name.append('layer3')\n #resnet_name.append('layer4')\n len_resnet = len(resnet_name)\n else:\n param_groups = net.parameters()\n\n if args.backbone_lr > 0.0:\n for name, param in net.named_parameters():\n is_resnet = False\n for i in range(len_resnet):\n if resnet_name[i] in name:\n resnet_params.append(param)\n # param.requires_grad=False\n print(\"resnet_name\", name)\n is_resnet = True\n break\n if not is_resnet:\n base_params.append(param)\n\n if args.sgd:\n if args.backbone_lr > 0.0:\n optimizer = optim.SGD([\n {'params': base_params},\n {'params': resnet_params, 'lr':args.backbone_lr}\n ],\n lr=args.lr,\n weight_decay=5e-4, #args.weight_decay,\n momentum=args.momentum,\n nesterov=False)\n else:\n optimizer = optim.SGD(param_groups,\n lr=args.lr,\n weight_decay=5e-4, #args.weight_decay,\n momentum=args.momentum,\n nesterov=False)\n else:\n raise ValueError('Not a valid optimizer')\n\n if args.lr_schedule == 'scl-poly':\n if cfg.REDUCE_BORDER_ITER == -1:\n raise ValueError('ERROR Cannot Do Scale Poly')\n\n rescale_thresh = cfg.REDUCE_BORDER_ITER\n scale_value = args.rescale\n lambda1 = lambda iteration: \\\n math.pow(1 - iteration / args.max_iter,\n args.poly_exp) if iteration < rescale_thresh else scale_value * math.pow(\n 1 - (iteration - rescale_thresh) / (args.max_iter - rescale_thresh),\n args.repoly)\n scheduler = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda1)\n elif args.lr_schedule == 'poly':\n lambda1 = lambda iteration: math.pow(1 - iteration / args.max_iter, args.poly_exp)\n scheduler = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda1)\n else:\n raise ValueError('unknown lr schedule {}'.format(args.lr_schedule))\n\n return optimizer, scheduler", "def soft_update_target_network(self):\n \n pars_behavior = self.model.get_weights() # these have form [W1, b1, W2, b2, ..], Wi = weights of layer i\n pars_target = self.target_model.get_weights() # bi = biases in layer i\n \n for par_behavior,par_target in 
zip(pars_behavior,pars_target):\n par_target = par_target*(1-self.tau) + par_behavior*self.tau\n pars_target[ctr] = par_target\n\n self.target_model.set_weights(pars_target)", "def setOptimizerParams(self,lr,momentum,decay):\n self.optimizer = SGD(lr=lr,momentum=momentum,decay=decay)", "def update_target(self):\n if self.soft:\n self.soft_update_target(self.critic1_target, self.critic1)\n self.soft_update_target(self.critic2_target, self.critic2)\n else:\n if self.learn_cur % self.learn_replace == 0:\n self.critic1_target.load_state_dict(self.critic1.state_dict())\n self.critic2_target.load_state_dict(self.critic2.state_dict())", "def update(self):\n for filter in self.filters:\n filter.update(self.learning_rate)", "def update_policy(self):\n raise UnityTrainerException(\"The update_model method was not implemented.\")", "def compile_update_softmax(nnet, inputs, targets):\n\n floatX = Cfg.floatX\n C = Cfg.C\n\n final_layer = nnet.all_layers[-1]\n trainable_params = lasagne.layers.get_all_params(final_layer,\n trainable=True)\n\n # Regularization\n if Cfg.weight_decay:\n l2_penalty = (floatX(0.5) / C) * get_l2_penalty(nnet, Cfg.include_bias)\n else:\n l2_penalty = T.cast(0, dtype='floatX')\n\n # Backpropagation\n prediction = lasagne.layers.get_output(final_layer, inputs=inputs,\n deterministic=False)\n if Cfg.ad_experiment:\n train_loss = T.mean(l_objectives.binary_crossentropy(\n prediction.flatten(), targets),\n dtype='floatX')\n train_acc = T.mean(l_objectives.binary_accuracy(prediction.flatten(),\n targets),\n dtype='floatX')\n else:\n train_loss = T.mean(l_objectives.categorical_crossentropy(prediction,\n targets),\n dtype='floatX')\n train_acc = T.mean(T.eq(T.argmax(prediction, axis=1), targets),\n dtype='floatX')\n\n\n train_obj = T.cast(train_loss + l2_penalty, dtype='floatX')\n updates = get_updates(nnet, train_obj, trainable_params, solver=nnet.solver)\n nnet.backprop = theano.function([inputs, targets],\n [train_obj, train_acc],\n updates=updates)\n\n # Forwardpropagation\n test_prediction = lasagne.layers.get_output(final_layer, inputs=inputs,\n deterministic=True)\n if Cfg.ad_experiment:\n test_loss = T.mean(l_objectives.binary_crossentropy(\n test_prediction.flatten(), targets), dtype='floatX')\n test_acc = T.mean(l_objectives.binary_accuracy(\n test_prediction.flatten(), targets), dtype='floatX')\n else:\n test_loss = T.mean(l_objectives.categorical_crossentropy(\n test_prediction, targets), dtype='floatX')\n test_acc = T.mean(T.eq(T.argmax(test_prediction, axis=1), targets),\n dtype='floatX')\n test_obj = T.cast(test_loss + l2_penalty, dtype='floatX')\n nnet.forward = theano.function([inputs, targets],\n [test_obj, test_acc, test_prediction,\n l2_penalty, test_loss])", "def optimize_agent(trial):\n\tmodel_params = optimize_ppo2(trial)\n\t\n\t\"\"\"\n\tenv = SubprocVecEnv([make_env(i, agents) for i in range(num_cpu)])\n\tmodel = PPO2(POLICY_TYPE, env, nminibatches=1, **model_params) \n\t# n_steps (int) – The number of steps to run for each environment per update (i.e. rollout buffer size is n_steps * n_envs where n_envs is number of environment copies running in parallel)\n\t# by default n_steps=128. After 128 steps for each env, the policy will be updated. 
If 3 days per game and 2 seq per day, then every update reqires 128/2/3 = 21 games\n\tenv.env_method(\"set_model_reference\", model.get_parameters())\n\t\"\"\"\n\tenv = TradingGameEnv.TradingGameEnv(player_count = NUM_PLAYERS, other_agent_list = agents,\n\t\t\tseq_per_day = SEQ_PER_DAY, cards_per_suit = CARDS_PER_SUIT, player_hand_count = HAND_COUNT,\n\t\t\trandom_seq = True, self_play = SELF_PLAY, policy_type = POLICY_TYPE, self_copy_freq = SELF_COPY_FREQ,\n\t\t\tobs_transaction_history_size=TRANSACTION_HISTORY_SIZE)\n\tmodel = PPO2(POLICY_TYPE, env, nminibatches=1, **model_params) \n\tenv.set_model_reference(model.get_parameters())\n\t\n\t# save a copy of model every 5e4*num_cpu games\n\tcopy_call_back = CustomCallback(model, env)\n\tcall_back_list = [EveryNTimesteps(n_steps=model_params['n_steps']*10, callback=copy_call_back)]\n\n\tmodel.learn(total_timesteps=TRAINING_TIME_STEPS, callback=call_back_list)\n\t\n\t# Evaluate the result against baseline agent\n\tenv = TradingGameEnv.TradingGameEnv(player_count = NUM_PLAYERS, other_agent_list = agents,\n\t\tseq_per_day = SEQ_PER_DAY, cards_per_suit = CARDS_PER_SUIT, player_hand_count = HAND_COUNT,\n\t\trandom_seq = True, self_play = False, obs_transaction_history_size=TRANSACTION_HISTORY_SIZE,\n\t\teval=True)\n\n\tmean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=EVAL_EPISODES)\n\t\n\twith open(\"optuna_params/\"+str(trial.number)+\".txt\", \"w\") as file:\n\t\t# Writing data to a file\n\t\tfile.write(\"mean reward: \" + str(mean_reward) + \"\tstd reward: \" + str(std_reward) +\"\\n\")\n\t\tfile.write(str(model_params))\n\t\n\treturn -1 * mean_reward", "def update_model(self, **kwargs):\n self.__dict__.update(kwargs)\n opt_params = ['optimizer_params', 'optimizer']\n if any(item in kwargs.keys() for item in opt_params):\n self.get_unet_model()", "def update_target(self, target, pred, update_rate):\n for target_param, pred_param in zip(target.parameters(), pred.parameters()):\n target_param.data.copy_((1.0 - update_rate)\n * target_param.data + update_rate * pred_param.data)", "def add_optimizer_op(self, scope):\r\n optimizer = tf.train.AdamOptimizer(learning_rate=self.lr)\r\n variables = tf.get_collection(key=tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope)\r\n\r\n grads_vars = optimizer.compute_gradients(loss=self.loss, var_list=variables)\r\n if self.config.grad_clip: # clip by global norm\r\n grads_vars = [(tf.clip_by_norm(grad, self.config.clip_val), var) if grad is not None else (grad, var) for grad, var in grads_vars]\r\n\r\n self.train_op = optimizer.apply_gradients(grads_and_vars=grads_vars)\r\n self.grad_norm = tf.global_norm([grad for grad, _ in grads_vars]) # we log the clipped norm here\r", "def optimizer(self):\n return self._optimizer", "def update_parameters(parameters, grads, learning_rate):\n pass", "def _create_networks_and_optimizer(self):\n self.policy_net = DeepQNetwork(self.num_inputs,\n self.hidden_layers, \n self.num_actions).to(device)\n self.target_net = DeepQNetwork(self.num_inputs,\n self.hidden_layers, \n self.num_actions).to(device)\n self._update_target_net()\n \n self.optimizer = optim.Adam(self.policy_net.parameters(), \n lr=self.lr, eps=1e-7)", "def _optimize(self):\n # Retrieve all trainable variables\n train_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)\n\n # Compute the gradient (return a pair of variable and their respective gradient)\n grads = self.optimizer.compute_gradients(loss=self.loss, var_list=train_variables)\n self.train_dis = 
self.optimizer.apply_gradients(grads, global_step=self.global_step)", "def __init__(self, optimizer):\n super(ShardedOptimizer, self).__init__(optimizer, name=\"ShardedOptimizer\")", "def compute_optimal_policy(self):\n\n self.theta_history.append(self.theta)\n\n since = time()\n for it in range(self.n_itr):\n print(\"lr: {} | Iteration N: {} \\r\".format(self.lr, it), end=\"\")\n\n self.policy = GaussianPolicy(self.theta, self.sigma)\n\n # Simulate N trajectories\n paths = collect_episodes(\n self.sim, policy=self.policy, horizon=self.T, n_episodes=self.n_episodes)\n\n avg_return = self._compute_performance(paths=paths)\n self.avg_returns.append(avg_return)\n\n # Gradient update\n self.theta += self.update_rule(self.policy.grad_J(\n paths, self.discounts, n_ep=self.n_episodes, T=self.T), lr=self.lr)\n\n # History update\n self.theta_history.append(self.theta)\n\n # print(\"\\nTook {}s\".format(round(time() - since, 2)))\n print(\"lr: {} | Iteration N: {} | Took: {}s\".format(self.lr, self.n_itr, round(time() - since, 2)))", "def soft_update(source_net, target_net, tau):\n for target_param, param in zip(target_net.parameters(), source_net.parameters()):\n target_param.data.copy_(\n target_param.data * (1.0 - tau) + param.data * tau\n )", "def setup_optims(self):\n lr = self.train_config['lr']\n b1 = self.train_config['b1']\n b2 = self.train_config['b2']\n weight_decay = self.train_config['weight_decay']\n self.opt = torch.optim.Adam(self.network.parameters(), lr=lr, betas=(b1, b2),\n weight_decay=weight_decay)", "def update_policy(env, policy, V, discount_factor):\n\n for state in range(env.nS):\n # for a given state compute state-action value.\n action_values = one_step_lookahead(env, state, V, discount_factor)\n\n # choose the action which maximizes the state-action value.\n policy[state] = np.argmax(action_values)\n\n return policy", "def soft_update_target_network(self):\n \n pars_behavior = self.model.get_weights() # these have form [W1, b1, W2, b2, ..], Wi = weights of layer i\n pars_target = self.target_model.get_weights() # bi = biases in layer i\n \n ctr = 0\n for par_behavior,par_target in zip(pars_behavior,pars_target):\n par_target = par_target*(1-self.tau) + par_behavior*self.tau\n pars_target[ctr] = par_target\n ctr += 1\n\n self.target_model.set_weights(pars_target)", "def update(self,parameters, grads):\n \n L = len(parameters) // 2 # number of layers in the neural network\n #print(L)\n\n # Update rule for each parameter. 
Use a for loop.\n for l in range(L):\n \n parameters[\"W\" + str(l+1)] = parameters[\"W\" + str(l+1)] - self.alpha * grads[\"dW\" + str(l+1)]\n \n parameters[\"b\" + str(l+1)] = parameters[\"b\" + str(l+1)] - self.alpha * grads[\"db\" + str(l+1)]\n \n parameters[\"W\" + str(l+1)][np.isnan(parameters[\"W\" + str(l+1)])] = 0\n parameters[\"b\" + str(l+1)][np.isnan(parameters[\"b\" + str(l+1)])] = 0\n \n return parameters", "def set_optimizer_params(self):\n n_params = len(self.optim_params)\n if self.optimizer_name == 'GradientDescent' and n_params == 1:\n self.optimizer = tf.keras.optimizers.SGD(\n learning_rate=self.optim_params[0],\n momentum=0)\n elif self.optimizer_name == 'Momentum' and n_params == 2:\n self.optimizer = tf.keras.optimizers.SGD(\n learning_rate=self.optim_params[0],\n momentum=self.optim_params[1])\n elif self.optimizer_name == 'AdaGrad' and n_params == 2:\n self.optimizer = tf.keras.optimizers.Adagrad(\n learning_rate=self.optim_params[0],\n initial_accumulator_value=self.optim_params[1])\n elif self.optimizer_name == 'AdaDelta' and n_params == 2:\n self.optimizer = tf.keras.optimizers.Adam(\n learning_rate=self.optim_params[0],\n rho=self.optim_params[1])\n elif self.optimizer_name == 'RMSProp' and n_params == 3:\n self.optimizer = tf.keras.optimizers.Adam(\n learning_rate=self.optim_params[0],\n rho=self.optim_params[1],\n momentum=self.optim_params[2])\n elif self.optimizer_name == 'Adam' and n_params == 3:\n self.optimizer = tf.keras.optimizers.Adam(\n learning_rate=self.optim_params[0],\n beta_1=self.optim_params[1],\n beta_2=self.optim_params[2])\n elif self.optimizer_name == 'Nadam' and n_params == 3:\n self.optimizer = tf.keras.optimizers.Nadam(\n learning_rate=self.optim_params[0],\n beta_1=self.optim_params[1],\n beta_2=self.optim_params[2])\n else:\n raise Exception(\"[ERROR] Wrong optimizer or parameters for \"\n \"optimizer\")", "def configure_optimizers(self):\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n optimizer_grouped_parameters = [\n {\n \"params\": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],\n \"weight_decay\": self.args.weight_decay,\n },\n {\n \"params\": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],\n \"weight_decay\": 0.0,\n },\n ]\n if self.optimizer == \"adamw\":\n optimizer = AdamW(optimizer_grouped_parameters,\n betas=(0.9, 0.98), # according to RoBERTa paper\n lr=self.args.lr,\n eps=self.args.adam_epsilon,)\n elif self.optimizer == \"torch.adam\":\n optimizer = torch.optim.AdamW(optimizer_grouped_parameters,\n lr=self.args.lr,\n eps=self.args.adam_epsilon,\n weight_decay=self.args.weight_decay)\n else:\n raise ValueError(\"Optimizer type does not exist.\")\n num_gpus = len([x for x in str(self.args.gpus).split(\",\") if x.strip()])\n t_total = (len(self.train_dataloader()) // (self.args.accumulate_grad_batches * num_gpus) + 1) * self.args.max_epochs\n warmup_steps = int(self.args.warmup_proportion * t_total)\n if self.args.lr_scheduler == \"onecycle\":\n scheduler = torch.optim.lr_scheduler.OneCycleLR(\n optimizer, max_lr=self.args.lr, pct_start=float(warmup_steps/t_total),\n final_div_factor=self.args.final_div_factor,\n total_steps=t_total, anneal_strategy='linear')\n elif self.args.lr_scheduler == \"linear\":\n scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total)\n elif self.args.lr_scheulder == \"polydecay\":\n if self.args.lr_mini == -1:\n lr_mini = self.args.lr / self.args.polydecay_ratio\n 
else:\n lr_mini = self.args.lr_mini\n scheduler = get_polynomial_decay_schedule_with_warmup(optimizer, warmup_steps, t_total, lr_end=lr_mini)\n else:\n raise ValueError\n return [optimizer], [{\"scheduler\": scheduler, \"interval\": \"step\"}]", "def update_parameters_with_gd(parameters, grads, learning_rate):\n\n L = len(parameters) // 2 # number of layers in the neural networks\n\n # Update rule for each parameter\n for l in range(L):\n ### START CODE HERE ### (approx. 2 lines)\n parameters[\"W\" + str(l+1)] = parameters[\"W\" + str(l+1)]-learning_rate* grads[\"dW\" + str(l+1)]\n parameters[\"b\" + str(l+1)] = parameters[\"b\" + str(l+1)]-learning_rate* grads[\"db\" + str(l+1)]\n ### END CODE HERE ###\n \n return parameters", "def policy_update(self):\n mini_batch = random.sample(self.data_buffer, self.batch_size)\n state_batch = [data[0] for data in mini_batch]\n mcts_probs_batch = [data[1] for data in mini_batch]\n winner_batch = [data[2] for data in mini_batch]\n loss, entropy = self.policy_value_net.train_step(\n state_batch,\n mcts_probs_batch,\n winner_batch,\n self.learn_rate)\n return loss, entropy", "def optimize(self):\n\t\ts1,a1,r1,s2 = self.ram.agg_sample(self.batch_size)\n\n\t\ts1 = Variable(torch.from_numpy(s1))\n\t\ta1 = Variable(torch.from_numpy(a1))\n\t\tr1 = Variable(torch.from_numpy(r1))\n\t\ts2 = Variable(torch.from_numpy(s2))\n\n\t\tfor i in range(self.critic_step):\n\t\t\t# ---------------------- optimize critic ----------------------\n\t\t\t# Use target actor exploitation policy here for loss evaluation\n\t\t\t\n\t\t\t# a2 = self.target_actor.forward(s2).detach()\n\t\t\t# next_val = torch.squeeze(self.target_critic.forward(s2, a2).detach())\n\t\t\t\n\t\t\t# y_exp = r + gamma*Q'( s2, pi'(s2))\n\t\t\ty_expected = r1 #+ GAMMA*next_val\n\t\t\t# y_pred = Q( s1, a1)\n\t\t\ty_predicted = torch.squeeze(self.critic.forward(s1, a1))\n\t\t\t# compute critic loss, and update the critic\n\t\t\t#print(y_predicted,y_expected,\"hi\")\n\t\t\tloss_critic = F.smooth_l1_loss(y_predicted, y_expected.squeeze())\n\t\t\tself.critic_optimizer.zero_grad()\n\t\t\tloss_critic.backward()\n\t\t\tself.critic_optimizer.step()\n\n\t\t# ---------------------- optimize actor ----------------------\n\t\tpred_a1 = self.actor.forward(s1)\n\t\tloss_actor = -1*torch.sum(self.critic.forward(s1, pred_a1))\n\t\tself.actor_optimizer.zero_grad()\n\t\tloss_actor.backward()\n\t\tself.actor_optimizer.step()\n\n\t\tutils.soft_update(self.target_actor, self.actor, TAU)\n\t\tutils.soft_update(self.target_critic, self.critic, TAU)\n\n\t\t# if self.iter % 100 == 0:\n\t\tif self.batch_size > 1:\n\t\t\ty_1 = y_predicted.data.numpy()[0]\n\t\t\tr_1 = r1.data.numpy()[0]\n\t\telse:\n\t\t\ty_1 = y_predicted.data.numpy()\n\t\t\tr_1 = r1.data.numpy()\n\t\tprint ('Iteration :- ', self.iter, ' Loss_actor :- ', loss_actor.data.numpy(),\\\n\t\t\t' Loss_critic :- ', loss_critic.data.numpy(), ' Critic Pred Reward :- ', y_1, ' Actual Reward :- ', r_1)\n\t\tself.iter += 1", "def _build_algorithm(self):\n self.optimizer = tf.train.AdamOptimizer(self._lr, epsilon=1.5e-8)\n trainable_variables = tf.trainable_variables(\"main/qnet\")\n\n # Compute the state value.\n batch_size = tf.shape(self._observation)[0]\n action_index = tf.stack([tf.range(batch_size), self._action], axis=1)\n action_q = tf.gather_nd(self._qvals, action_index)\n assert_shape(action_q, [None])\n\n # Compute back up.\n ave_q = tf.add_n(self._target_qvals) / self._n_net\n assert_shape(tf.reduce_max(ave_q, axis=1), [None])\n q_backup = tf.stop_gradient(self._reward + 
self._discount * (1 - self._done) * tf.reduce_max(ave_q, axis=1))\n\n # Compute loss and optimize the object.\n loss = tf.reduce_mean(tf.squared_difference(q_backup, action_q)) # 损失值。\n self._train_op = self.optimizer.minimize(loss, var_list=trainable_variables)\n\n # Update target network.\n update_target_operation = []\n for i in reversed(range(1, self._n_net)): # i=0表示最近的模型。\n with tf.control_dependencies(update_target_operation):\n update_target_operation.append(self._update_target(f\"target_{i}/qnet\", f\"target_{i-1}/qnet\"))\n\n with tf.control_dependencies(update_target_operation):\n update_target_operation.append(self._update_target(\"target_0/qnet\", \"main/qnet\"))\n\n self.update_target_op = update_target_operation\n self._log_op = {\"loss\": loss}", "def modify(nets, probs, ranks, desc, hypers, seed=0, seed2=0):\n\n name = str(seed)\n\n np.random.seed(seed2)\n tf.random.set_random_seed(seed2)\n random.seed(seed2)\n\n if not rnd: # If randomness is not applied\n print(ranks.sum(axis=1))\n if (ranks.sum(axis=1) == 0).any(): # If there are any network in the bottom three in importance in all objectives\n probs = (ranks.sum(axis=1) == 0) * probs # Only accept a network as modifiable if they rank between 3 least important networks in all three objectives\n probs = probs / np.sum(probs) # Update probabilities once the networks more important than bottom three have been taken away\n trainables, res, mutation, comp, reaching_outs = reducing_mutations(nets, probs, desc)\n else:\n trainables, res, mutation, comp, reaching_outs = increasing_mutations(nets, probs, desc)\n else: # Random application\n comp = np.random.choice(nets)\n _, in_conns, out_conns, _ = desc.get_net_context(comp)\n conns = in_conns + out_conns # Checka si esto da error\n reaching_outs = list(set([x for x in desc.reachable[comp] if \"o\" in x])) # Outputs affected by the mutation\n mutations = [con for con in conns if is_deletable(desc, con)]\n\n mutations += [\"add_con\", \"divide_con\", \"reinit\"]\n\n if is_bypassable(desc, comp):\n mutations += [\"bypass\"]\n\n mutation = np.random.choice(mutations)\n res, trainables = mutate(mutation, desc, comp, conns)\n print(mutation)\n model = MNM(desc, hypers[\"btch_sz\"], data_inputs[\"Train\"], data_outputs[\"Train\"], loss_func_weights={\"o0\": hypers[\"wo0\"], \"o1\": hypers[\"wo1\"], \"o2\": hypers[\"wo2\"]}, name=name, load=None, init=False, random_seed=seed2, lr=0.001)\n\n model.initialize(load=True, load_path=\"\", variables=trainables)\n\n model.convergence_train(hypers[\"btch_sz\"], iter_lim//100, conv_param, proportion, iter_lim//20, display_step=-1)\n\n results = evaluate_model(model)\n\n del model\n\n if rnd == 1:\n n = \"resultsrandom\"\n else:\n n = \"results\"\n\n np.save(n + str(seed) + \"_\" + str(seed2) + \".npy\", np.concatenate((results, [res, mutation, comp], reaching_outs)))", "def reset_training(self):\n self.policy_optim = Adam(self.policy.parameters(), lr=self.lr)\n self.q_optim = Adam(self.q_net.parameters(), lr=self.lr)\n\n self.alpha_optim = Adam([self.log_alpha], lr=1e-2)", "def propose_optimize():\n pass", "def _update_parameter(self, dWxh, dbh, dWhy, dby):\n # Add code to update all the weights and biases here", "def update_target_network(self):\n\n\t\tprint \"Updating Target DQN...\"\n\t\t\n\t\tself.update_operation.run()", "def optimize_parameters(self) -> None:\n self.forward() # compute fake images: G(A)\n # update discriminator\n self.set_requires_grad([self._discriminator_module], True) # enable backward for D\n 
self._discriminator_optimizer.zero_grad() # set D's gradients to zero\n self.backward_discriminator() # calculate gradients for D\n self._discriminator_optimizer.step() # update D's weights\n # update generator\n self.set_requires_grad([self._discriminator_module], False) # D requires no gradients when optimizing G\n self._generator_optimizer.zero_grad() # set G's gradients to zero\n self.backward_generator() # calculate gradients for G\n self._generator_optimizer.step() # update G's weights\n return", "def adjust_learning_rate(opt, optimizer, epoch):\r\n lr = opt.learning_rate * (0.1 ** (epoch // opt.lr_update))\r\n for param_group in optimizer.param_groups:\r\n param_group['lr'] = lr", "def freeze_neuron(self,optimizer):\n\n n_neurons = self.num_hiddens\n params = []\n for i in range(n_neurons):\n params.append(\n # input2hidden\n {'params': self.input2hidden_layers[str(i)].parameters(), 'lr': 0},\n )\n params.append(\n # hidden2output\n {'params': self.hidden2output_layers[str(i)].parameters(), 'lr': 0.001},\n )\n if n_neurons > 1:\n for i in range(int(n_neurons*(n_neurons-1)/2)):\n params.append(\n # hidden2hidden\n {'params': self.hidden2hidden_layers[str(i)].parameters(), 'lr': 0},\n )\n optimizer = torch.optim.SGD(params, momentum=0.9,lr=0.001)\n return optimizer", "def update_params(self):\n if self.clip > 0:\n torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.clip)\n self.optimizer.step()", "def _build_optimizers(self):\r\n self._optimize_ops = []\r\n all_trainable_variables = tf.trainable_variables()\r\n all_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\r\n all_reg_losses = tf.losses.get_regularization_losses()\r\n for spec in self._learning_schedule:\r\n optimize_ops = []\r\n update_ops = []\r\n loss_terms = spec['loss_terms_to_optimize']\r\n reg_losses = []\r\n assert isinstance(loss_terms, dict)\r\n for loss_term_key, prefixes in loss_terms.items():\r\n assert loss_term_key in self.loss_terms['train'].keys()\r\n variables_to_train = []\r\n for prefix in prefixes:\r\n variables_to_train += [\r\n v for v in all_trainable_variables\r\n if v.name.startswith(prefix)\r\n ]\r\n update_ops += [\r\n o for o in all_update_ops\r\n if o.name.startswith(prefix)\r\n ]\r\n reg_losses += [\r\n l for l in all_reg_losses\r\n if l.name.startswith(prefix)\r\n ]\r\n\r\n optimizer_class = tf.train.AdamOptimizer\r\n optimizer = optimizer_class(\r\n learning_rate=self.learning_rate_multiplier * spec['learning_rate'],\r\n # beta1=0.9,\r\n # beta2=0.999,\r\n )\r\n final_loss = self.loss_terms['train'][loss_term_key]\r\n if len(reg_losses) > 0:\r\n final_loss += tf.reduce_sum(reg_losses)\r\n with tf.control_dependencies(update_ops):\r\n gradients, variables = zip(*optimizer.compute_gradients(\r\n loss=final_loss,\r\n var_list=variables_to_train,\r\n aggregation_method=tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N,\r\n ))\r\n # gradients, _ = tf.clip_by_global_norm(gradients, 5.0) # TODO: generalize\r\n optimize_op = optimizer.apply_gradients(zip(gradients, variables))\r\n optimize_ops.append(optimize_op)\r\n self._optimize_ops.append(optimize_ops)\r\n logger.info('Built optimizer for: %s' % ', '.join(loss_terms.keys()))", "def init_optimizer_for_pruning(cls, optimizer):\n assert (cls.__optimizer is None), \"ASP has initialized optimizer already.\"\n assert (cls.__calculate_mask is not None), \"Called ASP.init_optimizer_for_pruning before ASP.init_model_for_pruning.\"\n\n # store pointer to original optimizer step method\n cls.__optimizer = optimizer\n cls.__optimizer.__step = 
optimizer.step\n\n def __step(opt_self, *args, **kwargs):\n # prune gradients before step method\n with torch.no_grad():\n for module_name, module, p_name, p, mask, pruned in cls.__sparse_parameters:\n p.grad.mul_(mask)\n # call original optimizer step method\n rval = opt_self.__step(*args, **kwargs)\n # prune parameters after step method\n with torch.no_grad():\n for module_name, module, p_name, p, mask, pruned in cls.__sparse_parameters:\n p.mul_(mask)\n return rval\n cls.__optimizer.step = types.MethodType(__step, cls.__optimizer)", "def update_target(self):\n with torch.no_grad():\n for target_q_param, q_param in zip(self.target_q_funcs.parameters(), self.q_funcs.parameters()):\n target_q_param.data.copy_(self.tau * q_param.data + (1.0 - self.tau) * target_q_param.data)\n for target_pi_param, pi_param in zip(self.target_policy.parameters(), self.policy.parameters()):\n target_pi_param.data.copy_(self.tau * pi_param.data + (1.0 - self.tau) * target_pi_param.data)", "def add_optimizer(self, optimizer):\n assert isinstance(optimizer, torch.optim.Optimizer)\n setattr(self, 'optimizer'+str(self._optimizer_counter), optimizer)\n self._optimizer_counter += 1\n # optimizer indexing : optimizer 0 is the optimizer for layer 0" ]
[ "0.7567972", "0.7425979", "0.6807146", "0.6604179", "0.6524688", "0.6521089", "0.6455929", "0.64432573", "0.63913226", "0.63542", "0.632937", "0.6310982", "0.62825817", "0.6239003", "0.62361735", "0.62241197", "0.6202959", "0.6191064", "0.61622465", "0.61608213", "0.6155807", "0.6154827", "0.61027354", "0.6089262", "0.6089262", "0.6085368", "0.60815793", "0.6064262", "0.605735", "0.60568947", "0.6051021", "0.6044685", "0.6037718", "0.6023418", "0.6015278", "0.6013912", "0.60100234", "0.60089344", "0.60072345", "0.59972", "0.5956028", "0.594289", "0.5934958", "0.59321576", "0.5925213", "0.59249663", "0.5918806", "0.5897359", "0.58891904", "0.58857363", "0.5858641", "0.58483166", "0.5843571", "0.5842187", "0.5833908", "0.58264035", "0.58262163", "0.58256984", "0.58205885", "0.58151174", "0.581073", "0.58097446", "0.58089554", "0.58068806", "0.57941526", "0.5789539", "0.5789499", "0.57816654", "0.57803446", "0.57793474", "0.5776188", "0.5772755", "0.5767619", "0.57627887", "0.57598096", "0.5752204", "0.5742143", "0.57357246", "0.5727063", "0.571963", "0.57173187", "0.5711307", "0.57048637", "0.5704376", "0.5688065", "0.56878924", "0.56878036", "0.56875557", "0.56844413", "0.5678447", "0.56674236", "0.5667351", "0.56633043", "0.5660387", "0.56587577", "0.5657645", "0.56478924", "0.56437385", "0.5642455", "0.56423897", "0.5634184" ]
0.0
-1
Gets and sets the vppTokenId
def vpp_token_id(self): if "vppTokenId" in self._prop_dict: return self._prop_dict["vppTokenId"] else: return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def token_id(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"token_id\")", "def get_token(self):\n auth_data = {\"auth\": {\"tenantName\": 'service',\n \"passwordCredentials\":{ \"username\": 'vsm',\n \"password\": self._password}}}\n\n auth_request = urllib2.Request(self._auth_url)\n auth_request.add_header(\"content-type\", \"application/json\")\n auth_request.add_header('Accept', 'application/json')\n auth_request.add_header('User-Agent', 'python-mikeyp')\n auth_request.add_data(json.dumps(auth_data))\n auth_response = urllib2.urlopen(auth_request)\n response_data = json.loads(auth_response.read())\n\n self._token = response_data['access']['token']['id']\n\n service_list = response_data['access']['serviceCatalog']\n for s in service_list:\n if s['type'] == 'vsm' and s['name'] == 'vsm':\n self._vsm_url = s['endpoints'][0]['publicURL']\n break\n\n url_id = self._vsm_url.split('/')[-1]\n return self._token + \"-\" + url_id", "def get_token(self, token_id):\n raise exception.NotImplemented() # pragma: no cover", "def token_id(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"token_id\")", "def _get_auth_token(self):\n\n __logger__.debug(\"Getting auth Token\")\n return self.keystone_client.auth_ref['token']['id']", "def getId(self):\n return self.__vmId", "def _set_token(self):\n f = open(\".cli_token\")\n data = f.read()\n if data is not None:\n self.token = data\n return self.token", "def get_token():\n return session.get('microsoft_token')", "def get_token():\n return session.get('microsoft_token')", "def GetToken(self):\n if self.auth_token_:\n return self.auth_token_\n raise RuntimeError('ClientLoginAuthPolicy is not logged in.')", "def voting_token(self) -> str:\n return self._voting_token", "def token_id_from(self, token_id_from):\n\n self._token_id_from = token_id_from", "def EstablishAuthToken(self, opener):\n url = 'https://www.pivotaltracker.com/services/v3/tokens/active'\n data = parse.urlencode((('username', self.username),\n ('password', self.password)))\n try:\n req = opener.open(url, data.encode())\n except error.HTTPError as e:\n if e.code == 404:\n raise NoTokensAvailableException(\n 'Did you create any? 
Check https://www.pivotaltracker.com/profile')\n else:\n raise\n\n res = req.read()\n\n dom = minidom.parseString(res)\n token = dom.getElementsByTagName('guid')[0].firstChild.data\n\n return token", "def token(self):\n if not self._token:\n self._token = self.authenicate().token\n\n return self._token", "def VplsIdType(self):\n return self._get_attribute('vplsIdType')", "def _get_token(self):\n return user.get_token()", "def token(self) -> str:\n return pulumi.get(self, \"token\")", "def token(self) -> str:\n return pulumi.get(self, \"token\")", "def token(self) -> str:\n return pulumi.get(self, \"token\")", "async def twitchtoken(self):\n self.settings[\"TWITCH_TOKEN\"] = \"6mmlypg9emj6jebbpylmlpejwxj2pn\"\n dataIO.save_json(\"data/streams/settings.json\", self.settings)\n await self.bot.say('Twitch Client-ID set.')", "def get_new_token(self):\n # Save result of this API call into self instance __token\n self.__token = apidnac.ApiDNAC.api_get_token()\n # Save result to the defined parameter (\"token\") in file cache_config\n self.save_param('token', self.__token)\n # Return self instance __token\n return self.__token", "def generate(self):\n return self.rpc.call(MsfRpcMethod.AuthTokenGenerate)['token']", "def get_token(self):\n self.token = self._session.fetch_token(\n token_url=CLOUD_URLS[\"get_token\"][1],\n client_id=self._client_id,\n client_secret=self._client_secret\n )", "def get_token(self) -> None:\n context_dict = demisto.getIntegrationContext()\n cur_token = context_dict.get('token')\n refresh_token = context_dict.get('refresh_token')\n\n if cur_token:\n self._headers['NetWitness-Token'] = cur_token\n self.refresh_token = refresh_token\n else:\n self.generate_new_token(refresh_token)", "def token_id_hex(self) -> str: # this is *ALSO* a MINT property\n return self.token_id.hex()", "def vm_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"vm_id\")", "def get_token(self):\n\n return self._token", "def auth_token(self):", "def get_current_uid():\n # TODO: Find a better way to access the token\n return request.token['id']", "def authenticationToken(self):\n return self.authToken", "def vm_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"vm_id\")", "def vm_id(self):\n return self.vm_info.get('id', 'Error retrieving ID')", "def get_token(self):\n return self.__token", "def get_token(self):\n return self.__token", "def get_token():\n global vault_token\n global vault_token_time\n current_app.logger.info('************* GET TOKEN METHOD **************')\n return 'root'\n if validate_token():\n vault_duration = None\n try:\n auth_type = current_app.config.get('VAULT_AUTH', 'TOKEN')\n current_app.logger.info('*********** Auth Type: ' + auth_type)\n if auth_type == 'TOKEN':\n vault_token = current_app.config.get('VAULT_AUTH_TOKEN')\n elif auth_type == 'USERPASS':\n vault_token, vault_duration = authenticate_userpass()\n elif auth_type == 'LDAP':\n vault_token, vault_duration = authenticate_ldap()\n elif auth_type == 'CERT':\n vault_token, vault_duration = authenticate_certificate()\n elif auth_type == 'GCP':\n vault_token, vault_duration = authenticate_gcp()\n elif auth_type == 'APPROLE':\n vault_token, vault_duration = authenticate_approle()\n else:\n current_app.logger.info('Vault: VAULT_AUTH not configured correctly.')\n raise RuntimeError('Vault: VAULT_AUTH not configured correctly.')\n if vault_duration is not None:\n vault_token_time = datetime.datetime.now() + datetime.timedelta(seconds=int(vault_duration))\n \n 
current_app.logger.info('*********** TOKEN: ' + vault_token) \n\n except ConnectionError as ConnError:\n current_app.logger.info('Vault: There was an error while connecting to Vault server.')\n raise ConnError\n\n return vault_token", "def key_vault_id(self) -> str:\n return pulumi.get(self, \"key_vault_id\")", "def vmid(self):\n return self.raw[\"VMId\"]", "def token(self):\n return self[\"token\"]", "def get_token(self):\n token = self._session.token\n return token", "def UserToken(self) -> object:", "def vswitch_id(self) -> str:\n return pulumi.get(self, \"vswitch_id\")", "def vswitch_id(self) -> str:\n return pulumi.get(self, \"vswitch_id\")", "def vswitch_id(self) -> str:\n return pulumi.get(self, \"vswitch_id\")", "def vswitch_id(self) -> str:\n return pulumi.get(self, \"vswitch_id\")", "def get_id(self):\n\t\treturn call_sdk_function('PrlSrvCfgDev_GetId', self.handle)", "def identifier(self) -> str:\n return self.current_token", "def get_client_token(**_):\n return str(uuid.uuid4())", "def token(self):\n\n if not self.requests:\n return None\n return self.requests[0].token", "def claimToken(self):\n response = self._session.get('https://plex.tv/api/claim/token.json', headers=self._headers(), timeout=TIMEOUT)\n if response.status_code not in (200, 201, 204): # pragma: no cover\n codename = codes.get(response.status_code)[0]\n errtext = response.text.replace('\\n', ' ')\n raise BadRequest(f'({response.status_code}) {codename} {response.url}; {errtext}')\n return response.json()['token']", "def get_token_id(self):\n return f\"{self.document_title}_{self.index}\"", "def token(self):\r\n return self._token", "def get_token(self, name):\n if self.kv.get(name):\n return self.kv.get(name)\n token = self.random_string(24)\n self.kv.set(name, token)\n return token", "def token(self):\n return self._token", "def token(self):\n return self._token", "def token(self):\n return self._token", "def verification_token(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"verification_token\")", "def getOwnerIdFromToken(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def get_token(self):\n token_model = TokenModel.find_by_user_id(self.id)\n return token_model.token if token_model else None", "def _convert_token_to_id(self, token):\n return self.sp_model.PieceToId(str(token))", "def device_token(self):\n return self._device_token", "def _convert_token_to_id(self, token):\n return self.sp_model.PieceToId(token)", "def token(self, id):\r\n return Token(self, id)", "def GenerateIdToken(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")", "def get_token(self):\n url = '/auth-token/'\n data = self._http_post(url, self.credentials)\n token = data['token']\n assert len(token) == 40, 'The length of seahub api auth token should be 40'\n self.token = 'Token ' + token", "def id(self):\n return self.vdu_info.vdu_id", "def get_vm_id(self):\n return self.instance_metadata.vm_id", "def auth_token(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"auth_token\")", "def video_id(self) -> str:\r\n return self._video_id", "def vid(self):\n return self._id", "def token():\n return os.environ.get('TOKEN', None)", "def key_vault_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"key_vault_id\")", "def key_vault_id(self) 
-> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"key_vault_id\")", "def key_vault_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"key_vault_id\")", "def key_vault_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"key_vault_id\")", "def video_id(self):\n # type: () -> string_types\n return self._video_id", "def vpd_id(self) -> str:\n return pulumi.get(self, \"vpd_id\")", "def vpd_id(self) -> str:\n return pulumi.get(self, \"vpd_id\")", "def _generate_token_value():\n return secrets.token_urlsafe()", "def get_token(self, bot_id):\n res = self.execute(TABELLE['bot']['select']['by_id'], (bot_id,))\n # print(res)\n return res", "def _convert_token_to_id(self, token):\n return self.vocab.get(token, self.vocab.get(self.unk_token))", "def generate_token(self):\n self.__get_auth_token_and_secret()\n return self.get_token()", "def vswitch_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"vswitch_id\")", "def vswitch_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"vswitch_id\")", "def vswitch_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"vswitch_id\")", "def load_token(self):\n token = None\n\n if config.outlook_token:\n token = self.token_constructor(config.outlook_token)\n\n return token", "def _getApiAuthToken(self):\n return settings.EBAY_API_AUTH_TOKEN", "def issueToken(self, request_id):\n json_object = self.post_json(\"/asset/token\", {\"requestId\": request_id})\n return json_object.token", "def getToken(self):\n \n raise NotImplementedError", "def token_to_id(self, token):\n token = self.process_token(token)\n return self.token2id.get(token, len(self.token2id) - 1)", "def get_token(self):\r\n token = {'id': self.catalog['access']['token']['id'],\r\n 'expires': self.catalog['access']['token']['expires'], }\r\n try:\r\n token['user_id'] = self.catalog['access']['user']['id']\r\n token['tenant_id'] = (\r\n self.catalog['access']['token']['tenant']['id'])\r\n except Exception:\r\n # just leave the tenant and user out if it doesn't exist\r\n pass\r\n return token", "def provision(self, policy):\n client = self.connect(VAULT_TOKEN)\n token = client.create_token(policies = [policy])\n return token[\"auth\"][\"client_token\"]", "def refresh_token(self, subid):\n from expfactory.database.models import Participant\n\n p = Participant.query.filter(Participant.id == subid).first()\n if p is not None:\n p.token = str(uuid.uuid4())\n self.session.commit()\n return p", "def token_to_id(self, token):\r\n return self.encoder.get(token, self.encoder.get(self.unk_token))", "def get_oauth_token():\n return session.get('remote_oauth')", "def token(self):\n\n return self.__token", "def get_token_from_rpx(self):\n url_params = {'token_url' : ''}\n http_response = urllib2.urlopen(RPX_POPUP_URL, urllib.urlencode(url_params))\n import pdb;pdb.set_trace()", "def current_token() -> object:\n return get_async_backend().current_token()", "def __set_version_id(self):\r\n VersionId = self.client.factory.create('VersionId')\r\n VersionId.ServiceId = self._version_info['service_id']\r\n VersionId.Major = self._version_info['major']\r\n VersionId.Intermediate = self._version_info['intermediate']\r\n VersionId.Minor = self._version_info['minor']\r\n self.logger.debug(VersionId)\r\n self.VersionId = VersionId", "def VplsIdAssignedNumber(self):\n return self._get_attribute('vplsIdAssignedNumber')", "def get_auth_token():\n if CFG.auth_enabled:\n auth_token = get_keystone_token()\n else:\n auth_token = 
'notrealtoken'\n\n return auth_token" ]
[ "0.61384207", "0.611807", "0.5994977", "0.5994693", "0.5693623", "0.55299115", "0.5475123", "0.5429745", "0.5429745", "0.5375773", "0.53675216", "0.5362576", "0.53347", "0.53136176", "0.53107935", "0.5303457", "0.5282872", "0.5282872", "0.5282872", "0.5258768", "0.5255631", "0.5194702", "0.51896566", "0.51814824", "0.5172276", "0.51707643", "0.51572376", "0.5153081", "0.51473665", "0.5119729", "0.51139", "0.51115584", "0.5096999", "0.5096999", "0.50890005", "0.508855", "0.50799453", "0.5063049", "0.5059751", "0.5051713", "0.5047595", "0.5047595", "0.5047595", "0.5047595", "0.5038544", "0.5030926", "0.50172263", "0.50099015", "0.50071156", "0.5003587", "0.50000477", "0.49963924", "0.49939245", "0.49939245", "0.49939245", "0.49764153", "0.49712324", "0.4938843", "0.49341142", "0.49327642", "0.49248102", "0.49187168", "0.49136746", "0.4912563", "0.49088278", "0.49056858", "0.49047956", "0.49012795", "0.49004176", "0.48936024", "0.4892156", "0.4892156", "0.4892156", "0.4892156", "0.48897824", "0.48857066", "0.48857066", "0.4878804", "0.48653603", "0.48652643", "0.4862487", "0.48558667", "0.48558667", "0.48558667", "0.48526612", "0.4849474", "0.48467362", "0.48453018", "0.48442125", "0.48262045", "0.48175594", "0.48108187", "0.4802546", "0.4796756", "0.47964036", "0.47933036", "0.47922269", "0.47904125", "0.47856998", "0.47847918" ]
0.82442844
0
Gets and sets the appleId
def apple_id(self): if "appleId" in self._prop_dict: return self._prop_dict["appleId"] else: return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def apple_id(self, apple_id):\n\n self._apple_id = apple_id", "def appid(self):\n return self._item[\"appid\"]", "def ApplicationId(self) -> _n_0_t_0:", "def app_id(self):\n return self._app_id", "def app_id(self) -> str:\n return self._app_id", "async def slashtagset_appid(self, ctx: commands.Context, id: int = None):\n app_id = id or self.bot.user.id\n await self.config.application_id.set(app_id)\n self.application_id = app_id\n await ctx.send(f\"Application ID set to `{id}`.\")", "def app_id(self, app_id):\n self._app_id = app_id", "def setAppID(self, appid):\n\t\tself.config.APP_ID = appid", "def app_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"app_id\")", "def get_id(self, app_name):\n _id = []\n apps = [app for app in self.applications.response if app.name == app_name]\n if len(apps) > 0:\n return apps[0].id", "def msa_app_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"msa_app_id\")", "def application_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"application_id\")", "def application_id(self) -> Optional[str]:\n return pulumi.get(self, \"application_id\")", "def app_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"app_id\")", "def app_id(self, app_id):\n\n self._app_id = app_id", "def unique_id(self):\n return self._device.mac", "def app_id(self):\n return self._app_id or self._modules['default'].data.get('application')", "def app_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"app_id\")", "def _app_id(self):\n return '{}-{}'.format(self.config['app']['name'],\n self.config['app']['version'])", "def application_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"application_id\")", "def application_id(self, application_id):\n\n self._application_id = application_id", "def loan_application_id(self) -> str:\n return self._loan_application_id", "def __new_apple(self):\n apple_position = Position(randint(0, 7), randint(0, 7))\n while apple_position in self._snake.body:\n apple_position = Position(randint(0, 7), randint(0, 7))\n\n self._apple = self.Apple(apple_position)", "def set_ident(self) -> int:\n return self._set_id", "def user_id(self) -> str:\n return self.app_config()[\"metadata.user.id\"]", "def application_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"application_id\")", "def application_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"application_id\")", "def application_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"application_id\")", "def identifier(self):\n return self._id", "def uuid(self, value):\n if value is not None:\n self.keystore['id'] = value\n elif 'id' in self.keystore:\n self.keystore.pop('id')", "def unique_id(self):\n return self.device_id", "def identifier(self):\r\n return self.id", "def identifier(self):\n return self._client.identifier", "def unique_id(self):\n return self._device_id", "def application_object_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"application_object_id\")", "def test_30_app_id_owner(self, mock):\r\n self.register()\r\n self.new_application()\r\n\r\n res = self.app.get('/app/sampleapp/settings', follow_redirects=True)\r\n assert \"Sample App\" in res.data, (\"Application should be shown to \"\r\n \"the owner\")\r\n msg = '<strong><i class=\"icon-cog\"></i> ID</strong>: 1'\r\n err_msg = \"Application ID should be shown to the owner\"\r\n assert msg in res.data, err_msg\r\n\r\n self.signout()\r\n with self.flask_app.app_context():\r\n self.create()\r\n 
self.signin(email=Fixtures.email_addr2, password=Fixtures.password)\r\n res = self.app.get('/app/sampleapp/settings', follow_redirects=True)\r\n assert res.status_code == 403, res.status_code", "def VendorId(self):\n\t\treturn self._get_attribute('vendorId')", "def get_id(self):\n pass", "def get_id(self):\n pass", "def get_id(self):\n pass", "def get_id(self):\n pass", "def alert_id(self):\n return self._alert_id", "def developer_app_insights_application_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"developer_app_insights_application_id\")", "def identifier(self):\n return self.__id", "def _DefaultAppId():\n return os.getenv('APPLICATION_ID', '_')", "def getID():", "def get_device_id(self) -> str:\n return Config.get('device_id')", "def app_id(self):\n return self._chromecast.app_id if self._chromecast else None", "def test_id_osx_10_14_6(self):\n self.assertEqual(jc.parsers.id.parse(self.osx_10_14_6_id, quiet=True), self.osx_10_14_6_id_json)", "def set_ident(self) -> int:\n return self._set_ident", "def employee_id(self):\n for i in self.emp_dict:\n self.emp_id[i] = self.emp_dict[i][0]\n #print(self.emp_id)\n return self.emp_id", "def unique_id(self):\n return f\"{self.device.id}-{self.key}\"", "def unique_id(self) -> Optional[str]:\n return self._device.device_id", "def get_id(self):\n return self.email", "def get_id(self):\n return self.email", "def client_app_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"client_app_id\")", "def unique_id(self) -> str:\n return self.tahoma_device.url", "def getId(self):\n return self.identifier", "def get_primary_id(self):", "def _set_id(self, value):\n pass", "def getUID(self, cardService):\n try:\n # try to get ID from smartphone\n aidSize = [0x07]\n ending = [0x00]\n apdu = CLA_INS_P1_P2 + aidSize + AID_ANDROID + ending\n response, sw1, sw2 = self.myTransmit(cardService.connection, apdu)\n UID = toHexString(response)\n #apdu = [0x00]\n #response, sw1, sw2 = self.myTransmit(cardService.connection, apdu)\n #print 'esasdf : ', toHexString(response)\n except IndexError:\n # otherwise get ID from smartcard\n apdu = GET_UID\n response, sw1, sw2 = self.myTransmit(cardService.connection, apdu)\n UID = toHexString(response)\n return UID", "def application_object_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"application_object_id\")", "def device_id(self) -> str:\n return self._device_info[\"ID\"]", "def setid(self):\n return self.__setid", "def get_api_account_id(self):\n if self.api_account_id in [None, '']:\n options = string.letters + string.digits\n self.api_account_id = ''.join([\n random.choice(options)\n for i in range(64)\n ])\n self.save()\n return self.api_account_id", "def _get_device_id(api: Mobileclient) -> str:\n\n try:\n _get_device_id_from_environment()\n except KeyError:\n pass\n\n return _get_device_id_from_registered(api)", "def id(self):\n return self.config['key']", "def unique_id(self) -> str:\n return '{0}_{1}'.format(self._mac.replace(':', ''), self.entity_id)", "def device_id(self):\n return self.unique_id", "def setup_adobe_vendor_id(self, _db, library):\n short_client_token_initialization_exceptions = dict()\n adobe = ExternalIntegration.lookup(\n _db, ExternalIntegration.ADOBE_VENDOR_ID,\n ExternalIntegration.DRM_GOAL, library=library\n )\n\n if adobe:\n # Relatively few libraries will have this setup.\n vendor_id = adobe.username\n node_value = adobe.password\n if not (vendor_id and node_value):\n self.log.warn(\n \"Adobe Vendor ID is disabled due to missing or incomplete 
configuration. This is probably nothing to worry about.\")\n\n # But almost all libraries will have a Short Client Token\n # setup. We're not setting anything up here, but this is useful\n # information for the calling code to have so it knows\n # whether or not we should support the Device Management Protocol.\n registry = ExternalIntegration.lookup(\n _db, ExternalIntegration.OPDS_REGISTRATION,\n ExternalIntegration.DISCOVERY_GOAL, library=library\n )\n authdata = None\n if registry:\n try:\n authdata = ShortClientTokenUtility.from_config(library, _db)\n except CannotLoadConfiguration as e:\n short_client_token_initialization_exceptions[library.id] = e\n self.log.error(\n \"Short Client Token configuration for %s is present but not working. This may be cause for concern. Original error: %s\",\n library.name, str(e)\n )\n self.short_client_token_initialization_exceptions = short_client_token_initialization_exceptions\n return authdata", "def GetITunesSubscriptionId(cls, verify_response):\n return kITunesPrefix + verify_response.GetOriginalTransactionId()", "def unique_id(self):\n return self._deviceId", "def peer_azure_app_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"peer_azure_app_id\")", "def test_30_app_id_anonymous_user(self, Mock, mock):\r\n html_request = FakeRequest(json.dumps(self.pkg_json_not_found), 200,\r\n {'content-type': 'application/json'})\r\n Mock.return_value = html_request\r\n\r\n self.register()\r\n self.new_application()\r\n self.signout()\r\n\r\n res = self.app.get('/app/sampleapp', follow_redirects=True)\r\n assert \"Sample App\" in res.data, (\"Application name should be shown\"\r\n \" to users\")\r\n assert '<strong><i class=\"icon-cog\"></i> ID</strong>: 1' not in \\\r\n res.data, \"Application ID should be shown to the owner\"", "def id(self, value):\n self._id = value", "def identifier(self):\n return self._identifier", "def identifier(self):\n return self._identifier", "def identifier(self):\n return self._identifier", "def identifier(self):\n return self._identifier", "def identifier(self):\n return self._identifier", "def identifier(self):\n return self._identifier", "def identifier(self):\n return self._identifier", "def identifier(self):\n return self._identifier", "def identifier(self):\n return self._identifier", "def uuid(self):\n try:\n return self.keystore['id']\n except KeyError:\n return None", "def getIdent (self) :\n return self.id", "def get_customer_id(self):\n return self.machine_config_file_value(\"DEFAULT.CID\").strip('\"')", "def get_identifier(self) -> str:\n return self.identifier", "def device_id(self):\n return self._id[0]", "def _get_id(self):\n return self.id", "def identifier(self) -> str:\n return self.doc['id']", "def getIdentifier(self):\n return self._config['identifier']", "def getIdentifier(self):\n return self._config['identifier']", "def bundle_id(self) -> str:\n return pulumi.get(self, \"bundle_id\")", "def bundle_id(self) -> str:\n return pulumi.get(self, \"bundle_id\")", "def application_object_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"application_object_id\")", "def userDocumentId(self, id: str) -> str:", "def id(self, value: str):\n self._id = value", "def identifier(self):\n\n return self._identifier", "def with_application_id(self, application_id):\n if not isinstance(application_id, str):\n raise TypeError('Application Id must be a string')\n\n self.application_id = application_id\n\n return self" ]
[ "0.8069875", "0.59851646", "0.5909273", "0.5829789", "0.5814948", "0.5765417", "0.5722226", "0.5701442", "0.5638001", "0.562049", "0.5616317", "0.5522016", "0.55123824", "0.55114925", "0.54531074", "0.54511666", "0.5419831", "0.5416108", "0.54157305", "0.53459615", "0.53231406", "0.5273735", "0.5269533", "0.5265268", "0.5238193", "0.52380055", "0.52380055", "0.52380055", "0.5228871", "0.5220751", "0.51999605", "0.5198916", "0.519053", "0.5182175", "0.518077", "0.51175106", "0.51166326", "0.5107816", "0.5107816", "0.5107816", "0.5107816", "0.51052976", "0.51026624", "0.50851077", "0.5078394", "0.5076733", "0.5074992", "0.5074807", "0.50560826", "0.5053388", "0.5044667", "0.5042746", "0.5041875", "0.50353265", "0.50353265", "0.5033546", "0.50252104", "0.5010173", "0.49871948", "0.49859256", "0.49847192", "0.49789068", "0.4973417", "0.49720612", "0.4961929", "0.49606735", "0.49572945", "0.49539733", "0.49472708", "0.4943924", "0.4942842", "0.4939971", "0.49366847", "0.49330384", "0.49237648", "0.4921531", "0.4921531", "0.4921531", "0.4921531", "0.4921531", "0.4921531", "0.4921531", "0.4921531", "0.4921531", "0.4920967", "0.49197578", "0.4918963", "0.49177006", "0.4917507", "0.49093878", "0.4908114", "0.49032408", "0.49032408", "0.49028292", "0.49028292", "0.49015737", "0.49000877", "0.4899684", "0.48914427", "0.4889512" ]
0.8489338
0
Gets and sets the vppOrganizationName
def vpp_organization_name(self): if "vppOrganizationName" in self._prop_dict: return self._prop_dict["vppOrganizationName"] else: return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def organization_name(self):\n if self.organization is not None:\n return self.organization.name\n\n return ''", "def organization(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"organization\")", "def getOrganization(self):\n return _libsbml.ModelCreator_getOrganization(self)", "def get_organization(self):\n return self.reference[REF_ORGANIZATION][REF_VALUE]", "def organization_id(self) -> str:\n return pulumi.get(self, \"organization_id\")", "def organization_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"organization_id\")", "def setOrganization(self, *args):\n return _libsbml.ModelCreator_setOrganization(self, *args)", "def organization_id():\n return os.environ[\"GCLOUD_ORGANIZATION\"]", "def organization_id(self):\n return self._organization_id", "def organization_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"organization_id\")", "def organization_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"organization_id\")", "def company_name(self):\n if \"companyName\" in self._prop_dict:\n return self._prop_dict[\"companyName\"]\n else:\n return None", "def organization(self):\n return self._tower.get_organization_by_id(self._data.get('organization'))", "def org_name(self, org_name):\n\n self._org_name = org_name", "def org_name(self, org_name):\n\n self._org_name = org_name", "def company_name(self) -> Optional[str]:\n return pulumi.get(self, \"company_name\")", "def clean_organization(self):\n return self.organization", "def find_organization(self):\n if self.org_id is not None:\n ItopapiPrototype.get_itop_class('Organization').find(self.org_id)\n return None", "def organization(self, value):\n organization = self._tower.get_organization_by_name(value)\n if not organization:\n raise InvalidOrganization(value)\n self._update_values('organization', organization.id)", "def isSetOrganization(self):\n return _libsbml.ModelCreator_isSetOrganization(self)", "def get_company_name(self):\n\t\treturn call_sdk_function('PrlLic_GetCompanyName', self.handle)", "def org_urn(self):\n return f\"psc:org:{self.credentials.org_key}\"", "def GetOrganization(**argd):\n flag, ret = CGateway.core.GetOrganizationName(argd[\"session\"])\n xFlag = CGateway._HandleExceptionAndUnauthorized(flag, ret, argd[\"session\"])\n if xFlag is not None:\n return xFlag\n return CGateway._SuccessResponse({'return': ret})", "def org_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"org_id\")", "def unsetOrganization(self):\n return _libsbml.ModelCreator_unsetOrganization(self)", "def get_organization(self):\n pos_or_org = self.position.to_object\n if pos_or_org is None:\n return None\n elif pos_or_org.portal_type == 'position':\n return pos_or_org.get_organization()\n elif pos_or_org.portal_type == 'organization':\n return pos_or_org", "def organization_arn(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"organization_arn\")", "def organization_unit(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"organization_unit\")", "def organization_arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"organization_arn\")", "def organization_arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"organization_arn\")", "def organizations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"organizations\")", "def organizations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"organizations\")", 
"def organizations(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"organizations\")", "def parent_organization(self) -> object:\n return self._parent_organization", "def organization(self, organization):\n\n self._organization = organization", "def organization(self, organization):\n\n self._organization = organization", "def organization(self, organization):\n\n self._organization = organization", "def org_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"org_id\")", "def display_org_with_default(self):\r\n if self.display_organization:\r\n return self.display_organization\r\n\r\n return self.org", "def get_org(self, name: str):\n org = self._get_org(name)\n if org.keychain:\n assert org.keychain is self\n else:\n org.keychain = self\n return org", "def organization_id(self, organization_id):\n\n self._organization_id = organization_id", "def organization_id(self, organization_id):\n\n self._organization_id = organization_id", "def author_organization(self,author,org=None):\n\n rowEle = self._get_author_row(author)\n orgEle = self.find_element(self.locators['organization'],rowEle)\n\n #FIXME: shenanigans begin\n orgName = orgEle.get_attribute('name')\n key = \"orgName-%s\" % (orgName)\n self.locators[key] = \"css=[name='%s']\" % (orgName)\n obj = Text(self,key)\n obj.detach_from_owner()\n #FIXME: shenanigans end\n\n oldorg = obj.value\n if org:\n obj.value = org\n # click the \"save changes\" button\n self.submit.click()\n del obj\n del self.locators[key]\n return oldorg", "def sfdc_org_id(self) -> str:\n return pulumi.get(self, \"sfdc_org_id\")", "def company_name(self, company_name):\n\n self._company_name = company_name", "def organization_current_get(request):\n if request.organization:\n return request.organization.slug\n else:\n return None", "def test_get_organization(self):\n pass", "def filter_organisation(self, org_name):\n return self.form.set_value(\"organisation search\", org_name)", "def _get_org(self, org_name):\n org = SpokeOrg()\n result = org.get(org_name)\n if result == []:\n msg = \"Can't find org %s\" % org_name\n self.log.error(msg)\n raise error.NotFound(msg) \n return result", "def __str__(self):\n if self.name != None and self.name != '':\n return self.name\n else:\n return \"Organization object owned by %s.\"%(self.owner)", "def moc_vnet_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"moc_vnet_name\")", "def organization_unit(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"organization_unit\")", "def organization_unit(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"organization_unit\")", "def organizations(self):\n return self.get('{}/orgs'.format(ApiVersion.A1.value))", "def set_CompanyName(self, value):\n super(AddressValidationInputSet, self)._set_input('CompanyName', value)", "def organization(self, organization_id_or_name):\r\n return Organization(self, organization_id_or_name)", "def build_org(self, doc, entity):\n match = self.org_re.match(entity)\n if match and validations.validate_org_name(match.group(self.ORG_NAME_GROUP)):\n name = match.group(self.ORG_NAME_GROUP).strip()\n email = match.group(self.ORG_EMAIL_GROUP)\n if (email is not None) and (len(email) != 0):\n return creationinfo.Organization(name=name, email=email.strip())\n else:\n return creationinfo.Organization(name=name, email=None)\n else:\n raise SPDXValueError('Failed to extract Organization name')", "def org_id(self) -> 
pulumi.Input[str]:\n return pulumi.get(self, \"org_id\")", "def get_organization_url(self, organization: Dict):\n return f\"{self.site_url}/organization/{organization['name']}\"", "def getOrganisation(self):\n return _libsbml.ModelCreator_getOrganisation(self)", "def GetOrganizationSettings(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def sub_organization(self) -> object:\n return self._sub_organization", "def format_org_name(organization):\n secret_str = (\n \"\" if organization not in secret_orgs else \" {m(Secret){n\"\n )\n return \"%s%s\" % (organization.name, secret_str)", "def parent_organization(self, parent_organization: object):\n\n self._parent_organization = parent_organization", "def user_org_id(self) -> str:\n return self._user_org_id", "def is_organization(self):\n return self._is_name_type(self.ORGANIZATION)", "def test_retrieve_l_organization(self):\n pass", "def course_org(self):\n return self.course_key.org", "def organization(self):\r\n return Organization(self)", "def organization(self):\r\n return Organization(self)", "def organization(self):\r\n return Organization(self)", "def organization(self, organization_id):\r\n return organizations.Organization(self, organization_id)", "def common_organization_path(organization: str,) -> str:\n return \"organizations/{organization}\".format(organization=organization,)", "def company(self):\n return self._company", "def company(self):\n return self._company", "def partner_name(self) -> str:\n return pulumi.get(self, \"partner_name\")", "def partner_name(self) -> str:\n return pulumi.get(self, \"partner_name\")", "def properties(self) -> Optional['outputs.AzureDevOpsOrgPropertiesResponse']:\n return pulumi.get(self, \"properties\")", "def test_get_organization_from_api_key(self):\n pass", "def get_organization_unit(self):\n return self.reference[REF_ORGANIZATION_UNIT][REF_VALUE]", "def organizations(self):\n self.elements('organizations')", "def localOrganization(request):\n # Test Comment\n assert isinstance(request, HttpRequest)\n return render(\n request,\n 'localOrganizingCommittee.html',\n context_instance=RequestContext(request, {})\n )", "def get_one_organization_by_name(ctx, org_name):\n pprint(cmd.get_one_organization_by_name(\n client=ctx.obj, organization_name=org_name))", "def vnet_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"vnet_name\")", "def get_company(self, name):\n return self.website.company.id", "def get_company(self, name):\n return self.instance.company.id", "def design_company(self):\n return self._design_company", "def common_organization_path(\n organization: str,\n ) -> str:\n return \"organizations/{organization}\".format(\n organization=organization,\n )", "def get_company_id_parameter(self):\n company_id_field = self.view.company_id_field\n if company_id_field:\n return Parameter(\n name=company_id_field,\n in_='query',\n type='integer',\n description='The ID of the company to manage',\n required=True,\n )\n\n return None", "def organization_field(self, field_id):\r\n return organizations.OrganizationField(self, field_id)", "def test_organization_pickername(self):\n # scenario 1: when only title is given\n abnegation = models.Organization(title=\"Abnegation\")\n self.assertIsInstance(abnegation.pickername, str)\n self.assertEqual(abnegation.pickername, abnegation.title)\n\n # scenario 2: when both name and title are given\n name = 'cullens'\n title 
= 'The Cullens'\n olympic_coven = models.Organization(title=title)\n olympic_coven.name = name\n db.session.add(olympic_coven)\n db.session.commit()\n self.assertIsInstance(olympic_coven.pickername, str)\n assert (\n '{title} (@{name})'.format(title=title, name=name)\n in olympic_coven.pickername\n )", "def vnet_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"vnet_name\")", "def virtual_name(self) -> Optional[str]:\n return pulumi.get(self, \"virtual_name\")", "def virtual_name(self) -> Optional[str]:\n return pulumi.get(self, \"virtual_name\")", "def getvversionprofilesorganization(\n self, version, ms_correlation_id=None, ms_request_id=None, custom_headers=None, raw=False, **operation_config):\n # Construct URL\n url = self.getvversionprofilesorganization.metadata['url']\n path_format_arguments = {\n 'version': self._serialize.url(\"version\", version, 'str')\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {}\n\n # Construct headers\n header_parameters = {}\n header_parameters['Content-Type'] = 'application/json; charset=utf-8'\n if self.config.generate_client_request_id:\n header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())\n if custom_headers:\n header_parameters.update(custom_headers)\n if ms_correlation_id is not None:\n header_parameters['MS-CorrelationId'] = self._serialize.header(\"ms_correlation_id\", ms_correlation_id, 'str')\n if ms_request_id is not None:\n header_parameters['MS-RequestId'] = self._serialize.header(\"ms_request_id\", ms_request_id, 'str')\n if self.config.accept_language is not None:\n header_parameters['accept-language'] = self._serialize.header(\"self.config.accept_language\", self.config.accept_language, 'str')\n\n # Construct and send request\n request = self._client.get(url, query_parameters)\n response = self._client.send(request, header_parameters, stream=False, **operation_config)\n\n if response.status_code not in [200, 201, 400, 401, 403, 404, 500]:\n exp = CloudError(response)\n exp.request_id = response.headers.get('x-ms-request-id')\n raise exp\n\n deserialized = None\n\n if response.status_code in [200, 201]:\n deserialized = self._deserialize('MicrosoftPartnerSdkContractsV1OrganizationProfile', response)\n\n if raw:\n client_raw_response = ClientRawResponse(deserialized, response)\n return client_raw_response\n\n return deserialized", "def setOrganisation(self, *args):\n return _libsbml.ModelCreator_setOrganisation(self, *args)", "def get_company(self, name):\n return self.store.company.id", "def setName(self, *args):\n return _libsbml.Compartment_setName(self, *args)", "def get_user_home_organization_name():\n if not is_authenticated() or 'samlUserdata' not in session:\n return None\n\n home_organization_id = session.get('samlUserdata', {}).get(SAML_ATTRIBUTES.get('haka_org_name', None), False)\n\n return home_organization_id[0] if home_organization_id else not_found('home_organization_id')\n return None", "def set_org_and_space(self, org_name, space_name):\n res = self._cc.organizations().get_by_name(org_name)\n self._org = res.resource\n\n res = self._cc.request(self._org.spaces_url).get_by_name(space_name)\n self._space = res.resource\n return self" ]
[ "0.684137", "0.6185486", "0.61104095", "0.60688007", "0.6068101", "0.6055266", "0.59143037", "0.5851724", "0.5840031", "0.58396685", "0.58396685", "0.58298886", "0.5803846", "0.5779328", "0.5779328", "0.56808895", "0.56748295", "0.56549084", "0.5612998", "0.5557546", "0.5490174", "0.54746515", "0.5433192", "0.53866017", "0.5382352", "0.5329592", "0.5325158", "0.5312041", "0.5268666", "0.5268666", "0.52590066", "0.52590066", "0.522945", "0.52226377", "0.5206489", "0.5206489", "0.5206489", "0.5166314", "0.5155805", "0.5139714", "0.51390624", "0.51390624", "0.51322585", "0.50874376", "0.50662804", "0.5062961", "0.50506556", "0.50464857", "0.5044821", "0.503715", "0.50298965", "0.50297743", "0.50297743", "0.5016105", "0.5013948", "0.50044155", "0.49903378", "0.49765292", "0.4951035", "0.4945388", "0.49015668", "0.48907253", "0.4884997", "0.48835912", "0.48428783", "0.483643", "0.48296705", "0.48159498", "0.4814043", "0.4814043", "0.4814043", "0.48041", "0.47850427", "0.47774166", "0.47774166", "0.4777086", "0.4777086", "0.47724932", "0.47673514", "0.47505313", "0.4744337", "0.47319564", "0.47275993", "0.47217554", "0.46943218", "0.46921945", "0.46861038", "0.46712816", "0.4666258", "0.46594146", "0.46430272", "0.46320277", "0.46045014", "0.46045014", "0.45886314", "0.45823225", "0.4580973", "0.45714214", "0.45665428", "0.45388845" ]
0.83878106
0
Gets and sets the genres
def genres(self): if "genres" in self._prop_dict: return self._prop_dict["genres"] else: return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_genres(self) -> List[Genre]:\n raise NotImplementedError", "def get_genre(self):\n self.genre = imdb.get_title_genres(self.ID)\n self._genre_printer()", "async def get_genres(self) -> APIReturn:\n return await self._request(\"GET\", \"/getGenres\")", "def recommendation_genre_seeds(self) -> List[str]:\n return self._get('recommendations/available-genre-seeds')['genres']", "def get_all_genres(self):\n self.cursor.execute(\"select * from genres\")\n self.connection.commit()\n return self.cursor.fetchall()", "def set_genre(self, genre: str) -> None:\n self.genre = genre", "def _get_genres(self):\n separated = self.movies['genre'].apply(self.separate_genre)\n return {g: True for x in separated for g in x}.keys()", "def recommendation_genre_seeds(self, **kwargs):\n return self._get(API.RECOMMENDATIONS_GENRES.value, **kwargs)", "def all(self):\n genres = []\n for row in self.db.cursor().execute('SELECT genre_id, name FROM genres'):\n genre = {\n 'id' : row[0],\n 'name' : row[1]\n }\n genres.append(genre)\n\n return genres", "def getGenres(movieInfo):\n if \"genres\" in movieInfo:\n return [ _format(genre[\"name\"]) for genre in movieInfo[\"genres\"] ]\n else:\n raise AttributeError(\"%s instance has no attribute genre\" % movieInfo)", "def get_recommendation_genre_seeds(client = None):\n\n return client.recommendation_genre_seeds()['genres']", "def get_genres(type_: str, value_: str, page: int, step: int):\n genre = factory.get_elem_list(Genre, type_, value_, page, step)\n return genre", "def saveTmdbGenres():\n \n listGenres = tmdb.Genres().list()[\"genres\"]\n \n genres = { _format(g[\"name\"]):i for i, g in enumerate(listGenres) }\n\n np.save(GENRES_FILE, np.asarray([genres]))", "def listGenres(movieId):\n genres = movies.at[movieId, 'genres'] #change movies to whatever variable name the movies df has\n genres = genres.split('|')\n return genres", "def getGenreDictionary(self):\n genreDictionary = {}\n genres = [Genre(u'action', 1),\n Genre(u'adventure', 2),\n Genre(u'cars', 3),\n Genre(u'comedy', 4),\n Genre(u'dementia', 5),\n Genre(u'demons', 6),\n Genre(u'mystery', 7),\n Genre(u'drama', 8),\n Genre(u'ecchi', 9),\n Genre(u'fantasy', 10),\n Genre(u'game', 11),\n Genre(u'hentai', 12),\n Genre(u'historical', 13),\n Genre(u'horror', 14),\n Genre(u'kids', 15),\n Genre(u'magic', 16),\n Genre(u'martial_arts', 17),\n Genre(u'mecha', 18),\n Genre(u'music', 19),\n Genre(u'parody', 20),\n Genre(u'samurai', 21),\n Genre(u'romance', 22),\n Genre(u'school', 23),\n Genre(u'sci-fi', 24),\n Genre(u'shoujo', 25),\n Genre(u'shoujo_ai', 26),\n Genre(u'shounen', 27),\n Genre(u'shounen_ai', 28),\n Genre(u'space', 29),\n Genre(u'sports', 30),\n Genre(u'super_power', 31),\n Genre(u'vampire', 32),\n Genre(u'yaoi', 33),\n Genre(u'yuri', 34),\n Genre(u'harem', 35),\n Genre(u'slice_of_life', 36),\n Genre(u'supernatural', 37),\n Genre(u'military', 38),\n Genre(u'police', 39),\n Genre(u'psychological', 40),\n Genre(u'thriller', 41),\n Genre(u'seinen', 42),\n Genre(u'josei', 43)]\n for genre in genres:\n genreDictionary[genre] = 0\n for genre in self.anime_genres:\n genreDictionary[genre] = 1\n return genreDictionary", "def get_genre(self) -> Optional[str]:\n return self.genre", "def get_genre(self, object_id):\n return self.get_object(\"genre\", object_id)", "def getTmdbGenres():\n\n #If the file is not present in the resource, creates it \n if not isfile(GENRES_FILE):\n saveTmdbGenres()\n\n return np.load(GENRES_FILE)[0]", "def get_metadata(data):\n genres = list(data[\"genre\"])\n print(\"genres:\", 
len(set(genres)), set(genres))\n return genres", "def random_by_genre_list(self):\n\n for genre in self.connection.walk_genres():\n url = self.build_url({\n \"mode\": \"random_by_genre_track_list\",\n \"foldername\": genre[\"value\"].encode(\"utf-8\")})\n\n li = xbmcgui.ListItem(genre[\"value\"])\n xbmcplugin.addDirectoryItem(\n handle=self.addon_handle, url=url, listitem=li, isFolder=True)\n\n xbmcplugin.endOfDirectory(self.addon_handle)", "def getGenreVector(self):\n genreDictionary = self.getGenreDictionary()\n sortedGenres = sorted(genreDictionary.items(), key = lambda tup : tup[0].genre_id)\n return [tup[1] for tup in sortedGenres]", "def search_genres(self, needle):\n return self._genre_search.search(searchable(needle))", "def genes():\n return [\"b2935\", \"b0723\", \"b0451\"]", "def get_genre(self, gen: str) -> Genre:\n self.logging.log(15, f\"getting genre: {gen}\")\n return self.sess.query(Genre).filter(Genre.genre == gen).one()", "def parse_genres(genres):\n\tgenre_list = []\n\tfor genre in genres:\n\t\tgenre_list.append(genre.name)\n\n\treturn \", \".join(genre_list)", "def get_genre(id_genre):\n genre = factory.get_elem_solo(Genre, id_genre)\n return genre", "def _genre_printer(self):\n print(\"Year: \" + str(self.genre['year'])) # Prints out year\n print(\"Genre: \" + ', '.join(self.genre['genres']) + \"\\n\") # Prints out genres", "def genes():\n data=pd.read_csv(config['stan'], sep=\" \")\n return list(set(data['Gene_id']))", "def set_random_genres(self, num:int):\n try:\n self.cursor.execute(\"insert into genres (name, example, year) \"\n \"select rand.name, rand.example, rand.year \"\n \"from (SELECT \"\n \"(md5(random()::text)) as name, \"\n \"(md5(random()::text)) as example, \"\n \"2020 - trunc(Random()*1000)::integer as year \"\n f\"from generate_series(1,{num})) as rand\")\n self.connection.commit()\n if self.cursor.rowcount:\n return \"generated genres\"\n else:\n return \"NULL\"\n except(Exception, psycopg2.Error) as error:\n self.connect.rollback()\n print(\"error in generate\", error)", "def get_artist_genres(artist_id):\n profile_endpoint = 'https://api.spotify.com/v1/artists/' + artist_id\n\n r = requests.get(profile_endpoint)\n print r.status_code\n \n if r.status_code != 200:\n return None\n artist_info = r.json()\n genre_names = artist_info['genres']\n genres = [Genre(name=genre) for genre in genre_names]\n print genres\n return genres", "def get_genre(self, id):\n for row in self.db.cursor().execute('SELECT genre_id, name FROM genres WHERE genre_id=' + str(id)):\n genre = {\n 'id' : row[0],\n 'name' : row[1]\n }\n\n return genre", "def genres(request):\n\n genre_list = Genres.objects.all()\n template = 'genres.html'\n context = {'genre_list': genre_list, }\n\n return render(request, template, context)", "def genre_list(self):\n\n for genre in self.connection.walk_genres():\n url = self.build_url({\n \"mode\": \"albums_by_genre_list\",\n \"foldername\": genre[\"value\"].encode(\"utf-8\")})\n\n li = xbmcgui.ListItem(genre[\"value\"])\n xbmcplugin.addDirectoryItem(\n handle=self.addon_handle, url=url, listitem=li, isFolder=True)\n\n xbmcplugin.endOfDirectory(self.addon_handle)", "def movie_list(self):\n return self._request_obj(self._urls[\"movie_list\"], key=\"genres\")", "def genotypes(self):\n return self.data.genotypes.values", "def test_get_genres_no_params(self, id_name_elms, service_config, request):\n service_config.genre_store.get_all.return_value = id_name_elms\n params = avalon.web.request.Parameters(request)\n\n service = 
avalon.web.services.AvalonMetadataService(service_config)\n results = service.get_genres(params)\n\n assert results == id_name_elms, 'Expected all genres returned'", "def display_genre(self):\n \n # Get first 3 genres and join to a string.\n return ', '.join([ genre.name for genre in self.genre.all()[:3] ])", "def random_by_genre_track_list(self):\n\n genre = self.addon_args[\"foldername\"][0].decode(\"utf-8\")\n\n xbmcplugin.setContent(self.addon_handle, \"songs\")\n\n for track in self.connection.walk_random_songs(\n size=self.random_count, genre=genre):\n self.add_track(track, show_artist=True)\n\n xbmcplugin.endOfDirectory(self.addon_handle)", "def display_genre(self):\n\n\t\treturn ', '.join(genre.name for genre in self.genre.all()[:3])", "def display_genre(self, *args):\n return ', '.join(genre.name for genre in args[0].genre.all()[:3])", "def generate_genotype(self):\n genes = []\n for i in range(self.n_genes):\n genes.append(self.Gene(n_bases=self.n_bases))\n self.genes = genes", "def _get_obj_geneset(self, obj):\n obj_geneset = set(obj.input.get(\"mutations\", []))\n if not obj_geneset:\n # Geneset is given via geneset input:\n gs = self.resolwe.geneset.get(obj.input[\"geneset\"])\n obj_geneset = set(gs.genes)\n\n # Convert to gene symbols in case genes are given as feature ID's\n if gs.output[\"source\"] != \"UCSC\":\n qs = self.resolwe.feature.filter(feature_id__in=list(obj_geneset))\n id_2_name = {obj.feature_id: obj.name for obj in qs}\n obj_geneset = set([id_2_name[gene] for gene in obj_geneset])\n\n return obj_geneset", "def get_movies_by_genre(self, genre: str):\n raise NotImplementedError", "def dump_genres_to_db() -> None:\n # TODO: connection should probably be done in a safer way\n db = database.SessionLocal()\n genres = get_genres()\n\n for key in genres:\n formatted_genre = schemas.Genre(\n id=key,\n name=genres[key],\n value=genres[key],\n )\n\n db_genre = crud.get_genre_by_id(db=db, genre_id=key)\n if not db_genre:\n crud.create_genre(db=db, genre=formatted_genre)\n db.close()", "def format_genres() -> None:\n db = database.SessionLocal()\n genres = crud.get_all_genres(db=db)\n\n formatted_genres = [\n schemas.Genre(\n id=genre.id,\n name=str(genre.name).replace(' & ', '%20%26%20'),\n value=genre.name\n )\n for genre in genres if ' & ' in genre.name\n ]\n\n for genre in formatted_genres:\n crud.update_genre_name(db, genre)", "def genre_choices(request):\n choices = GENRES\n diction = {}\n li = []\n for data in choices:\n li.append(data[0])\n diction['GENRE_CHOICES'] = li\n return JsonResponse(data=diction, status=status.HTTP_200_OK)#, safe=False)", "def geneset(self, value: Union[str, int, Geneset, List[str]]):\n # Geneset can be set only once, prevent modifications\n if self._geneset is not None:\n raise ValueError(\"It is not allowed to change geneset value.\")\n\n if value is None:\n return\n\n # If id / slug of a geneset is given, get it from the Resolwe server\n if isinstance(value, (int, str)):\n gs = self.resolwe.geneset.get(value)\n value = gs.genes\n elif isinstance(value, Geneset):\n value = value.genes\n\n if isinstance(value, (list, set, tuple, pd.Series)):\n self._geneset = set(value)\n else:\n raise ValueError(f'Unsupported type of \"geneset\" input: {value}.')", "def get_variants(cls, gen, folder):\n filename = 'temp_output{}.txt'.format(gen)\n\n with open(os.path.join(folder, filename), encoding='utf_8_sig', mode='r') as f:\n lines = f.readlines()\n\n for line in lines:\n if line.startswith('Phonemes'):\n line = line.strip()\n phonemes = 
line.split(':')[-1].split(',')\n if line.startswith('Allophones'):\n allophones = dict()\n line = line.strip()\n line = line.split(':')[-1]\n if not line:\n pass #no variation this turn\n else:\n line = line.split(',')\n for pair in line:\n ur,sr = pair.split('~')\n allophones[sr] = ur\n\n return phonemes,allophones", "def display_genre(self):\n return ', '.join(genre.name for genre in self.genre.all()[:3])", "def fetch_genre(self, gid: str):\n self.logging.info(f\"fetching genre: {gid}\")\n return self.sess.query(Genre).filter(Genre.gid == gid).one()", "def query_all_genres():\n result = session.query(Genre).all()\n for genre in result:\n print(\"genre: %s\\ndescription: %s\\nposter: %s\\nuser_id:%s\" %\n (genre.name, genre.description, genre.poster, genre.user_id))\n print(\"**************************\")", "def ratings_genres(Y, genres):\n plt.subplot(311)\n ratings_genre(Y, genres, 1)\n plt.subplot(312)\n ratings_genre(Y, genres, 2)\n plt.subplot(313)\n ratings_genre(Y, genres, 3)\n plt.tight_layout()\n plt.show()", "def book_genre_list(request):\n genres = Genre.objects.all().order_by('-name')\n return render(request, 'library/book_genre_list.html', {\"genres\": genres, })", "def chatbot_genre_query(self, genres: list): #-> cursor object\n if not self.client:\n self.connect()\n return self.db.find({\"$query\": { \"genre\": { \"$in\": genres }}, \"$orderby\": { \"avg_vote\" : -1 }}).limit(25)", "def display_genre(self):\n return ', '.join([ genre.name for genre in self.genre.all()[:3] ])", "def tv_list(self):\n return self._request_obj(self._urls[\"tv_list\"], key=\"genres\")", "def sort_genre(self):\n return self.sort('genre')", "def GetGenreMetaData():\n\t\n\tdf = pd.read_csv(\"_data\\\\fma_metadata\\\\genres.csv\",header=0)\n\treturn df", "def _get_genre_vector(self, movie_id, list=False):\n if list:\n return self.movies[self.movies['movie_id'] == movie_id][self.genres].iloc[0].tolist()\n else:\n return self.movies[self.movies['movie_id'] == movie_id][self.genres].iloc[0].as_matrix()", "def _variants_gen(self, test):\n return self._get_variants_gen(test).gen(test)", "def set_genotype(self, genotype):\n self.data[\"GT\"] = genotype\n self._genotype_updated()", "def _get_artists_genre(self, track_df):\n\n sp = self.spotify_clients['user-library-read']\n artistid_to_genre = {}\n\n \"\"\"\n TODO\n Get genre by each artist_id is slow,\n but I got some problems when I use spotipy's artists function\n \"\"\"\n\n for artist_id in track_df['artist_id'].tolist():\n result = sp.artist(artist_id)\n artist_id = result['id']\n genres = result['genres']\n\n if len(genres) == 0:\n continue\n else:\n artistid_to_genre[artist_id] = genres\n\n return artistid_to_genre", "def parse_genres_for_video (self, video, genres):\n video_genres = []\n\n for video_genre_key, video_genre in video['genres'].iteritems():\n if self._is_size_key(video_genre_key) == False and video_genre_key != 'summary':\n name = genres.get(video_genre[1], {}).get('name')\n\n if name:\n video_genres.append(name)\n\n return video_genres", "def getGeneList( self ):\n return self.geneList", "def test_get_songs_by_genre(self, track_elms, service_config, request):\n genre_id = uuid.UUID(avalon.compat.to_uuid_input('c12d2a49-d086-43d6-953d-b870deb24228'))\n service_config.track_store.get_by_genre.return_value = track_elms\n service_config.id_cache.get_genre_id.return_value = genre_id\n request.args['genre'] = 'Genre'\n params = avalon.web.request.Parameters(request)\n\n service = avalon.web.services.AvalonMetadataService(service_config)\n 
results = service.get_songs(params)\n\n assert results == track_elms, 'Expected matching tracks returned'\n service_config.track_store.get_by_genre.assert_called_with(genre_id)", "def add_genre(request):\n\tif request.method == \"POST\":\n\t\tform = GenreForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tgenre = form.save(commit=False)\n\t\t\tgenre.save()\n\telse:\n\t\tform = GenreForm()\n\n\tgenres = Genre.objects.all()\n\tif genres:\n\t\tavailable_genre = parse_genres(genres)\n\t\tmessages.info(request, available_genre)\n\telse:\n\t\tinfo_msg = \"\"\"\n\t\t\t\t\tThere is no genre registered in the system by user. 'unspecified' is a default genre\n\t\t\t\t\tto choose while creating a new book\n\t\t\t\t \"\"\"\n\t\tmessages.info(request, info_msg)\n\n\treturn render(request, 'BookManagement/genres.html', {'form' : form})", "def _generator():\n filename_1 = 'gene.txt'\n filename_2 = 'geneSynonym.txt'\n gene_set_1 = gene_names(filename_1)\n gene_syn = gene_names(filename_2, complete=False)\n genes = gene_set_1 | gene_syn\n return genes", "def _genre_button():\n OPTIONS = [\n \"Article\",\n \"Paragraph\",\n \"Lyrics\"\n ]\n\n self.genre_button = StringVar(self.master)\n self.genre_button.set(\"Paragraph\") # default value\n\n dropdown = OptionMenu(self.master, self.genre_button, \"Genre\", *OPTIONS)\n dropdown.place(height=30, width=100, relx=0.63, rely=0.6, anchor='center')\n\n \"\"\"\n Everytime they choose a different genre the program should randomize a text\n from that genre making a link to the database\n \"\"\"", "def update_genres(source_item: Dict, target_item: Dict) -> None:\n for genre in target_item.get('genre', []):\n for item in source_item['highlight'].get('genres', []):\n if genre['name'].strip() in remove_html_tags(item):\n genre['name'] = item", "def test_get_genres_query_param(self, id_name_elms, service_config, request):\n service_config.search.search_genres.return_value = id_name_elms\n request.args['query'] = 'Dummy'\n params = avalon.web.request.Parameters(request)\n\n service = avalon.web.services.AvalonMetadataService(service_config)\n results = service.get_genres(params)\n\n assert results == id_name_elms, 'Expected matching genres returned'", "def mel_gene_set(dict): # this uses the flanking genes, specifically\n\tmel_gene_set = set()\n\tfor k, v in dict.iteritems():\n\t\t#v[0] is up, v[1] is down\n\t\t#print \"this is v:\", v\n\t\tfor mg in v[0]:\n\t\t\tmel_gene_set.add(mg)\n\t\tfor mg in v[1]:\n\t\t\tmel_gene_set.add(mg)\n\treturn mel_gene_set", "def _optionsmenu_change_genre():\n self.helpindex = Toplevel(self.master)\n self.helpindex.title(\"Change Genre\")\n self.helpindex.geometry(\"300x500\")", "def readGenes(gtf):\n #read gtf\n genes = HTSeq.GenomicArrayOfSets(\"auto\", stranded=False)\n gs = {}\n for line in open(gtf):\n if line.startswith(\"#\"):\n continue\n line = line.split(\"\\n\")[0].split(\"\\t\")\n if line[2] != 'exon':\n continue\n ds = parseGtfFeature(line[8])\n key = \"|\".join([ds[\"gene_id\"], ds[\"gene_name\"]])\n nline = [\n line[0], line[3], line[4],\n \"|\".join([ds[\"gene_id\"], ds[\"gene_name\"]]), \".\", line[6]\n ]\n if key not in gs:\n gs[key] = [line[0], int(line[3]), int(line[4])]\n else:\n if int(line[3]) < gs[key][1]:\n gs[key][1] = int(line[3])\n if int(line[4]) > gs[key][2]:\n gs[key][2] = int(line[4])\n for g, v in gs.items():\n iv = HTSeq.GenomicInterval(v[0], v[1], v[2])\n genes[iv] += g\n return genes", "def get_genre():\n\n cnx,cur = connect_to_db() #get connection with db\n cur.execute(\"SELECT DISTINCT genre FROM genres\") 
#sql query to return all genres\n lst = cur.fetchall()\n cur.close()\n cnx.close()\n return lst", "def albums_by_genre_list(self):\n\n genre = self.addon_args[\"foldername\"][0].decode(\"utf-8\")\n\n xbmcplugin.setContent(self.addon_handle, \"albums\")\n\n for album in self.connection.walk_album_list_genre(genre):\n self.add_album(album, show_artist=True)\n\n xbmcplugin.endOfDirectory(self.addon_handle)", "def getJr358Genes():\n return ('MG_001', 'MG_003', 'MG_004', 'MG_005', 'MG_006', 'MG_007', 'MG_008', 'MG_009', 'MG_012', 'MG_013', 'MG_014', 'MG_015', 'MG_019', 'MG_020', 'MG_021', 'MG_022', 'MG_023', 'MG_026', 'MG_027', 'MG_029', 'MG_030', 'MG_031', 'MG_033', 'MG_034', 'MG_035', 'MG_036', 'MG_037', 'MG_038', 'MG_039', 'MG_040', 'MG_041', 'MG_042', 'MG_043', 'MG_044', 'MG_045', 'MG_046', 'MG_047', 'MG_048', 'MG_049', 'MG_050', 'MG_051', 'MG_052', 'MG_053', 'MG_055', 'MG_473', 'MG_058', 'MG_059', 'MG_061', 'MG_062', 'MG_063', 'MG_064', 'MG_065', 'MG_066', 'MG_069', 'MG_070', 'MG_071', 'MG_072', 'MG_073', 'MG_075', 'MG_077', 'MG_078', 'MG_079', 'MG_080', 'MG_081', 'MG_082', 'MG_083', 'MG_084', 'MG_085', 'MG_086', 'MG_087', 'MG_088', 'MG_089', 'MG_090', 'MG_091', 'MG_092', 'MG_093', 'MG_094', 'MG_097', 'MG_098', 'MG_099', 'MG_100', 'MG_101', 'MG_102', 'MG_476', 'MG_104', 'MG_105', 'MG_106', 'MG_107', 'MG_109', 'MG_110', 'MG_111', 'MG_112', 'MG_113', 'MG_114', 'MG_118', 'MG_119', 'MG_120', 'MG_121', 'MG_122', 'MG_123', 'MG_124', 'MG_126', 'MG_127', 'MG_128', 'MG_130', 'MG_132', 'MG_136', 'MG_137', 'MG_139', 'MG_141', 'MG_142', 'MG_143', 'MG_145', 'MG_149', 'MG_150', 'MG_151', 'MG_152', 'MG_153', 'MG_154', 'MG_155', 'MG_156', 'MG_157', 'MG_158', 'MG_159', 'MG_160', 'MG_161', 'MG_162', 'MG_163', 'MG_164', 'MG_165', 'MG_166', 'MG_167', 'MG_168', 'MG_169', 'MG_170', 'MG_171', 'MG_172', 'MG_173', 'MG_174', 'MG_175', 'MG_176', 'MG_177', 'MG_178', 'MG_179', 'MG_180', 'MG_181', 'MG_182', 'MG_183', 'MG_184', 'MG_186', 'MG_187', 'MG_188', 'MG_189', 'MG_190', 'MG_191', 'MG_192', 'MG_194', 'MG_195', 'MG_196', 'MG_197', 'MG_198', 'MG_200', 'MG_201', 'MG_203', 'MG_204', 'MG_205', 'MG_206', 'MG_208', 'MG_209', 'MG_210', 'MG_481', 'MG_482', 'MG_212', 'MG_213', 'MG_214', 'MG_215', 'MG_216', 'MG_217', 'MG_218', 'MG_221', 'MG_224', 'MG_225', 'MG_226', 'MG_227', 'MG_228', 'MG_229', 'MG_230', 'MG_231', 'MG_232', 'MG_234', 'MG_235', 'MG_236', 'MG_238', 'MG_239', 'MG_240', 'MG_244', 'MG_245', 'MG_249', 'MG_250', 'MG_251', 'MG_252', 'MG_253', 'MG_254', 'MG_257', 'MG_258', 'MG_259', 'MG_261', 'MG_262', 'MG_498', 'MG_264', 'MG_265', 'MG_266', 'MG_270', 'MG_271', 'MG_272', 'MG_273', 'MG_274', 'MG_275', 'MG_276', 'MG_277', 'MG_278', 'MG_282', 'MG_283', 'MG_287', 'MG_288', 'MG_289', 'MG_290', 'MG_291', 'MG_292', 'MG_293', 'MG_295', 'MG_297', 'MG_298', 'MG_299', 'MG_300', 'MG_301', 'MG_302', 'MG_303', 'MG_304', 'MG_305', 'MG_309', 'MG_310', 'MG_311', 'MG_312', 'MG_315', 'MG_316', 'MG_317', 'MG_318', 'MG_321', 'MG_322', 'MG_323', 'MG_324', 'MG_325', 'MG_327', 'MG_329', 'MG_330', 'MG_333', 'MG_334', 'MG_335', 'MG_517', 'MG_336', 'MG_339', 'MG_340', 'MG_341', 'MG_342', 'MG_344', 'MG_345', 'MG_346', 'MG_347', 'MG_349', 'MG_351', 'MG_352', 'MG_353', 'MG_355', 'MG_356', 'MG_357', 'MG_358', 'MG_359', 'MG_361', 'MG_362', 'MG_363', 'MG_522', 'MG_365', 'MG_367', 'MG_368', 'MG_369', 'MG_370', 'MG_372', 'MG_375', 'MG_376', 'MG_378', 'MG_379', 'MG_380', 'MG_382', 'MG_383', 'MG_384', 'MG_385', 'MG_386', 'MG_387', 'MG_390', 'MG_391', 'MG_392', 'MG_393', 'MG_394', 'MG_396', 'MG_398', 'MG_399', 'MG_400', 'MG_401', 'MG_402', 'MG_403', 
'MG_404', 'MG_405', 'MG_407', 'MG_408', 'MG_409', 'MG_410', 'MG_411', 'MG_412', 'MG_417', 'MG_418', 'MG_419', 'MG_421', 'MG_424', 'MG_425', 'MG_426', 'MG_427', 'MG_428', 'MG_429', 'MG_430', 'MG_431', 'MG_433', 'MG_434', 'MG_435', 'MG_437', 'MG_438', 'MG_442', 'MG_444', 'MG_445', 'MG_446', 'MG_447', 'MG_448', 'MG_451', 'MG_453', 'MG_454', 'MG_455', 'MG_457', 'MG_458', 'MG_460', 'MG_462', 'MG_463', 'MG_464', 'MG_465', 'MG_466', 'MG_467', 'MG_468', 'MG_526', 'MG_470')", "def set_generation(self,gene):\n self.__generation = gene", "def load(self, gen=None):\n try:\n path = f\"population{'_backup' if self.use_backup else ''}/\" \\\n f\"storage/\" \\\n f\"{self.folder_name}/\" \\\n f\"{self}/\" \\\n f\"generations/\"\n if gen is None:\n # Load in all previous populations\n populations = glob(f\"{path}gen_*\")\n if not populations: raise FileNotFoundError\n \n # Find newest population and save generation number under 'gen'\n populations = [p.replace('\\\\', '/') for p in populations]\n regex = r\"(?<=\" + re.escape(f'{path}gen_') + \")[0-9]*\"\n gen = max([int(re.findall(regex, p)[0]) for p in populations])\n \n # Load in the population under the specified generation\n pop = load_pickle(f'{path}/gen_{gen:05d}')\n self.best_fitness = pop.best_fitness\n self.best_genome = pop.best_genome\n self.best_genome_hist = pop.best_genome_hist\n self.config = pop.config\n self.generation = pop.generation\n self.population = pop.population\n self.reporters = pop.reporters\n self.reproduction = pop.reproduction\n self.species = pop.species\n self.species_hist = pop.species_hist\n self.log(f\"\\nPopulation '{self}' loaded successfully! Current generation: {self.generation}\")\n return True\n except FileNotFoundError:\n return False", "def samples_set(self):\n self.get_samples_set(self.samples_db)\n self.choose_samples(self.chosen_samples_db, self.chosen_hashes)", "def load_gene_set(self, gene_set:List[str], taxon:str=None):\n self.gene_set = gene_set\n self.taxon = taxon", "def test_genbank_to_genome_taxonomy(self):\n result = self.gfu.genbank_to_genome(self.ctx, {\n 'workspace_name': self.ws_name,\n 'generate_ids_if_needed': 'true', # why is this a string\n 'taxon_id': '3702',\n 'file': {\n 'path': f\"{_DATA_PATH}/wigglesworthia/genome.gb\"\n },\n 'genome_name': str(uuid4()),\n })\n ('result', result)\n ref = result[0]['genome_ref']\n self.assertTrue(ref, 'Genome ref exists')\n info = result[0]['genome_info']\n typ = info[2]\n self.assertTrue(typ.startswith('KBaseGenomes.Genome'))\n info_details = info[-1]\n self.assertEqual(info_details['Taxonomy'], (\n \"cellular organisms;Eukaryota;Viridiplantae;\"\n \"Streptophyta;Streptophytina;Embryophyta;Tracheophyta;\"\n \"Euphyllophyta;Spermatophyta;Magnoliopsida;Mesangiospermae;\"\n \"eudicotyledons;Gunneridae;Pentapetalae;rosids;malvids;\"\n \"Brassicales;Brassicaceae;Camelineae;Arabidopsis\"\n ))\n self.assertEqual(info_details['Size'], '697724')\n self.assertEqual(info_details['Source'], 'Genbank')\n self.assertEqual(info_details['Name'], 'Wigglesworthia glossinidia endosymbiont of Glossina brevipalpis')\n self.assertEqual(info_details['GC content'], '0.22479')\n self.assertEqual(info_details['Genetic code'], '11')\n self.assertEqual(info_details['Number of Genome Level Warnings'], '1')\n self.assertEqual(info_details['Source ID'], 'BA000021')\n self.assertEqual(info_details['Number of Protein Encoding Genes'], '20')\n self.assertEqual(info_details['Domain'], 'Eukaryota')\n self.assertTrue(info_details['Assembly Object'])\n self.assertEqual(info_details['Number 
contigs'], '1')\n self.assertEqual(info_details['Number of CDS'], '20')\n self.assertTrue(info_details['MD5'])", "def _load_genes(self):\n with open(self.gene_file_path, 'r') as gene_file:\n csv_reader = csv.reader(gene_file, delimiter=',')\n for gene in csv_reader:\n yield (gene[self.GENE_NAME_IDX], gene[self.GENE_ID_IDX])", "def get_genre_similarity(self):\n genre_words = []\n for w in self.target_movie.genres.split('|'):\n w = w.strip('- ,:(){}[]')\n genre_words.append(w)\n\n print(genre_words)\n\n res = self.db.query(Movie).filter(\n Movie.movie_id != self.target_movie.movie_id).filter(\n Movie.movie_id.in_(self.recommendation_pool.keys())\n ).filter(or_(\n Movie.genres.ilike(r'%' + gw + r'%') for gw in genre_words\n )).all()\n\n print(\"%i records from partial genres match\" % len(res))\n GSW = self.GENRES_SIMILARITY_WEIGHT\n for rec in res:\n smid = rec.movie_id\n self.recommendation_pool[smid]['genres_similarity'] = \\\n jaccard_index(self.target_movie.genres, rec.genres, '|') * GSW", "def find_genres(genre_dom, dom):\n # take the first genre and turn it into a string\n genre = str(genre_dom)[3:-1]\n\n # see if there are more genres to a movie\n next_genre = dom.find(\"div\", itemprop=\"genre\").a.find_next_sibling(\"a\")\n\n # add the new genres to the string\n while(next_genre):\n temp = next_genre.get_text().encode(\"utf-8\")\n genre = genre + \"; \" + \"\" + str(temp)[3:-1]\n next_genre = next_genre.find_next_sibling(\"a\")\n return genre", "def mutate(self, genes: Dict[str, List[int]]) -> Dict[str, List[int]]:\n \n genes = deepcopy(genes)\n for key in genes.keys():\n # the mutation gets a set number of genes from the length of the genome and changes them to\n # new random choices. This number is proportional to the mutation probability\n mutated_indexes = random.choices(range(self.ref_count[key]), k=int(self.mutation_prob * self.ref_count[key]))\n genes[key][mutated_indexes] = numpy.random.randint(0, len(self.grammar[key]))\n\n return genes", "def __encode_genres(self, df):\n\t\tself.genre_mlb = MultiLabelBinarizer()\n\t\tdf = df.join(pd.DataFrame(self.genre_mlb.fit_transform(df.pop(\"genres\")), columns=self.genre_mlb.classes_,\n\t\t\t\t\t\t\t\t index=df.index),\n\t\t\t\t\t lsuffix='l')\n\t\treturn df", "def genotype(args) :\n from genotyper import genotype_samples\n genotype_samples(args)", "def genre_key(genre_name=DEFAULT_GENRE):\n return ndb.Key('Genre', genre_name.lower())", "def genes(self) -> Tuple[Gene, ...]:\n return tuple(self.influence_graph.genes)", "def add_genre(self, tag: Genre):\n raise NotImplementedError", "def _genres_to_dummy(self):\n\n def build_column(data, name):\n \"\"\" Builds the input column taking into account the genes list \"\"\"\n return data['genre'].apply(lambda l: name in l)\n\n # Create column for each genre\n for g in self.genres:\n self.movies[g] = build_column(self.movies, g)\n # Delete original one\n self.movies = self.movies.drop('genre', 1)", "def add_genre(self, gid: str, gen: str):\n if self.sess.query(exists().where(Genre.genre_id == gid or Genre.genre == gen)).scalar():\n return\n self.logging.info(f\"adding genre: {gen} with id {gid}\")\n genre = Genre(gid=uuid4().hex,\n genre_id=gid,\n genre=gen)\n self.sess.add(genre)\n self.sess.commit()", "def nsrGenera(taxonList, synonymList):\r\n species = list(filter(None, sorted(taxonList + synonymList)))\r\n generaList = [i.split()[0] for i in species]\r\n generaList = list(dict.fromkeys(generaList))\r\n return generaList", "def split_genres(self, data_frame):\n genre_data_frame = 
data_frame['genres'].str.split('|', expand=True).stack()\n genre_data_frame.name = \"genre\"\n genre_data_frame.index = genre_data_frame.index.droplevel(-1)\n genre_data_frame = genre_data_frame.reset_index()\n data_frame = data_frame.drop(\"genres\", axis=1)\n data_frame = data_frame.reset_index()\n data_frame = genre_data_frame.merge(data_frame, how=\"left\", on=\"index\")\n return data_frame", "def find_genre_playlists(data):\n playlists = []\n\n if data['genre']:\n playlists += data['genre']\n\n if data['comments']:\n playlists += data['comments']\n\n matches = re.findall('\\(\\s*(cover|live|unplugged|acoustic|remix|instrumental)', data['title'].lower())\n if matches:\n if 'cover' in matches:\n matches.remove('cover')\n matches += ['covers']\n\n if 'acoustic' in matches:\n matches.remove('acoustic')\n matches += ['unplugged']\n\n if 'remix' in matches:\n matches.remove('remix')\n matches += ['remix']\n\n if 'instrumental' in matches:\n matches.remove('instrumental')\n matches += ['instrumental']\n\n playlists += matches\n\n return set([x for x in playlists if x != 'none'])", "def get_movies(genre: str):\n with MongoClient(uri) as client:\n movie_collection = client[DB][MSG_COLLECTION]\n msg_list = movie_collection.find({\"genres\": genre}).limit(100)\n movie_title_list = []\n for msg in msg_list:\n movie_title_list.append(msg[\"title\"])\n return movie_title_list", "def get_game_genre(game_name: str) -> str:\n return df.loc[df['Name'] == game_name][['Name','Genre']]", "def _get_unique_genres(connection):\n print('---Getting unique genres---')\n genreDict = {}\n cursor = connection.cursor()\n cursor.execute(\"SELECT * FROM shared_genres;\")\n res = cursor.fetchall()\n num_genres = 0\n for genre in res:\n if genre[1] not in genreDict:\n genreDict[genre[1]] = num_genres\n num_genres += 1\n return genreDict", "def _get_genes(self, genes: Union[str, List[str]]) -> List[str]:\n if isinstance(genes, str):\n up = pd.read_table(genes, header=None, comment=\"#\", dtype=str)\n ups= up.values.astype(str)\n ups = list(np.squeeze(ups))\n elif isinstance(genes, (list, tuple)):\n ups = genes\n else:\n raise Exception(\"genes must be filepath, list or tuple\")\n # filter genes\n ups_new = [str(i) for i in ups if str(i) in self.genes]\n\n if len(ups_new) < 1: \n raise Exception(\"No genes found. Please input proper Entrez id\")\n return ups_new", "def genes_GT():\n df1=pd.read_csv(config['geneInfo'], sep=\" \")\n df1=df1[df1.chr == '22']\n df2=pd.read_csv(config['counts'], sep=\" \")\n genes=df1.merge(df2.gene_id, on=\"gene_id\")\n return list(set(genes['gene_id']))" ]
[ "0.73216236", "0.73034143", "0.73029256", "0.72081995", "0.6950199", "0.67357016", "0.67327183", "0.66221786", "0.66032404", "0.6553856", "0.64852357", "0.64766", "0.646072", "0.6298506", "0.6285007", "0.626418", "0.6256165", "0.62560207", "0.6246242", "0.62147844", "0.6140646", "0.6109675", "0.60990125", "0.6052731", "0.6040875", "0.6028075", "0.59938735", "0.5978008", "0.5948246", "0.59338903", "0.59305894", "0.5913938", "0.58898735", "0.58728236", "0.58651686", "0.58443344", "0.5830918", "0.57641435", "0.57331103", "0.57179123", "0.5716153", "0.5696348", "0.5671943", "0.5670885", "0.56623924", "0.5659997", "0.5602399", "0.55979204", "0.5584049", "0.5583298", "0.55822796", "0.556117", "0.55180323", "0.55131024", "0.5510996", "0.5493215", "0.5477127", "0.5466438", "0.5453695", "0.5449902", "0.5443719", "0.54340935", "0.54206", "0.54085404", "0.5406749", "0.5371692", "0.53600264", "0.5341191", "0.53271335", "0.53184056", "0.5289534", "0.52750045", "0.5273782", "0.526844", "0.52460265", "0.520611", "0.52003825", "0.51890707", "0.5175942", "0.5174267", "0.5162129", "0.51617277", "0.51611584", "0.5154867", "0.5150852", "0.5131147", "0.511884", "0.5113622", "0.50943553", "0.50921625", "0.5078865", "0.5076164", "0.5065492", "0.5061828", "0.50562847", "0.50557166", "0.5039091", "0.50378937", "0.50375485", "0.5017969" ]
0.7478756
0
Gets and sets the language
def language(self): if "language" in self._prop_dict: return self._prop_dict["language"] else: return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_language(self):\r\n return self.language", "def get_language(self):\n return self.lang", "def get_language(self):\n return self.language if self.language is not None else get_language()", "def language(self):\r\n return self._get('language', {})", "def get_language(self) -> str:\n return self.language", "def _getLang(self, language):\n if language == None:\n language = self.getDefaultLanguage()\n\n return language", "def language(self):\n lang = None\n if self.__dict__['TAG:language']:\n lang = self.__dict__['TAG:language']\n return lang", "def language(self):\n return self._language", "def language(self):\n return self._language", "def language(self):\n return self._language", "def language(self):\n return self._language", "def setLanguage(self, value):\n return self._call_java(\"setLanguage\", value)", "def getLanguage(self):\n return self.getOrDefault(self.language)", "def get_language(self) -> str:\n return settings.LANGUAGE_CODE", "def set_language(self, lang):\n self.lang = lang", "def get_language(self, article):\r\n # we don't want to force the target laguage\r\n # so we use the article.meta_lang\r\n if self.config.use_meta_language == True:\r\n if article.meta_lang:\r\n self.language = article.meta_lang[:2]\r\n self.language = self.config.target_language", "def Language(self, default=None):\n return self.data.get('language', default)", "def language(self, language: str):\n self._language = language", "def get_lang(self):\n return self.langs.lang", "def language(self):\n if self.consent:\n self.consent.language\n translation.activate(self.consent.language)\n self._language = translation.get_language()\n else:\n self._language = settings.LANGUAGE_CODE\n return self._language", "def language(self):\n # type: () -> string_types\n return self._language", "def language(self):\n # type: () -> string_types\n return self._language", "def lang(self):\n return self._lang", "def SetLanguage(self, language):\n try:\n newDict = guicmd.CommandInterface.MessageHandler.GetLanguageDict(language)\n if newDict:\n self.languageDict = newDict\n self.language = language\n except:\n pass", "def language(self) -> str:\n return self._language", "def language(self) -> str:\n return self._language", "def init_language(self):\n\n if 'HTTP_COOKIE' in os.environ:\n cookies = os.environ['HTTP_COOKIE'].split(';')\n for cookie in cookies:\n (key, value) = cookie.split('=')\n if key == Intuition.COOKIE_USERLANG:\n return value\n \n return self.default_language", "def language(self, language: str):\n\n self._language = language", "def language(self, language):\n\n self._language = language", "def language(self, language):\n\n self._language = language", "def language(self, language):\n\n self._language = language", "def language(self, language):\n\n self._language = language", "def language(self, language):\n\n self._language = language", "def get_lang(self):\n props = getToolByName(self.context,\n 'portal_properties')\n return props.site_properties.getProperty('default_language') or 'en'", "def _get_lang(self, *args, **kwargs):\n if \"lang\" in kwargs:\n if kwargs[\"lang\"] in self._available_languages:\n self.lang = kwargs[\"lang\"]", "def do_lang(self, lang):\n\n self.lang = lang\n print(\"Set language to %s\" % lang)", "def get_language(self, article):\r\n # we don't want to force the target laguage\r\n # so we use the article.meta_lang\r\n if self.config.use_meta_language == True:\r\n if article.meta_lang:\r\n return article.meta_lang[:2]\r\n return self.config.target_language", "def 
get_localization(self, language: str) -> Localization:\n ...", "def language(self, target):\n self._check_target(target)\n return target.language or self._default_language", "def language(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"language\")", "def language(self, text_language):\n language = text_language.strip().lower()\n if language in LANGUAGE_TO_CODE:\n self._language_code = LANGUAGE_TO_CODE[language]\n else:\n self._language_code = language[:2]", "def get_language_name(self):\n return self.language_name", "def get_language():\n try:\n from leaves.middleware import request_context\n return request_context.language\n except:\n return get_site().preferences.default_language", "def __translationLanguage(self):\n return self.transLanguageComboBox.itemData(\n self.transLanguageComboBox.currentIndex())", "def language(self) -> str:\n if self.language_code in CODE_TO_LANGUAGE:\n return CODE_TO_LANGUAGE[self.language_code]\n\n return self.language_code", "def get_lang(self):\n\n path = self.get_lang_path()\n for language in self.languages:\n if language in path:\n return language", "def language_code(self) -> str:\n return pulumi.get(self, \"language_code\")", "async def language(self, ctx, language: str = None):\n if language is None:\n lang = await self.bot.di.get_language(ctx.guild)\n await ctx.send((await _(ctx, \"The guild language is set to {}\")).format(lang))\n else:\n if language not in self.bot.languages:\n await ctx.send(await _(ctx, \"That is not a valid language!\"))\n return\n await self.bot.di.set_language(ctx.guild, language)\n await ctx.send(await _(ctx, \"Language successfully set!\"))", "def default_language(self):\n return self._default_language", "def translate(self, language=None):", "def defaultLanguage(self, lang=None):\n if(lang is not None):\n self.lang = lang\n return self.lang", "def GetMUILanguage(self):\n mui_resource = self.GetMUIResource()\n if not mui_resource:\n return None\n\n return mui_resource.language", "def requestLanguage(request):\n # Return the user language preferences for registered users\n if request.user.valid and request.user.language:\n return request.user.language\n\n # Or try to return one of the user browser accepted languages, if it\n # is available on this wiki...\n available = wikiLanguages()\n if not request.cfg.language_ignore_browser:\n for lang in browserLanguages(request):\n if lang in available:\n return lang\n \n # Or return the wiki default language...\n if request.cfg.language_default in available:\n lang = request.cfg.language_default\n # If everything else fails, read the manual... 
or return 'en'\n else:\n lang = 'en'\n return lang", "def to_language(self):\n return self.language()", "def language_code(self):\n return self._language_code", "def get_meta_lang(self):\n # we have a lang attribute in html\n attr = self.parser.getAttribute(self.article.doc, attr='lang')\n if attr is None:\n # look up for a Content-Language in meta\n items = [\n {'tag': 'meta', 'attr': 'http-equiv', 'value': 'content-language'},\n {'tag': 'meta', 'attr': 'name', 'value': 'lang'}\n ]\n for item in items:\n meta = self.parser.getElementsByTag(self.article.doc, **item)\n if meta:\n attr = self.parser.getAttribute(meta[0], attr='content')\n break\n\n if attr:\n value = attr[:2]\n if re.search(RE_LANG, value):\n return value.lower()\n\n return None", "def set_default_language(language_code):\n thread_locals.DEFAULT_LANGUAGE = language_code", "def language(self, language):\n if self.local_vars_configuration.client_side_validation and language is None: # noqa: E501\n raise ValueError(\"Invalid value for `language`, must not be `None`\") # noqa: E501\n if (self.local_vars_configuration.client_side_validation and\n language is not None and len(language) < 1):\n raise ValueError(\"Invalid value for `language`, length must be greater than or equal to `1`\") # noqa: E501\n\n self._language = language", "def set_language(self, lang):\n\n self.language = lang\n\n self.add_metadata('DC', 'language', lang)", "def get_language(self, language):\n found_lang = None\n for lang in self.catalog['languages']:\n if lang['identifier'] == language['identifier']:\n found_lang = lang\n break\n if not found_lang:\n self.catalog['languages'].append(language)\n else:\n language = found_lang\n if 'resources' not in language:\n language['resources'] = []\n return language", "def get_language(lang_list: list = None) -> str:\n\tis_logged_in = frappe.session.user != \"Guest\"\n\n\t# fetch language from form_dict\n\tif frappe.form_dict._lang:\n\t\tlanguage = get_lang_code(frappe.form_dict._lang or get_parent_language(frappe.form_dict._lang))\n\t\tif language:\n\t\t\treturn language\n\n\t# use language set in User or System Settings if user is logged in\n\tif is_logged_in:\n\t\treturn frappe.local.lang\n\n\tlang_set = set(lang_list or get_all_languages() or [])\n\n\t# fetch language from cookie\n\tpreferred_language_cookie = get_preferred_language_cookie()\n\n\tif preferred_language_cookie:\n\t\tif preferred_language_cookie in lang_set:\n\t\t\treturn preferred_language_cookie\n\n\t\tparent_language = get_parent_language(language)\n\t\tif parent_language in lang_set:\n\t\t\treturn parent_language\n\n\t# fetch language from request headers\n\taccept_language = list(frappe.request.accept_languages.values())\n\n\tfor language in accept_language:\n\t\tif language in lang_set:\n\t\t\treturn language\n\n\t\tparent_language = get_parent_language(language)\n\t\tif parent_language in lang_set:\n\t\t\treturn parent_language\n\n\t# fallback to language set in System Settings or \"en\"\n\treturn frappe.db.get_default(\"lang\") or \"en\"", "def language(self):\n portal_state = self.context.unrestrictedTraverse(\"@@plone_portal_state\")\n return aq_inner(self.context).Language() or portal_state.default_language()", "def set_language(request):\r\n user = request.user\r\n lang_pref = request.POST.get('language', None)\r\n\r\n if lang_pref:\r\n UserPreference.set_preference(user, LANGUAGE_KEY, lang_pref)\r\n return HttpResponse('{\"success\": true}')\r\n\r\n return HttpResponseBadRequest('no language provided')", "def 
setRobotLanguage(self):\n\n try:\n assert self.languageTag in self.tts.getSupportedLanguages()\n self.tts.setLanguage(self.languageTag)\n\n except AssertionError:\n self.logger.warning(self.languageTag + \" is not supported by the robot, language set \"\\\n \"to English\")\n\n self.tts.setLanguage(self.ENGLISH_TAG)", "def get_locale():\n setting = Setting.query.filter(Setting.name == 'default_language').first()\n\n if setting is not None:\n return setting.value\n\n # Return default language when none found\n return 'en'", "def default_language(self) -> str:\n return self.raw_config.get(\"default_language\", \"en\")", "def get_lang(ix):\n\tlang = None\n\tif ix == 0:\n\t\tlang = setting.TLA_ENG\n\telif ix == 1:\n\t\tlang = setting.TLA_JP\n\telse:\n\t\tlang = setting.TLA_VN\n\n\tf = open (f\"lang\\\\{lang}.json\", encoding=setting.TLA_UTF8)\n\tglobal data_json\n\tdata_json = json.load(f)\n\n\treturn lang", "def test_set_language(self):\n # Test for default languages\n self.assertEqual(self.scraper.language_original, 'jpn')\n self.assertEqual(self.scraper.language_translated, 'eng')\n\n # Test after setting supported languages\n self.scraper.set_languages('jpn', 'eng')\n self.assertEqual(self.scraper.language_translated, 'jpn')\n self.assertEqual(self.scraper.language_original, 'eng')\n\n # Test after setting non-supported languages\n self.scraper.set_languages('eng', 'lol')\n self.assertEqual(self.scraper.language_translated, 'jpn')\n self.assertEqual(self.scraper.language_original, 'eng')", "def setLanguage(self, translator: ghidra.program.util.LanguageTranslator, monitor: ghidra.util.task.TaskMonitor) -> None:\n ...", "def load_language(self, language_file):\n try:\n if self._translator is not None:\n self.removeTranslator(self._translator)\n\n self.language_name = 'en_us'\n if os.path.isfile(language_file):\n self._translator = QTranslator() # I18N 관련\n self._translator.load(language_file)\n self.installTranslator(self._translator)\n self.language_name = os.path.splitext(os.path.basename(language_file))[0]\n finally:\n pass", "def get_language(chat_id):\n db_connection = DatabaseConnection()\n language = db_connection.get_setting(chat_id=chat_id, setting=LANGUAGE_SETTING)\n db_connection.close()\n\n return language", "def set_lang(self, lang: LangEnum) -> None:\n self._logger.debug(\"running\")\n self._base_strings = strings[lang]\n self._logger.debug(\"done\")", "def setlang(request):\n next = request.GET.get('next', None)\n if not is_safe_url(url=next, host=request.get_host()):\n next = request.META.get('HTTP_REFERER')\n if not is_safe_url(url=next, host=request.get_host()):\n next = '/'\n response = redirect(next)\n\n lang_code = request.GET.get('language', None)\n if lang_code and check_for_language(lang_code):\n if hasattr(request, 'session'):\n request.session[LANGUAGE_SESSION_KEY] = lang_code\n else:\n response.set_cookie(settings.LANGUAGE_COOKIE_NAME, lang_code,\n max_age=settings.LANGUAGE_COOKIE_AGE,\n path=settings.LANGUAGE_COOKIE_PATH,\n domain=settings.LANGUAGE_COOKIE_DOMAIN)\n\n return response", "def get_locale(self):\n return self.locale", "def language(self, language):\n if language is None:\n raise ValueError(\"Invalid value for `language`, must not be `None`\")\n allowed_values = [\"en_US\", \"en_UK\", \"en\", \"es\"]\n if language not in allowed_values:\n raise ValueError(\n \"Invalid value for `language` ({0}), must be one of {1}\"\n .format(language, allowed_values)\n )\n\n self._language = language", "def audio_language(self):\n # type: () -> string_types\n return 
self._audio_language", "def getLang(self):\n ret = libxml2mod.xmlNodeGetLang(self._o)\n return ret", "def get_language(self, word, lang=None):\n lang = lang or self.cfg.get('lang', 'en')\n # let's retrieve the word from configuration dict.\n try:\n return self.cfg['words_' + lang][word]\n except StandardError:\n return 'Do not know how to \"{}\" in \"{}\"'.format(word, lang)", "def _save_lang(self):\n for combobox, (option, _default) in list(self.comboboxes.items()):\n if option == 'interface_language':\n data = combobox.itemData(combobox.currentIndex())\n value = from_qvariant(data, to_text_string)\n break\n save_lang_conf(value)\n self.set_option('interface_language', value)", "def getVKBLanguage(self):\r\n\r\n return self.phone.sx('(send (send (get-input-locale-manager) get-current-locale) get-iso)', convertToString=False)", "def get_locale():\n return \"he\"", "def use_en(self):\n pass", "def default_language(self, default_language):\n self._default_language = default_language", "def _change_lang(self):\n lang = self.ddnGuiLanguage.get()\n self.lblProject['text'] = LOCALIZED_TEXT[lang]['Current Project>'] + \\\n ' ' + self.ddnCurProject.get()\n\n pass", "def set_language(request):\n response = HttpResponseRedirect(get_redirect_url(request))\n\n if request.method == 'POST':\n lang_code = request.POST.get('language', None)\n if lang_code and check_for_language(lang_code):\n request.session[settings.LANGUAGE_SESSION_KEY] = lang_code\n\n return response", "def get_full_language(self, language):\n if language:\n language = pycountry.languages.get(alpha_2=language)\n if language:\n language = language.name\n return language.title()", "def get_default_language():\n return getattr(thread_locals, 'DEFAULT_LANGUAGE',\n settings.DEFAULT_LANGUAGE)", "def change_language(self, language=None, from_error=False):\n supported_langs = self.languages_and_comments.keys()\n if from_error:\n print _(\"Unsupported language. 
Available languages are:\")\n for lang in supported_langs:\n print \" - \", lang\n if not language:\n language = raw_input(_(\"Language: \"))\n if not language in supported_langs:\n self.change_language(None, True)\n else:\n self.default_lang = language", "def get_locale(self):\n raise Unimplemented()", "def __init__(self, language=None):\n self._language = language", "def set_i18n(lang, language=None):\n import gettext\n import locale\n import warnings\n import os\n\n try:\n locale.setlocale(locale.LC_ALL, lang)\n locale.setlocale(locale.LC_MESSAGES, language or lang)\n os.environ[\"LANG\"] = lang\n os.environ[\"LANGUAGE\"] = language or lang.split(\".\")[0]\n except locale.Error:\n warnings.warn(f\"locale is not supported: {lang}\")\n gettext.bindtextdomain(\"messages\", localedir=LOCALEDIR)", "def language(self, language):\n # type: (string_types) -> None\n\n if language is not None:\n if not isinstance(language, string_types):\n raise TypeError(\"Invalid type for `language`, type has to be `string_types`\")\n\n self._language = language", "def language(self, language):\n # type: (string_types) -> None\n\n if language is not None:\n if not isinstance(language, string_types):\n raise TypeError(\"Invalid type for `language`, type has to be `string_types`\")\n\n self._language = language", "def srclang(self):\n return self.__srclang", "def prefer_macrolanguage(self) -> 'Language':\n if self._macrolanguage is not None:\n return self._macrolanguage\n language = self.language or 'und'\n if language in NORMALIZED_MACROLANGUAGES:\n self._macrolanguage = self.update_dict({\n 'language': NORMALIZED_MACROLANGUAGES[language]\n })\n else:\n self._macrolanguage = self\n return self._macrolanguage", "def get_locale():\n if (session.get(\"language\") is not None):\n return session.get('language')['charcode']\n return request.accept_languages.best_match(app.config['LANGUAGES'].keys())", "def gpwDefaultLanguage(self):\n parent = self.getFolderWhenPortalFactory()\n if hasattr(parent, 'getRawLanguage') and parent.getRawLanguage():\n return parent.getRawLanguage()\n tool = getToolByName(self, 'portal_languages', None)\n if tool is not None:\n return tool.getDefaultLanguage()\n return config.LANGUAGE_DEFAULT", "def getLang(lang, localedir=os.path.expanduser(\"~\") + \"/share/locale\"):\n return gettext.translation(\"bridgedb\", localedir=localedir, \n languages=[lang], fallback=\"en\")", "def fetchTranslation(self, language):\n pass", "def assoc_language(self):\n # type: () -> string_types\n return self._assoc_language" ]
[ "0.81069374", "0.8000656", "0.79582477", "0.7830918", "0.77831334", "0.77229893", "0.74865913", "0.74720967", "0.74720967", "0.74720967", "0.74720967", "0.7469725", "0.74637824", "0.7453111", "0.7450087", "0.736944", "0.73569506", "0.7350354", "0.7332747", "0.726062", "0.7256792", "0.7256792", "0.72221273", "0.71793157", "0.7174731", "0.7174731", "0.7125812", "0.71097785", "0.71006155", "0.71006155", "0.71006155", "0.71006155", "0.71006155", "0.7090153", "0.7071917", "0.7029175", "0.69745827", "0.69737655", "0.69651616", "0.69437385", "0.69398403", "0.6937819", "0.6937623", "0.6902467", "0.6901644", "0.6884023", "0.6882263", "0.68228203", "0.68222845", "0.6803285", "0.679662", "0.67775005", "0.6648548", "0.6617035", "0.6615778", "0.660266", "0.659925", "0.6588048", "0.6587163", "0.6581328", "0.6575724", "0.65628713", "0.65554965", "0.65338403", "0.6480563", "0.64775234", "0.64721483", "0.6465616", "0.6463126", "0.6442514", "0.64325804", "0.6407301", "0.6392924", "0.638916", "0.6387678", "0.63622135", "0.63499624", "0.63494575", "0.63375413", "0.6336271", "0.630182", "0.6300991", "0.6284923", "0.6276805", "0.6269869", "0.62582326", "0.62378836", "0.62323445", "0.6231001", "0.6228225", "0.6224825", "0.6218029", "0.6218029", "0.6208386", "0.6198345", "0.61967653", "0.61906654", "0.6180141", "0.6178203", "0.61761105" ]
0.75225323
6
Gets and sets the seller
def seller(self): if "seller" in self._prop_dict: return self._prop_dict["seller"] else: return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def seller(self, seller):\n\n self._seller = seller", "def get_contact_seller(self):\n return self.contact.seller", "def buyer(self, buyer):\n\n self._buyer = buyer", "def seller_from_user(context: Dict[str, Any]) -> Optional[Seller]:\n user: User = context[\"user\"]\n if user.is_authenticated:\n try:\n seller = Seller.objects.get(username=user.username)\n except:\n seller = None\n return seller\n return None", "def get_seller_id(vehicle, api):\n seller = vehicle[SELLER]\n try:\n address = ADDRESS_FORMAT.format(seller[STREET_ADDRESS], seller[CITY], seller[STATE])\n except KeyError:\n send_slack_message(text=f'Address error for seller: {seller} and vehicle: {vehicle}')\n return -1\n\n # Search for existing seller\n db_seller = api.seller_get(address=address)\n if db_seller == -1:\n return -1\n elif len(db_seller) >= 1:\n return db_seller[0]['id']\n\n # New seller, add it to sellers table\n payload = {\n 'phone_number': seller.get(PHONE_NUMBER),\n 'name': seller[NAME],\n 'address': address,\n 'latitude': seller.get(LAT),\n 'longitude': seller.get(LNG),\n }\n new_seller = api.seller_post(**payload)\n return new_seller['id'] if new_seller != -1 else -1", "def selling_rate(self, selling_rate):\n\n self._selling_rate = selling_rate", "def sell(self):\n self.status = \"sold\"\n return self", "def create_seller(self, order_items_with_sellers):\n seller = {}\n\n for item in order_items_with_sellers:\n\n item_seller = item.pop(\"seller\")\n\n seller['seller_uid'] = item_seller['Order Item Seller Uid']\n seller['seller_unique_code'] = item_seller['Order Item Seller Code']\n seller['seller_name'] = item_seller['Order Item Seller Name']\n seller['seller_company'] = item_seller['Order Item Seller Company']\n seller['seller_email'] = item_seller['Order Item Seller Email']\n\n item['seller'] = copy.deepcopy(seller)\n seller.clear()\n\n return order_items_with_sellers", "def sells(self, sells):\n\n self._sells = sells", "def sell_currency(self, sell_currency):\n\n self._sell_currency = sell_currency", "def sellTradedVal(self, sellTradedVal):\n\n self._sellTradedVal = sellTradedVal", "def on_change_seller_id(self):\n value = {}\n if self.seller_id:\n start_date = datetime.now() + timedelta(\n days=self.seller_id.fba_vcs_report_days * -1 or -3)\n value.update({'start_date': start_date, 'end_date': datetime.now()})\n return {'value': value}", "def ReflectingSeller(Seller):\n increase_step = 0.01\n\n if Seller.has_sold == True:\n Seller.like_sell *= (1+increase_step)\n elif Seller.like_sell * (1-increase_step) <= Seller.min_value and Seller.has_sold == False:\n Seller.like_sell = Seller.min_value\n else: \n Seller.like_sell *= (1-increase_step)\n Seller.has_sold = False #return to normal state", "def sell_stock (self, ticker, sell_date):\n \n self.__validate_sell__() \n self.__get_sell_share_price__(ticker, sell_date)\n self.__calc_profit_from_sales__() \n self.__update_sell_delta_amount__()\n self.__save_sell__()\n\n del self.invested[ticker]", "def get_seller_surname(self, id):\n try:\n MySQLConnector().execute_query('select surname from salemen where id = {0};'.format(id))\n surname = MySQLConnector().get_results()[0][0]\n except Error as er:\n logging.getLogger(__name__).error(\"Something went wrong with database %s\" % er)\n return surname", "def show_seller_profile(request, seller_username):\n\tcontext_dict = {}\n\tsearch_form = Search_bar()\n\tcontext_dict['search_bar'] = search_form\n\n\tseller_user = get_object_or_404(User, username = seller_username)\n\tcontext_dict['seller_user'] = 
seller_user\n\t\n\tseller_user_profile=get_object_or_404(UserProfile, user=seller_user)\n\tselling = Item.objects.filter(seller = seller_user_profile)\n\tcontext_dict['selling'] = selling[0:3]\n\t\n\tseller_user_profile = get_object_or_404(UserProfile, user = seller_user)\n\tcontext_dict['seller_user_profile'] = seller_user_profile\n\tcontext_dict['seller_rating'] = range(int(round(seller_user_profile.rating, 1)))\n\n\tseller_items = Item.objects.filter(seller = seller_user_profile)\n\titemList = []\n\n\tfor item in seller_items:\n\t\tif item.sold_to == None:\n\t\t\titemList.append(item)\n\n\tcontext_dict['seller_items'] = itemList\n\treviews_seller = Review.objects.filter(Q(item__in = Item.objects.filter(seller = seller_user_profile)))\n\tcontext_dict['reviews_seller'] = reviews_seller.order_by('-datePosted')\n\n\tif request.user.is_authenticated():\n\t\titems_reviewed = []\n\t\tfor review in Review.objects.select_related():\n\t\t\titems_reviewed.append(review.item.itemID)\n\t\t\n\t\titems_to_review = Item.objects.filter(Q(sold_to__in = UserProfile.objects.filter( \n\t\t\t\t\t\t\t\t\t\t\t\t\tuser__in = User.objects.filter(username = request.user))) &\n\t\t\t\t\t\t\t\t\tQ(seller = seller_user_profile)\n\t\t\t\t\t\t\t\t\t).exclude(itemID__in = items_reviewed)\n\t\tcontext_dict['items_to_review'] = items_to_review\n\n\t\tif items_to_review:\n\t\t\tform = ReviewForm(user_items = items_to_review)\n\t\t\tif(request.method == 'POST'):\n\t\t\t\tform = ReviewForm(items_to_review, request.POST)\n\t\t\t\tif form.is_valid():\n\t\t\t\t\treview = form.save()\n\t\t\t\t\t# Remove item just reviewed from the items to review\n\t\t\t\t\tcontext_dict['items_to_review'] = items_to_review.exclude(itemID = review.item.itemID)\n\n\t\t\t\t\t# Handle if there are more items to review\n\t\t\t\t\tif context_dict['items_to_review']:\n\t\t\t\t\t\tcontext_dict['form'] = ReviewForm(user_items = context_dict['items_to_review'])\n\n\t\t\t\t\treviews_seller_updated = Review.objects.filter(Q(item__in = Item.objects.filter(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tseller = seller_user_profile)))\n\t\t\t\t\trating = 0\n\t\t\t\t\t\n\t\t\t\t\tfor review_updated in list(reviews_seller_updated):\n\t\t\t\t\t\trating += review_updated.rating\n\t\t\t\t\t\n\t\t\t\t\trating = round(rating/len(reviews_seller_updated), 1)\n\t\t\t\t\tseller_user_profile.rating = rating\n\t\t\t\t\tseller_user_profile.save()\n\n\t\t\t\t\treturn HttpResponseRedirect(reverse('tailored:show_seller_profile',\n\t\t\t\t\t\t\tkwargs = {'seller_username': seller_username}))\n\n\t\t\tcontext_dict['form'] = form\n\treturn render(request, 'tailored/seller_profile.html', context_dict)", "def merchant(self):\n return self.__merchant", "def set_sell_amount_from_buy_amount(self, *args, **kwargs):\n self._sell_amount = self.get_sell_amount_from_buy_amount(*args, **kwargs)", "def assignDealer(self):\n\t\t_, index = self.findNthPlayerFromSeat(self.curDealerSeatNo, 1)\n\t\tself.curDealerSeatNo = index", "def get_buy_and_sell_deal_name(self):\n global buy_and_sell_deal\n return buy_and_sell_deal", "def SellingPrice(self):\n return self.selling_price", "def book_for_sale(self):\n try:\n return self.book_set.filter(book_type=get_model('books', 'Book').TO_SELL)[0]\n except:\n None", "def sellItem(self, itemType, key):\n\t\turl = \"https://habitica.com/api/v3/user/sell/\" + itemType + \"/\" + key\n\t\treturn(postUrl(url, self.credentials))", "def get_seller_name(self, id):\n try:\n MySQLConnector().execute_query('select name from salemen where id = {0};'.format(id))\n name = 
MySQLConnector().get_results()[0][0]\n MySQLConnector().execute_query('select surname from salemen where id = {0};'.format(id))\n surname = MySQLConnector().get_results()[0][0]\n MySQLConnector().execute_query('select surname from salemen where id = {0};'.format(id))\n surname = MySQLConnector().get_results()[0][0]\n name_surname = name +', ' + surname\n except Error as er:\n logging.getLogger(__name__).error(\"Something went wrong with database %s\" % er)\n return name_surname", "def sellOpenVal(self, sellOpenVal):\n\n self._sellOpenVal = sellOpenVal", "def market(self, market):\n self._market = market", "def set_follower(self, follower):\n self.follower = follower", "def Besucher(self):\n return self.getAnsprechpartner()", "def set_receiver(self, receiver):\n self.receiver = receiver", "def receiver(self, receiver: str):\n if receiver is None:\n raise ValueError(\"Invalid value for `receiver`, must not be `None`\") # noqa: E501\n\n self._receiver = receiver", "def vendor(self):\n return self._vendor", "def issuer(self, issuer: str):\n\n self._issuer = issuer", "def shopkeeper(self, shopkeeper):\n\n self._shopkeeper = shopkeeper", "def target_sell_price(self):\n return super(Player, self).target_sell_price", "def Trading(Seller,Buyer):\n if Seller.has_sold == False:\n if Buyer.like_buy >= Seller.like_sell:\n Seller.has_sold = True\n Buyer.has_bought = True\n Seller.sold_objects += 1\n Buyer.bought_objects += 1\n print('A trade has been made')\n else:\n Buyer.has_bought = False\n Seller.has_sold = False\n print('There was no deal')\n else:\n Buyer.has_bought = False", "def set_buy_and_sell_deal_name(self, buy_and_sell_deal_prefix):\n global buy_and_sell_deal\n buy_and_sell_deal = buy_and_sell_deal_prefix + self.random_string_generator(size=4)\n self.set_value_into_input_field(self.buy_and_sell_deal_textbox_locator, buy_and_sell_deal)", "def sku(self):\n return self._sku", "def makes_offer(self, makes_offer: object):\n\n self._makes_offer = makes_offer", "def buying_rate(self, buying_rate):\n\n self._buying_rate = buying_rate", "def publisher(self):\n return self.get(\"publisher\")", "def sell_limit(self, market, quantity, rate):\n return self.api_query('Trade', {'type':'sell', 'pair': market, 'amount': quantity, 'rate':'%.8f'%rate})", "def selling_price(self):\n # If a system can't produce something, its price is zero.\n _good = self.tradeitem\n if self.planet.tech_level < _good.tp and _good.name not in 'fuel':\n sell_price = 0\n else:\n sell_price = self.standard_init_price()\n # raise a bit, randomized\n sell_price = sell_price + random.randrange(self.tradeitem.var)\n\n return int(sell_price)", "def get_sale_price(self):\n Currency = Pool().get('currency.currency')\n Company = Pool().get('company.company')\n\n if self.carrier_cost_method != 'gls':\n return super(Carrier, self).get_sale_price() # pragma: no cover\n\n currency, = Currency.search([('code', '=', 'EUR')])\n company = Transaction().context.get('company')\n\n if company:\n currency = Company(company).currency\n\n return Decimal('0'), currency.id", "def addSetAuthor(self,val):\n self.bookAuthor = val", "def prepare_amazon_request_report_kwargs(self, seller):\n account = self.env['iap.account'].search([('service_name', '=', 'amazon_ept')])\n dbuuid = self.env['ir.config_parameter'].sudo().get_param('database.uuid')\n instances_obj = self.env['amazon.instance.ept']\n instances = instances_obj.search([('seller_id', '=', seller.id)])\n marketplaceids = tuple(map(lambda x: x.market_place_id, instances))\n\n return {'merchant_id': 
seller.merchant_id and str(seller.merchant_id) or False,\n 'auth_token': seller.auth_token and str(seller.auth_token) or False,\n 'app_name': 'amazon_ept',\n 'account_token': account.account_token,\n 'dbuuid': dbuuid,\n 'amazon_marketplace_code': seller.country_id.amazon_marketplace_code or\n seller.country_id.code,\n 'marketplaceids': marketplaceids,\n }", "def sell(self):\n #TODO\n #hint: use the raise method to create an exception.\n if self.quantity < 1:\n raise SoldOutOfStockError(self.name)\n else:\n return 1\n # item getters", "def visit_give(self, give):\n owner_id = self.event_json['item_owner']['id']\n owner = self.world.entities[owner_id]\n give.item_owner = owner", "def sell_min_amount(self, sell_min_amount):\n\n self._sell_min_amount = sell_min_amount", "def vendor(self, vendor):\n\n self._vendor = vendor", "def best_sell(self):\n return Library.functions.best_sell(self._book)", "def get_sell_book(self, symbol=None, limit=100, offset=0): \r\n if symbol is None:\r\n sell_book = self.api.find(\"market\", \"sellBook\", query={\"account\": self.account}, limit=limit, offset=offset)\r\n else:\r\n sell_book = self.api.find(\"market\", \"sellBook\", query={\"symbol\": symbol, \"account\": self.account}, limit=limit, offset=offset)\r\n return sell_book", "def developer(self, developer):\n self._developer = developer", "def enterprise(self):\n return self._enterprise", "def add_sell(self, trade):\n trade = self._format_sql(trade, self.sell_table)\n self.sells[trade['id']] = trade", "def set_publisher (self, publisher):\n self.publisher = publisher", "def seller_transaction_fee(self) -> int:\n assert self._transaction_fees is not None, \"Transaction fee not set!\"\n return self._transaction_fees[\"seller_tx_fee\"]", "def getManufacturer(self):\n return self.manufacturer", "def get_price(self):\n return self.sale_price if self.sale_price else self.price", "def userSellWeaponObj(self, user : bbUser.bbUser, weapon : bbWeapon.bbWeapon):\n user.credits += weapon.getValue()\n self.weaponsStock.addItem(weapon)\n user.inactiveWeapons.removeItem(weapon)", "def _get_supplier_(obj, line):\n \n iNo = 0\n strRet = None\n for item in obj.order_line:\n iNo += 1\n if (item.id == line.id):\n if (len(item.product_id.seller_ids)>0):\n strRet = item.product_id.seller_ids[0] and item.product_id.seller_ids[0].name.name or None\n break\n \n \n return strRet", "def sellTradedQtyLot(self, sellTradedQtyLot):\n\n self._sellTradedQtyLot = sellTradedQtyLot", "def __init__(__self__, *,\n partner_name: str,\n secret: str,\n product_name: Optional[str] = None):\n pulumi.set(__self__, \"partner_name\", partner_name)\n pulumi.set(__self__, \"secret\", secret)\n if product_name is not None:\n pulumi.set(__self__, \"product_name\", product_name)", "def enterprise(self, enterprise):\n\n self._enterprise = enterprise", "def give_book(self):\n pass", "def getMarket(self):\n return self.market", "def sku(self, sku):\n\n self._sku = sku", "def _parse_marketplace_from_top_block(self, response):\n top_block = response.xpath('//*[contains(@id, \"sns-availability\")]'\n '//*[contains(text(), \"old by\")]')\n if not top_block:\n top_block = response.xpath('//*[contains(@id, \"merchant-info\")]'\n '[contains(text(), \"old by\")]')\n if not top_block:\n return\n\n seller_id = re.search(r'seller=([a-zA-Z0-9]+)\">', top_block.extract()[0])\n if not seller_id:\n seller_id = re.search(r'seller=([a-zA-Z0-9]+)&', top_block.extract()[0])\n if seller_id:\n seller_id = seller_id.group(1)\n\n sold_by_str = 
''.join(top_block.xpath('.//text()').extract()).strip()\n sold_by_str = sold_by_str.replace('.com.', '.com').replace('\\t', '') \\\n .replace('\\n', '').replace('Gift-wrap available', '').replace(' .', '').strip()\n sold_by_whom = sold_by_str.split('by', 1)[1].strip()\n sold_by_whom = self._marketplace_seller_name_parse(sold_by_whom)\n if not sold_by_whom:\n self.log('Invalid \"sold by whom\" at %s' % response.url, ERROR)\n return\n product = response.meta['product']\n _marketplace = product.get('marketplace', [])\n _price = product.get('price', None)\n _currency = None\n _price_decimal = None\n if _price is not None:\n _price_decimal = float(_price.price)\n _currency = _price.priceCurrency\n _marketplace.append({\n 'currency': _currency or self.price_currency,\n 'price': _price_decimal if _price else None,\n 'name': sold_by_whom,\n 'seller_id': seller_id if seller_id else None,\n 'condition': 'new'\n })\n product['marketplace'] = _marketplace\n return product", "def offers(self, offers):\n\n self._offers = offers", "def ticker(self, ticker: str):\n\n self._ticker = ticker", "def test_get_additional_seller_inputs(self):\n pass", "def makes_offer(self) -> object:\n return self._makes_offer", "def ticker(self, ticker):\n\n self._ticker = ticker", "def test_get_small_and_light_eligibility_by_seller_sku(self):\n pass", "def get_publisher(self):\n return self.publisher", "def test_list_products_filtered_by_seller_name(self):\n self._require_login(self.user1)\n response = self.client.get('/api/1.0/products/?seller=testuser1')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data.__len__(), 1)\n self.assertEqual(response.data[0]['name'], 'Producto 1')\n self.assertEqual(response.data[0]['description'], 'Descripcion producto 1')", "def ticker(self, ticker):\n if ticker is None:\n raise ValueError(\"Invalid value for `ticker`, must not be `None`\")\n\n self._ticker = ticker", "def sell(self,\n currency_pair,\n rate,\n amount):\n pass", "def set_sales_rep(self, sales_rep):\r\n self._sales_rep = sales_rep", "def sell_max_amount(self, sell_max_amount):\n\n self._sell_max_amount = sell_max_amount", "def sales(self, sales):\n\n self._sales = sales", "def __init__(__self__, *,\n partner_name: str,\n secret: str):\n pulumi.set(__self__, \"partner_name\", partner_name)\n pulumi.set(__self__, \"secret\", secret)", "def orderSell(self, rate = None, amount = None):\r\n\t\treturn OrderSell(self, rate, amount)", "def receiver(self, receiver):\n\n self._receiver = receiver", "def receiver(self, receiver):\n\n self._receiver = receiver", "def __init__(self, merchant_username):\r\n\t\tself.merchant_username = merchant_username", "def dealer_hand_value(self):\n return self._get_hand_value(self.dealer_hand, allow_soft_limit=self.allow_soft_limit)", "def throwerOrg_update(self, data, sesh):\n\n\t\t# Verify fields\n\t\ttry: DictHelper.eval(data, ['org'])\n\t\texcept ValueError as e: return Services.Effect(error=(1001, [(f, \"missing\") for f in e.args]))\n\n\t\t# Find the thrower\n\t\toThrower = Thrower.get(sesh['thrower']['_id'])\n\t\tif not oThrower:\n\t\t\treturn Services.Effect(error=1104)\n\n\t\t# Set the new org\n\t\ttry:\n\t\t\toThrower['org'] = data['org']\n\t\texcept ValueError:\n\t\t\treturn Services.Effect(error=(1000, [('org', 'invalid')]))\n\n\t\t# Save\n\t\toThrower.save(changes={\"creator\":sesh['thrower']['_id']})\n\n\t\t# Update the session\n\t\tsesh['thrower']['org'] = data['org']\n\t\tsesh.save()\n\n\t\t# Return OK\n\t\treturn Services.Effect(True)", "def 
sell_cost(self):\n return self._manager.get_sell_price(self.name)", "def get_provider(self):\r\n if self.provided_by:\r\n return list(self.provided_by)[0]", "def sticker(self, sticker_id):\r\n return Sticker(self, sticker_id)", "def setSteer(self, steer):\r\n if steer < -1.0:\r\n steer = -1.0\r\n elif steer > 1.0:\r\n steer = 1.0\r\n self.steer = steer\r\n for tire in self.tires:\r\n if tire.steerable:\r\n if ( steer < 0.0 and tire.model.getX() > 0.0 ) or ( steer > 0.0 and tire.model.getX() < 0.0 ):\r\n tire.shape.setSteerAngle( self.innerSteer * steer )\r\n else:\r\n tire.shape.setSteerAngle( self.outerSteer * steer )", "def set_price(self, _price):\n self.price = _price\n return self.price", "def market(self):\n return self._market", "def test_put_small_and_light_enrollment_by_seller_sku(self):\n pass", "def developer(self):\n return self._developer", "def set_pkg_supplier(self, doc, entity):\n self.assert_package_exists()\n if not self.package_supplier_set:\n self.package_supplier_set = True\n if validations.validate_pkg_supplier(entity):\n doc.package.supplier = entity\n return True\n else:\n raise SPDXValueError('Package::Supplier')\n else:\n raise CardinalityError('Package::Supplier')", "def distributor(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"distributor\")", "def distributor(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"distributor\")", "def rental_offers(self, rental_offers):\n\n self._rental_offers = rental_offers", "def _sell(self, amount, price):\n params = {\"pair\": self.pair, \"type\" : \"sell\", \"rate\" : price, \"amount\" : amount}\n response = self._send_request(\"Trade\", params)\n if \"error\" in response:\n raise TradeException(response[\"error\"])" ]
[ "0.8109366", "0.698005", "0.62128127", "0.6178388", "0.6039728", "0.6028439", "0.5871527", "0.57780135", "0.5721163", "0.56939894", "0.56875074", "0.5485203", "0.5411141", "0.53689885", "0.5301218", "0.5298088", "0.5297234", "0.52722865", "0.52558464", "0.5248467", "0.52440137", "0.52382654", "0.51269865", "0.5107029", "0.5087492", "0.5065944", "0.5065855", "0.5055415", "0.50124544", "0.49981597", "0.49977234", "0.49940023", "0.49870047", "0.4950819", "0.49320576", "0.4913442", "0.48654976", "0.48539874", "0.4849648", "0.48289478", "0.48282602", "0.48142725", "0.48129237", "0.48077676", "0.4795837", "0.47908822", "0.4786495", "0.47839874", "0.47831178", "0.47772843", "0.47661558", "0.47462475", "0.4745652", "0.474414", "0.47228074", "0.47151178", "0.4711821", "0.4710102", "0.47030732", "0.47027606", "0.46939728", "0.46925154", "0.46907812", "0.46905437", "0.46901533", "0.46854904", "0.4683681", "0.4682281", "0.46771583", "0.46745473", "0.46719566", "0.46685252", "0.4664708", "0.4655106", "0.46488166", "0.4647732", "0.46469465", "0.46328795", "0.46326828", "0.4616094", "0.4612038", "0.46102202", "0.4606722", "0.4606722", "0.46003368", "0.45824575", "0.4570776", "0.4570001", "0.45699453", "0.45694694", "0.4569065", "0.4568244", "0.45680198", "0.45643368", "0.45615765", "0.45566592", "0.4552531", "0.4552531", "0.4536235", "0.45328784" ]
0.83205086
0
Gets and sets the totalLicenseCount
def total_license_count(self): if "totalLicenseCount" in self._prop_dict: return self._prop_dict["totalLicenseCount"] else: return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def used_license_count(self):\n if \"usedLicenseCount\" in self._prop_dict:\n return self._prop_dict[\"usedLicenseCount\"]\n else:\n return None", "def license_count(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"license_count\")", "def license_count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"license_count\")", "def license_count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"license_count\")", "def test_get_resource_license_resource_count_list(self):\n pass", "def increment_library_count(self, purchased_ebooks):\n\t\tself.library_count += purchased_ebooks", "def license_counting_type(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"license_counting_type\")", "def get_total_count(self):\n return self.total_count", "def _update_count(self):\n self._count = len(self._items)", "def count_total(self):\n\t\twith self._c_lock: # I can't believe I implemented a lock for a counter. Safety first, I guess...\n\t\t\treturn self._total_count", "def total_nucleus_clients(self, total_nucleus_clients):\n\n self._total_nucleus_clients = total_nucleus_clients", "def count_total(self):\n\t\twith self._c_lock: # I can't believe I implemented a lock for a counter. Safety first, I guess...\n\t\t\treturn self._total_count", "def total_count(self) -> int:\n return self.__total_count", "def count(self):\n \n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def GetCount(self):\n return(self.count)", "def license_number(self):\n return self._license_number", "def update_library_count(self, ebook_count):\n\t\tself.library_count = ebook_count", "def _count(self):\n if self._count_valid:\n return self._total_results\n\n result = self._cb.get_object(self._doc_class.urlobject.format(self._cb.credentials.org_key))\n results = result.get(\"results\", [])\n\n self._total_results = len(results)\n self._count_valid = True\n\n return self._total_results", "def get_count(self):\r\n return self.count", "def get_count(self):\n\n\t\treturn self.__count", "def update_count(self):\n pass", "def get_count(self):\n return self._count", "def count(self):\n return self.properties.get('count')", "def get_count(self):\n return self.count", "def get_count(self):\n return self.count", "def _grand_total(self):\n count = 0\n for product in self.products:\n count += product.price\n return count", "def get_TotalCount(self):\n return self._output.get('TotalCount', None)", "def count(self):\n return self.get_count()", "def refresh(self):\n self._get_license_details()", "def update_total_rolls(self):\n\n # Incremene the attribute by 1\n self._total_rolls += 1", "def count(self) -> int:\n return self._count", "def count(self) -> int:\n return self._count", "def count(self) -> int:\n return self._count", "def count(self) -> int:\n return self.__count", "def count(self):\n return self.vcount", "def update_count(self):\n pass # Do nothing", "def total(self):\n\t\treturn self._total", "def LacCount(self):\n if self.force_auto_sync:\n self.get('LacCount')\n return self._LacCount", "def license_counting_type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"license_counting_type\")", "def total(self) 
-> int:\n return self._total", "def GetVendorCount(self):\n regionVectorData = self.VectorData[self.SelectedRegion]\n return regionVectorData['Vendors']", "def price_count(self):\n return self.price_set.count()", "def license_count_hard_limit(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"license_count_hard_limit\")", "def carn_count(self):\n return len(self.carnivores)", "def license_counting_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"license_counting_type\")", "def count(self) -> float:\n return pulumi.get(self, \"count\")", "def count(self):\n # TODO not implemented yet\n return 0", "def getCount(self):\n return self.count", "def total_cargo(self):\n return self._total_cargo", "def total_count(self):\n return self.applied_count + self.error_count", "def count(self, count):\n\n self._count = count", "def count(self, count):\n\n self._count = count", "def count(self, count):\n\n self._count = count", "def count(self, count):\n\n self._count = count", "def count(self, value):\n raise AttributeError(\"count is read only\")", "def available(self):\n if self._count is not None:\n # If count is available, use it\n return self._count\n else:\n # We really have no idea.\n # Don't know what do do here, but for this\n # impl, which should only be constructed with\n # python lists, self._count should never be none.\n return 0", "def count(self):\n\n if self.cluster:\n return self.cluster.count()\n\n return super().count()", "def credits_earned(self):\n\n if self.grade() >= 69.5:\n return self.nCredits\n else:\n return 0.0", "def updateGACount(self):\n self.ga_count += 1", "def get_total_distributed(self) -> int:\n return self._total_distributed.get()", "def count(self) -> int:\n return pulumi.get(self, \"count\")", "def getNumPurchased(self):\n return self.numberPurchased", "def __len__(self):\n return self.total", "def GetInstalledSlotLicenseCount(self, *args, **kwargs):\n # type: (*Any, **Any) -> Union[int, None]\n payload = {}\n for i in range(len(args)):\n payload[\"Arg%s\" % (i + 1)] = args[i]\n for item in kwargs.items():\n payload[item[0]] = item[1]\n return self._execute(\n \"getInstalledSlotLicenseCount\", payload=payload, response_object=None\n )", "def totalCount(self):\n return sum(self.values())", "def totalCount(self):\n return sum(self.values())", "def totalCount(self):\n return sum(self.values())", "def add_count(self):\n self.count += 1", "def count(self):\n return clone_counter._count", "def Count(self):\r\n\t\treturn self._get_attribute('count')", "def Count(self):\r\n\t\treturn self._get_attribute('count')", "def total_count(self):\n res = self.con.execute('select sum(count) from cc').fetchone();\n if res == None:\n return 0\n return res[0]", "def _count_subscriptions(self):\n for partner in self:\n subscriptions = self.env['subscription.subscription']\n count = subscriptions.sudo().search_count([('partner_id', '=', partner.id)])\n for child in partner.child_ids:\n count += subscriptions.sudo().search_count([('partner_id', '=', child.id)])\n partner.subscriptions_count = count", "def product_count(self) -> int:\n return self._product_count", "def total_nucleus_clients_active(self, total_nucleus_clients_active):\n\n self._total_nucleus_clients_active = total_nucleus_clients_active", "def test_get_resource_license_resource_count_by_moid(self):\n pass", "def bus_total_customers(self) -> int:\n return self.dss_obj.BUSI(4, 0)", "def GetTotal(self):\n return(self.total)", "def count_items(self):\n count = 0\n for o in self.order_lst:\n 
count += o.count()\n \n return count", "def get_total_assigned(self):\n return sum(self.n_assigned_list)", "def Count(self):\n return self._get_attribute('count')", "def set_total(self):\n\n self.total = 0\n for item in self.items.all():\n self.total += item.price\n self.save()", "def count(self, count: int):\n\n self._count = count", "def count(self):\n\n raise NotImplementedError", "def __setKeyCount(self,\n key,\n count):\n self.__keyCount[key] = count\n return self.__keyCount[key]", "def __len__(self):\n return int(self.total)", "def get_total_number_of_documents(self):\n return self.total_number_of_documents", "def count(cls, client) :\n\t\ttry :\n\t\t\tobj = lsntransportprofile()\n\t\t\toption_ = options()\n\t\t\toption_.count = True\n\t\t\tresponse = obj.get_resources(client, option_)\n\t\t\tif response :\n\t\t\t\treturn response[0].__dict__['___count']\n\t\t\treturn 0\n\t\texcept Exception as e :\n\t\t\traise e", "def get_test_amount(self):\n\n return len(self.__test_set_list)", "def setCount(self, num):\n self.count=num" ]
[ "0.75288534", "0.7378034", "0.7028874", "0.7028874", "0.6017668", "0.590693", "0.5812739", "0.5784844", "0.57754", "0.5763398", "0.57567894", "0.57275337", "0.56986785", "0.5680325", "0.5677107", "0.5677107", "0.5677107", "0.5677107", "0.5677107", "0.5677107", "0.5677107", "0.5677107", "0.5677107", "0.5677107", "0.5662815", "0.565595", "0.5644629", "0.5623835", "0.5612979", "0.5552854", "0.55301684", "0.55221146", "0.55179495", "0.55162287", "0.55162287", "0.5513328", "0.5510921", "0.5500954", "0.5481649", "0.54759675", "0.54707205", "0.54707205", "0.54707205", "0.5466275", "0.54587686", "0.5437978", "0.54312754", "0.54236454", "0.5418518", "0.54113966", "0.5408339", "0.5401346", "0.53828657", "0.5373372", "0.5372059", "0.53677565", "0.5341533", "0.5333022", "0.5328909", "0.532321", "0.5310135", "0.5310135", "0.5310135", "0.5310135", "0.5309261", "0.53068906", "0.5296629", "0.52934057", "0.52868694", "0.5280092", "0.527732", "0.52770936", "0.52750075", "0.52727437", "0.52678543", "0.52678543", "0.52678543", "0.5267509", "0.52599514", "0.5256588", "0.5256588", "0.5251307", "0.52457315", "0.52392787", "0.52379113", "0.52120167", "0.52115774", "0.5209737", "0.5207723", "0.52056664", "0.51971745", "0.51955456", "0.5191426", "0.518758", "0.5185063", "0.5166999", "0.5159114", "0.5157993", "0.51488906", "0.51478076" ]
0.81494826
0
Gets and sets the usedLicenseCount
def used_license_count(self): if "usedLicenseCount" in self._prop_dict: return self._prop_dict["usedLicenseCount"] else: return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def total_license_count(self):\n if \"totalLicenseCount\" in self._prop_dict:\n return self._prop_dict[\"totalLicenseCount\"]\n else:\n return None", "def license_count(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"license_count\")", "def license_count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"license_count\")", "def license_count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"license_count\")", "def test_get_resource_license_resource_count_list(self):\n pass", "def license_number(self):\n return self._license_number", "def license_count_hard_limit(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"license_count_hard_limit\")", "def license_counting_type(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"license_counting_type\")", "def refresh(self):\n self._get_license_details()", "def increment_library_count(self, purchased_ebooks):\n\t\tself.library_count += purchased_ebooks", "def update_library_count(self, ebook_count):\n\t\tself.library_count = ebook_count", "def license_count_hard_limit(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"license_count_hard_limit\")", "def license_count_hard_limit(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"license_count_hard_limit\")", "def license_counting_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"license_counting_type\")", "def available(self):\n if self._count is not None:\n # If count is available, use it\n return self._count\n else:\n # We really have no idea.\n # Don't know what do do here, but for this\n # impl, which should only be constructed with\n # python lists, self._count should never be none.\n return 0", "def license_counting_type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"license_counting_type\")", "def test_get_resource_license_resource_count_by_moid(self):\n pass", "def assigned_licenses(self):\n if \"assignedLicenses\" in self._prop_dict:\n return AssignedLicensesCollectionPage(self._prop_dict[\"assignedLicenses\"])\n else:\n return None", "def assigned_licenses(self):\n if \"assignedLicenses\" in self._prop_dict:\n return AssignedLicensesCollectionPage(self._prop_dict[\"assignedLicenses\"])\n else:\n return None", "def license(self, license):\n\n self._license = license", "def GetVendorCount(self):\n regionVectorData = self.VectorData[self.SelectedRegion]\n return regionVectorData['Vendors']", "def set_count(c):\n global count\n count = c", "def setNumPurchased(self, val):\n self.numberPurchased = val", "def get_licence(self, _return):\n return _return.licence.licence_number", "def software_license(self) -> str:\n return self.random.choice(LICENSES)", "def _update_count(self):\n self._count = len(self._items)", "def update_count(self):\n pass", "def setCount(self, num):\n self.count=num", "def update_count(self):\n pass # Do nothing", "def license_number(self, license_number):\n\n self._license_number = license_number", "def set_count(self, count):\n self._count = count", "def licenses(self) -> Sequence[str]:\n return pulumi.get(self, \"licenses\")", "def __setKeyCount(self,\n key,\n count):\n self.__keyCount[key] = count\n return self.__keyCount[key]", "def licenses(self):\n buf_size = self.MAX_BUF_SIZE\n buf = (ctypes.c_char * buf_size)()\n res = self._dll.JLINK_GetAvailableLicense(buf, buf_size)\n if res < 0:\n raise errors.JLinkException(res)\n return ctypes.string_at(buf).decode()", "def releaseLicence(self):\n\t\t\tpulpCPLEX.releaseLicence()", "def 
Available(self) -> int:", "def Available(self) -> int:", "def Available(self) -> int:", "def used_current(self, used_current):\n self._used_current = used_current", "def set_license_analytics(self, license_params: dict) -> PrivXAPIResponse:\n response_status, data = self._http_post(\n UrlEnum.LICENSE.OPT_IN,\n body=license_params,\n )\n return PrivXAPIResponse(response_status, HTTPStatus.OK, data)", "def erase_licenses(self):\n res = self._dll.JLINK_EMU_EraseLicenses()\n return (res == 0)", "def GetInstalledSlotLicenseCount(self, *args, **kwargs):\n # type: (*Any, **Any) -> Union[int, None]\n payload = {}\n for i in range(len(args)):\n payload[\"Arg%s\" % (i + 1)] = args[i]\n for item in kwargs.items():\n payload[item[0]] = item[1]\n return self._execute(\n \"getInstalledSlotLicenseCount\", payload=payload, response_object=None\n )", "def target_lun_in_use_count(self):\n return self._target_lun_in_use_count", "def collectGem(self):\n self.gems += 1", "def License(self, default=None):\n return self.data.get('license', default)", "def timesUsed(self)->int:\n return self._lic.params['usedTimes'].value", "def count(self, count):\n\n self._count = count", "def count(self, count):\n\n self._count = count", "def count(self, count):\n\n self._count = count", "def count(self, count):\n\n self._count = count", "def request_count(self, request_count):\n\n self._request_count = request_count", "def extr_lic(self, doc):\n return doc.extracted_licenses[-1]", "def set_pkg_license_declared(self, doc, lic):\n self.assert_package_exists()\n if not self.package_license_declared_set:\n self.package_license_declared_set = True\n if validations.validate_lics_conc(lic):\n doc.package.license_declared = lic\n return True\n else:\n raise SPDXValueError('Package::LicenseDeclared')\n else:\n raise CardinalityError('Package::LicenseDeclared')", "def LacCount(self):\n if self.force_auto_sync:\n self.get('LacCount')\n return self._LacCount", "def updateGACount(self):\n self.ga_count += 1", "def grabLicence(self):\n\t\t\treturn pulpCPLEX.grabLicence()", "def licenses(self,filter=None,n_rows=None,n_random=None,\n offset=None,query=None,sort_by=None,order=None,\n facet=None,cursor=None,select=None,return_type=None):\n \n params = self._options_to_dict(filter=filter,n_rows=n_rows,\n n_random=n_random,offset=offset,query=query,\n sort_by=sort_by,order=order,facet=facet,cursor=cursor,\n select=None)\n \n url = self.BASE_URL + 'licenses'\n #return self._make_search_request(url,models.LicenseSearchResult,options,_filter)\n return self._make_get_request(url,models.LicenseSearchResult,params,return_type)", "def set_license_queryset(self, access_policy):\n self.fields['license'].queryset = License.objects.filter(\n resource_types__icontains=str(self.instance.resource_type.id),\n access_policy=access_policy)", "def used_at_enroll(self, used_at_enroll):\n\n self._used_at_enroll = used_at_enroll", "def EnableLicenseCheck(self):\n return self._get_attribute('enableLicenseCheck')", "def custom_licenses(self):\n buf = (ctypes.c_char * self.MAX_BUF_SIZE)()\n result = self._dll.JLINK_EMU_GetLicenses(buf, self.MAX_BUF_SIZE)\n if result < 0:\n raise errors.JLinkException(result)\n return ctypes.string_at(buf).decode()", "def version(self):\n return self._num_resets", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return 
self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n \n return self._count", "def count(self, count: int):\n\n self._count = count", "def count(self, value):\n raise AttributeError(\"count is read only\")", "def get_count(self):\r\n return self.count", "def user_licenses(self) -> Sequence[str]:\n return pulumi.get(self, \"user_licenses\")", "def estimated_holdings_by_language(self, include_open_access=True):\n _db = Session.object_session(self)\n qu = _db.query(\n Edition.language, func.count(Work.id).label(\"work_count\")\n ).select_from(Work).join(Work.license_pools).join(\n Work.presentation_edition\n ).filter(Edition.language != None).group_by(Edition.language)\n qu = self.restrict_to_ready_deliverable_works(qu)\n if not include_open_access:\n qu = qu.filter(LicensePool.open_access==False)\n counter = Counter()\n for language, count in qu:\n counter[language] = count\n return counter", "def license_date(self, license_date):\n\n self._license_date = license_date", "def __init__(__self__, *,\n license_counting_type: pulumi.Input[str],\n description: Optional[pulumi.Input[str]] = None,\n license_count: Optional[pulumi.Input[int]] = None,\n license_count_hard_limit: Optional[pulumi.Input[bool]] = None,\n license_rules: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n name: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):\n pulumi.set(__self__, \"license_counting_type\", license_counting_type)\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if license_count is not None:\n pulumi.set(__self__, \"license_count\", license_count)\n if license_count_hard_limit is not None:\n pulumi.set(__self__, \"license_count_hard_limit\", license_count_hard_limit)\n if license_rules is not None:\n pulumi.set(__self__, \"license_rules\", license_rules)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)", "def num_available(self) -> int:\n return len(self)", "def limit_num_clients(self):\n return self._limit_num_clients", "def getNumPurchased(self):\n return self.numberPurchased", "def test_get_software_set(self):\n pass", "def count(self, count: int) -> None:\n self._count = count", "def pycount(self):\n\n self.count += 1\n return self.count", "def credits_earned(self):\n\n if self.grade() >= 69.5:\n return self.nCredits\n else:\n return 0.0", "def set_count(self, count, asset=None):\n self._set_property('pc:count', count, asset)", "def qs_license():\r\n paragraph = document.add_paragraph('')\r\n document.add_heading('License', level=1)\r\n lic_metric = ['lef', 'serial', 'name', 'organization', 'product', 'numberOfCores', 'isExpired', 'expiredReason', 'isBlacklisted', 'isInvalid']\r\n qs_lic = get_qlik_sense.get_license()\r\n num_of_metric = len(qs_lic)\r\n table = document.add_table(rows=num_of_metric+1, cols=2)\r\n table.style = 'Grid Table 1 Light Accent 1'\r\n row = table.rows[0]\r\n row.cells[0].text = 'details'\r\n\r\n for metric in range(len(qs_lic)):\r\n row = table.rows[metric+1]\r\n row.cells[0].text = str(lic_metric[metric])\r\n row.cells[1].text = str(qs_lic[metric][0])\r\n document.add_page_break()", "def get_count(self):\n\n\t\treturn self.__count", "def my_word_counts(self):\n return self._get(\"version\")", "def add_count(self):\n self.count += 1", "def count(self, val):\n raise ValueError('cannot set \\'count\\' 
in class KeyTracker')", "def legalhold(self):\n return self._sdk_dependencies.legal_hold_client", "def current_reset_count(self, current_reset_count):\n\n self._current_reset_count = current_reset_count", "async def get_license(self) -> APIReturn:\n return await self._request(\"GET\", \"/getLicense\")", "def price_count(self):\n return self.price_set.count()", "def get_count(self):\n return self._count", "def get_count(self):\n return self.count", "def get_count(self):\n return self.count" ]
[ "0.70619345", "0.6962623", "0.68865573", "0.68865573", "0.61004454", "0.60076684", "0.5775451", "0.56807685", "0.562454", "0.5619409", "0.56166327", "0.5587048", "0.5587048", "0.5530649", "0.5519855", "0.5512422", "0.5486493", "0.5413517", "0.5413517", "0.5386872", "0.5385942", "0.53611755", "0.52875817", "0.52753955", "0.52464503", "0.52441555", "0.52247787", "0.51812845", "0.5177245", "0.5155754", "0.5146612", "0.5138218", "0.5114884", "0.51055115", "0.5088363", "0.5084107", "0.5084107", "0.5084107", "0.5075216", "0.5065336", "0.50536084", "0.50423", "0.5036368", "0.5031232", "0.5030792", "0.5026604", "0.50211275", "0.50211275", "0.50211275", "0.50211275", "0.5011964", "0.49926436", "0.4990279", "0.49713138", "0.4955667", "0.4950456", "0.4948272", "0.49414763", "0.49410006", "0.49253452", "0.49134335", "0.49120325", "0.49036196", "0.49036196", "0.49036196", "0.49036196", "0.49036196", "0.49036196", "0.49036196", "0.49036196", "0.49036196", "0.49036196", "0.48885658", "0.48881814", "0.4885781", "0.4885402", "0.48784563", "0.48761776", "0.48722604", "0.4843669", "0.48417938", "0.48393083", "0.48323318", "0.48307753", "0.48260263", "0.482231", "0.48162812", "0.4809798", "0.48090154", "0.48072875", "0.48072687", "0.4799859", "0.4795096", "0.47940925", "0.47918594", "0.47910663", "0.4788389", "0.4773385", "0.47607493", "0.47607493" ]
0.81816816
0
User's current state. Can be used to remember last step, build and parse callbacks.
def __new__(cls, name, build_pattern: str = None, parse_pattern: re.Pattern = None): obj = super().__new__(cls, name) if parse_pattern is not None: obj.parse_pattern = parse_pattern if build_pattern is not None: obj.build_pattern = build_pattern return obj
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def current_state():\n global current_state\n while current_state is None:\n pass\n return current_state", "def state_current(self, instance):\r\n return instance.user.profile.state_current", "def current_state(self):\n return self.obs_hook(self._current_obs)", "def get_current_state(self, data):\r\n return self.get_context()", "def current_state(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"current_state\")", "def get_current_state(self):\n return self._current_state", "def CurrentState(self):\n return self.currentState", "def get_current_state(self):\n return self.game.get_current_state()", "def get_current_state(self):\n return self.world.get_state()", "def currentState(self):\n return self.currentState", "def current_state(self) -> str:\n return self._state_storage.state", "def current_state(self):\n LOGGER.debug('Getting current_state: %s', self._current_state)\n return self._current_state", "def getState(self):\r\n self.UpdateState()\r\n return self.cur_state", "def current_state(self):\n curr_state = dict(\n logfile=os.path.basename(self.logfile),\n time=self.time,\n converged=self.converged,\n solve_completed=self.solve_completed,\n converged_time=self.converged_time,\n failed=self.failed,\n fields=list(self.res_files.keys()),\n bounding_fields=list(self.bound_files.keys()))\n return curr_state", "def get_state(self):\n pass", "def state(self):\n return get_state(self.context)", "def _get_state(self):", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return 
self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def get_current_state(self, model: Block):\n self.initial_state[model] = to_json(model, wts=StoreState, return_dict=True)\n\n return self.initial_state[model]", "def state(self):\n return self._current_value", "def get_state(self):\n return self.history", "def return_state(self):\n\t\treturn self.state", "def state(self, instance):\r\n return instance.user.profile.state", "def current_state(self) -> Optional[pulumi.Input['JobCurrentState']]:\n return pulumi.get(self, \"current_state\")", "def current(self):\n return self._wizard.current_step or self.first", "def current_operation(self):\n return self.state", "def state(self):\n return self.get_state()", "def get_current_state(self):\n\n return deepcopy(self.state)", "def get_state(self):\n return self.state", "def get_state(self):\n return self.state" ]
[ "0.6918716", "0.6859633", "0.68032247", "0.6791822", "0.6747422", "0.673759", "0.66993624", "0.6694381", "0.66105115", "0.655849", "0.6546558", "0.6440812", "0.64252305", "0.63976043", "0.63891476", "0.62961525", "0.62944114", "0.62815493", "0.62815493", "0.62815493", "0.62815493", "0.62815493", "0.62815493", "0.62815493", "0.62815493", "0.62815493", "0.62815493", "0.62815493", "0.62815493", "0.62815493", "0.62815493", "0.62815493", "0.62815493", "0.62815493", "0.62815493", "0.62815493", "0.62815493", "0.62815493", "0.62815493", "0.62815493", "0.62815493", "0.62815493", "0.62815493", "0.62815493", "0.62815493", "0.62815493", "0.62815493", "0.62815493", "0.62815493", "0.62815493", "0.62815493", "0.62815493", "0.62815493", "0.62815493", "0.62815493", "0.62815493", "0.62815493", "0.62815493", "0.62815493", "0.62815493", "0.62815493", "0.62815493", "0.62815493", "0.62815493", "0.62815493", "0.62815493", "0.62815493", "0.62815493", "0.62815493", "0.62815493", "0.62815493", "0.62815493", "0.62815493", "0.62815493", "0.62815493", "0.62815493", "0.62815493", "0.62815493", "0.62815493", "0.62815493", "0.62815493", "0.62815493", "0.62815493", "0.62815493", "0.62815493", "0.62815493", "0.62815493", "0.62815493", "0.62815493", "0.62708825", "0.62488914", "0.6223163", "0.6201651", "0.61997265", "0.6178474", "0.61686975", "0.6153451", "0.6143878", "0.6136971", "0.6132199", "0.6132199" ]
0.0
-1
Creates/updates a Review object
def api_review(review_id=None): from models.review import Review # Retrieve an object if request.method == 'GET': if review_id is not None: obj = storage.get(Review, review_id) if obj is None: abort(404) else: return jsonify(obj.to_dict()) # Delete a specific object elif request.method == 'DELETE': if review_id is not None: obj = storage.get(Review, review_id) if obj is None: abort(404) else: storage.delete(obj) storage.save() return jsonify({}) else: abort(404) # Update a specific object elif request.method == 'PUT': if review_id is not None: review = storage.get(Review, review_id) if review is None: abort(404) if request.is_json: excl_attrs = ['id', 'user_id', 'place_id', 'created_at', 'updated_at'] incoming_json = request.get_json() for key, value in incoming_json.items(): if key not in excl_attrs: setattr(review, key, value) review.save() return jsonify(review.to_dict()) else: abort(400, 'Not a JSON')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def put_review(review_id):\n ignored_data = [\"id\", \"created_at\", \"updated_at\", \"user_id\", \"place_id\"]\n return put(cls, review_id, ignored_data)", "def put_review(review_id=None):\n\n review = storage.get(Review, review_id)\n if not review:\n abort(404)\n data = request.get_json()\n if not data:\n abort(400, 'Not a JSON')\n keys_ignore = ['id', 'user_id', 'recipe_id', 'created_at', 'updated_at']\n for key in data.keys():\n if key not in keys_ignore:\n setattr(review, key, data[key])\n review.save()\n return make_response(jsonify(review.to_dict()), 200)", "def save_review():\n prod_id = int(request.vars.prod_id)\n logger.info(\"saving review on prod_id {%s}\" %prod_id)\n content = request.vars.content\n db.reviews.update_or_insert(\n (db.reviews.prod_id == prod_id) & (db.reviews.user_email == auth.user.email),\n prod_id = prod_id,\n user_email = auth.user.email,\n review_content = content\n )\n return \"ok\" # Might be useful in debugging.", "def post_review(recipe_id=None):\n\n if not storage.get(Recipe, recipe_id):\n abort(404)\n data = request.get_json()\n if not data:\n abort(400, 'Not a JSON')\n if 'user_id' not in data.keys():\n abort(400, 'Missing user_id')\n if not storage.get(User, data['user_id']):\n abort(404)\n if 'text' not in data.keys():\n abort(400, 'Missing text')\n data['recipe_id'] = recipe_id\n new_review = Review(**data)\n storage.new(new_review)\n storage.save()\n return make_response(jsonify(new_review.to_dict()), 201)", "def review(self, review: object):\n\n self._review = review", "def create_review(place_id):\n place = storage.get(\"Place\", place_id)\n if place is None:\n abort(404)\n if not request.get_json():\n return jsonify({'error': 'Not a JSON'}), 400\n if 'user_id' not in request.get_json():\n return jsonify({'error': 'Missing user_id'}), 400\n user = storage.get(\"User\", request.get_json().get('user_id'))\n if user is None:\n abort(404)\n user_id = request.get_json().get('user_id')\n if 'text' not in request.get_json():\n return jsonify({'error': 'Missing text'}), 400\n text = request.get_json().get('text')\n obj = Review(text=text, place_id=place_id, user_id=user_id)\n obj.save()\n return jsonify(obj.to_dict()), 201", "def add_review(self, review: Review):\n raise NotImplementedError", "def new_review(place_id):\n body_dic = request.get_json()\n place = storage.get(Place, place_id)\n if not place:\n abort(404)\n if not body_dic:\n return jsonify({'error': 'Not a JSON'}), 400\n if \"user_id\" not in body_dic:\n return jsonify({'error': 'Missing user_id'}), 400\n user = storage.get(User, body_dic.get(\"user_id\", None))\n if not user:\n abort(404)\n if \"text\" not in body_dic:\n return jsonify({'error': 'Missing text'}), 400\n\n new_review = Review(**body_dic)\n setattr(new_review, \"place_id\", place_id)\n storage.new(new_review)\n storage.save()\n return jsonify(new_review.to_dict()), 201", "def test_save_review(self):\n self.new_review.save_review()\n self.assertTrue(len(Review.query.all()) > 0)", "def create_review(place_id):\n place = storage.get(\"Place\", place_id)\n if place is None:\n abort(404)\n req_json = request.get_json()\n if req_json is None:\n return make_response(jsonify({'error': \"Not a JSON\"}), 400)\n if 'user_id' not in req_json.keys():\n return make_response(jsonify({'error': \"Missing user_id\"}), 400)\n uid = req_json.get(\"user_id\")\n user = storage.get(\"User\", uid)\n if user is None:\n abort(404)\n if 'text' not in req_json.keys():\n return make_response(jsonify({'error': \"Missing text\"}), 400)\n 
req_json[\"place_id\"] = place_id\n data = Review(**req_json)\n data.save()\n return jsonify(data.to_json()), 201", "def create_review(place_id=None):\n place = storage.get(Place, place_id)\n if place:\n review = request.get_json()\n if not review:\n abort(400, \"Not a JSON\")\n if \"user_id\" not in review:\n abort(400, \"Missing user_id\")\n if not storage.get(\"User\", review[\"user_id\"]):\n abort(404)\n if \"text\" not in review:\n abort(400, \"Missing text\")\n else:\n review['place_id'] = place.id\n new_review = Review(**review)\n storage.new(new_review)\n storage.save()\n return jsonify(new_review.to_dict()), 201\n abort(404)", "def newreview():\n objectid = request.values.get('objectid', 0, type=int)\n if not objectid:\n abort(400)\n workflow_object = workflow_object_class.get(objectid)\n\n form = AuthorUpdateForm(\n data=workflow_object.extra_data[\"formdata\"], is_review=True)\n ctx = {\n \"action\": url_for('.reviewhandler', objectid=objectid),\n \"name\": \"authorUpdateForm\",\n \"id\": \"authorUpdateForm\",\n \"objectid\": objectid\n }\n\n return render_template('authors/forms/review_form.html', form=form, **ctx)", "def new():\n\n add_review = True\n\n form = CreateReview()\n if form.validate_on_submit():\n\n try:\n review = {\n \"score\": float(form.score.data),\n \"description\": form.description.data,\n \"games_id\": form.game_id.data,\n \"users_id\": form.users_id.data\n }\n\n print(review)\n new_review = Reviews()\n new_review.create(**review)\n \n # add employee to the database\n flash('You have successfully created a Review.')\n except:\n # in case department name already exists\n flash('Error: review already exists.')\n \n\n # redirect to the login page\n return redirect(url_for('review.index'))\n\n return render_template('review/new.html', action=\"Add\", add_review=add_review, form=form, title=\"Add Review\")", "def review(self, review):\n self._review = review", "def addreview(self, name, year, genre, rating, review, reviewer):\n pass", "def approve_obj_review(selenium, obj):\n _get_ui_service(selenium, obj).approve_review(obj)\n return obj.update_attrs(\n review=entities_factory.ReviewsFactory().create(\n status=element.ReviewStates.REVIEWED,\n last_reviewed_by=users.current_user().email,\n last_reviewed_at=rest_facade.get_last_review_date(obj),\n reviewers=users.current_user()),\n updated_at=rest_facade.get_obj_review(obj).updated_at)", "def update_review(review_id):\n user_input = request.get_json()\n if user_input is None:\n abort(400, {'message': 'Not a JSON'})\n obj = storage.get(Review, review_id)\n if obj is None:\n abort(404)\n for k, v in user_input.items():\n if k not in ['id', 'user_id', 'place_id',\n 'created_at', 'updated_at']:\n setattr(obj, k, v)\n obj.save()\n return jsonify(obj.to_dict()), 200", "def persist_review_object(yro, cursor):\n try:\n # Review\n sql = \" INSERT INTO Review \" \\\n \" (review_id, business_id, user_id, stars, review_text, review_date) \" \\\n \" VALUES \" \\\n \" (%s, %s, %s, %s, %s, %s) \"\n cursor.execute(sql, [yro.review_id, yro.business_id, yro.user_id, yro.stars, yro.review_text, yro.review_date])\n\n # Review_Votes\n for vote_type, vote_count in yro.votes.iteritems():\n sql = \" INSERT INTO Review_Votes \" \\\n \" (review_id, business_id, user_id, vote_type, vote_count) \" \\\n \" VALUES \" \\\n \" (%s, %s, %s, %s, %s) \"\n cursor.execute(sql, [yro.review_id, yro.business_id, yro.user_id, vote_type, vote_count])\n\n cursor.connection.commit()\n except MySQLdb.Error as err:\n cursor.connection.rollback()\n print err\n 
print \"Error with review_id {0}, business_id {1}, user_id {2}\" \\\n .format(yro.review_id, yro.business_id, yro.user_id)", "def update_review(review_id):\n review_obj = storage.get(Review, review_id)\n if review_obj:\n body_dic = request.get_json()\n if not body_dic:\n return jsonify({'error': 'Not a JSON'}), 400\n for key, value in body_dic.items():\n setattr(review_obj, key, value)\n review_obj.save()\n return jsonify(review_obj.to_dict()), 200\n else:\n abort(404)", "def test_review(self):\n new_review = Review()\n self.assertIs(type(new_review.id), str)\n self.assertIs(type(new_review.created_at), datetime)\n self.assertIs(type(new_review.updated_at), datetime)\n self.assertIs(type(new_review.place_id), str)\n self.assertIs(type(new_review.user_id), str)\n self.assertIs(type(new_review.text), str)", "def reviews_collection(request):\n if request.method == \"POST\":\n data = json.loads(request.body)\n task = Task.objects.get(id=data.get(\"taskId\", \"\"))\n reviewer = User.objects.get(username=data.get(\"reviewer\", \"\"))\n reviewee = User.objects.get(username=data.get(\"reviewee\", \"\"))\n rating = data.get(\"rating\", \"\")\n content = data.get(\"content\", \"\")\n\n review = Review(\n task=task,\n reviewer=reviewer,\n reviewee=reviewee,\n rating=rating,\n content=content\n )\n review.save()\n\n serializer = ReviewSerializer(review)\n return Response(serializer.data)", "def write_review(request):\n form = ReviewForm\n\n if request.method == 'POST':\n form_data = {\n 'title': request.POST['title'],\n 'description': request.POST['description'],\n 'author': request.POST['author'],\n }\n\n form = ReviewForm(form_data)\n\n if form.is_valid:\n form.save()\n messages.success(\n request, f'Review added successfully! Thanks!')\n else:\n messages.error(\n request, f'Upps something went wrong, please try again')\n\n context = {\n 'form': form\n }\n return render(request, 'reviews/write_review.html', context)", "def update_review(review_id):\n if not request.get_json():\n return jsonify({'error': 'Not a JSON'}), 400\n obj = storage.get(\"Review\", review_id)\n if obj is None:\n abort(404)\n for k, v in request.get_json().items():\n if k not in ['id', 'user_id', 'place_id', 'created_at', 'updated_at']:\n setattr(obj, k, v)\n storage.save()\n return jsonify(obj.to_dict())", "def add_review(game_name):\n game = Game.from_mongo(**mongo.db.games.find_one({ \"name\": game_name }))\n username = session.get('username')\n if username is not None:\n user_dict = mongo.db.users.find_one({\"name\": username})\n user = User.from_mongo(**user_dict)\n\n form = ReviewForm()\n if form.validate_on_submit():\n author_ref = user.create_author_ref()\n pub_date = str(datetime.now(timezone.utc))\n game_ref = game.create_game_ref()\n \n new_review = Review.add_review(\n name=form.title.data,\n game=game.label,\n author=user.name, \n author_id=user._id, \n text=form.review_text.data, \n game_id=game._id, \n pub_date=pub_date, \n game_ref=game_ref, \n author_ref=author_ref\n )\n flash('Review Successfully Posted')\n review_ref = new_review.create_review_ref()\n game.reviews.append(review_ref)\n game.update_game()\n game_ref = game.create_game_ref()\n user.reviews.append(review_ref)\n if game_ref not in user.game_list:\n user.game_list.append(game_ref)\n user.update_user()\n return redirect(url_for('review', review_id=new_review._id))\n return render_template('add_review.html', game_name=game_name, user=user, game=game, form=form)\n else:\n flash('Please log in to post a review')\n return redirect(url_for('login'))", 
"def update_review(review_id):\n review = storage.get(\"Review\", review_id)\n if review is None:\n abort(404)\n ignore_keys = ['id', 'user_id', 'place_id', 'created_at', 'updated_at']\n req_json = request.get_json()\n if req_json is None:\n return make_response(jsonify({'error': \"Not a JSON\"}), 400)\n for k, v in req_json.items():\n if k not in ignore_keys and hasattr(review, k):\n setattr(review, k, v)\n review.save()\n return jsonify(review.to_json()), 200", "def add_review(place_id):\n # Check that user input is correct\n user_input = request.get_json()\n if user_input is None:\n abort(400, {'message': 'Not a JSON'})\n elif user_input.get('text') is None:\n abort(400, {'message': 'Missing text'})\n elif user_input.get('user_id') is None:\n abort(400, {'message': 'Missing user_id'})\n else:\n # Review is linked to user and city\n u_id = user_input.get('user_id')\n if storage.get(User, u_id) is None:\n abort(404)\n elif storage.get(Place, place_id) is None:\n abort(404)\n else:\n obj = Review(**user_input)\n obj.user_id = u_id\n obj.place_id = place_id\n storage.new(obj)\n storage.save()\n return jsonify(obj.to_dict()), 201\n abort(404)", "def add_new_review(restaurant_id):\n # If the user isn't logged in, send to the login page\n if helper.handle_login(login_session) is False:\n return redirect('/login')\n\n if request.method == 'POST':\n post = request.get_json()\n if 'username' not in login_session:\n new_review = Reviews(reviewer_name='anonymous',\n review=post.get('review'),\n stars=post.get('stars'),\n restaurant_id=restaurant_id,\n time=datetime.utcnow())\n else:\n new_review = Reviews(reviewer_name=login_session['username'],\n review=post.get('review'),\n stars=post.get('stars'),\n restaurant_id=restaurant_id,\n time=datetime.utcnow())\n session.add(new_review)\n session.commit()\n\n return redirect(url_for('restaurants_page'))", "def post(self, request, *args, **kwargs):\n view = ReviewForm.as_view()\n return view(request, *args, **kwargs)", "def put_review(review_id=None):\n if review_id:\n for item in storage.all(Review).values():\n if review_id == item.id:\n is_json = request.get_json()\n if is_json is None:\n abort(400, description=\"Not a Json\")\n\n item.text = is_json.get(\"text\")\n storage.save()\n return (jsonify(item.to_dict()))\n abort(404)\n abort(404)", "def add_review(self):\n url = \"/review/create/%s\" % self.picture.id\n self.browser.get(\"%s%s\" %\n (str(self.live_server_url), url))\n\n select = Select(self.browser.find_element_by_id(\n \"id_score_intention\"))\n select.select_by_index(4)\n select = Select(self.browser.find_element_by_id(\n \"id_score_technical\"))\n select.select_by_index(4)\n select = Select(self.browser.find_element_by_id(\n \"id_score_picture\"))\n select.select_by_index(4)\n select = Select(self.browser.find_element_by_id(\n \"id_score_global\"))\n select.select_by_index(4)\n\n self.browser.find_element_by_id(\n \"id_comment_intention\").send_keys(\"Commentaire intention\")\n\n submission_button = self.browser.find_element_by_class_name(\n 'btn-secondary')\n submission_button.click()\n time.sleep(2)\n html = self.browser.page_source\n self.assertInHTML(\"\"\"\n <h4 class=\"rouge-fonce\">Critique de test_login</h4>\n \"\"\",\n html)\n self.assertInHTML(\"\"\"\n <strong>Note moyenne de la revue : 4,0</strong>\n \"\"\",\n html)", "def create_resource_access_review(self, body, **kwargs):\n\n all_params = ['body', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in 
all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method create_resource_access_review\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `create_resource_access_review`\")\n\n resource_path = '/oapi/v1/resourceaccessreviews'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1ResourceAccessReview',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def update_review(review_id=None):\n review = storage.get(Review, review_id)\n if review:\n updated = request.get_json()\n if not updated:\n abort(400, \"Not a JSON\")\n for key, val in updated.items():\n if key not in ['id', 'created_at', 'updated_at',\n 'user_id', 'place_id']:\n setattr(review, key, val)\n storage.save()\n return jsonify(review.to_dict()), 200\n abort(404)", "def review_element(request, review_id):\n try:\n review = Review.objects.get(id=review_id)\n except Review.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n if request.method == \"PUT\":\n data = json.loads(request.body)\n \n review.rating = data.get(\"rating\", \"\")\n review.content = data.get(\"content\", \"\")\n \n review.save()\n return JsonResponse({\"message\": \"Review updated successfully\"}, status=204)", "def add_review(self, review):\n # Assume this method body has been correctly implemented.\n self.reviews.append(review)", "def post(id):\n\n try:\n beer = Beer.objects.get(id=id)\n except mongoengine.DoesNotExist:\n return flask.Response('No beer with id {} found'.format(id), 404)\n except:\n return flask.Response('Invalid id {}'.format(id), 400)\n\n data = flask.request.get_json()\n\n # check to see if a review was already created for this beer from this\n # user\n try:\n Review.objects.get(beer=beer, user=flask.request.user)\n except mongoengine.DoesNotExist:\n pass\n else:\n return flask.Response('You\\'ve already created a review for beer {}'.format(id),\n 400)\n\n review = Review(beer=beer,\n user=flask.request.user)\n\n props = ['aroma', 'appearance', 'taste', 'palate', 'bottle_style']\n for item in props:\n if item in data:\n setattr(review, item, data[item])\n\n review.calculate()\n\n try:\n review.save()\n except mongoengine.ValidationError as exp:\n return flask.Response('{}'.format(exp), 400)\n\n beer.rating = Review.objects.all().filter(beer=beer).average('overall')\n beer.save()\n\n return JSONResponse(review.to_json())", "def test_update_review(self):\n\n user1 = User.objects.create_user('John')\n self.book.reviews.create(\n user=user1,\n rating=5,\n notes=\"It's so awesome\"\n )\n\n 
user2 = User.objects.create_user('Jane')\n review = self.book.reviews.create(\n user=user2,\n rating=4,\n notes=\"Love it\"\n )\n\n # update rating\n review.rating = 3\n review.save()\n\n # need to reload from database for updated rating value in book\n book = Book.objects.get(id=self.book.id)\n self.assertAlmostEqual(book.rating, 4)", "def reviewhandler():\n objectid = request.values.get('objectid', 0, type=int)\n if not objectid:\n abort(400)\n\n form = AuthorUpdateForm(formdata=request.form)\n visitor = DataExporter()\n visitor.visit(form)\n\n workflow_object = workflow_object_class.get(objectid)\n workflow_object.extra_data[\"approved\"] = True\n workflow_object.extra_data[\"ticket\"] = request.form.get('ticket') == \"True\"\n workflow_object.extra_data['formdata'] = visitor.data\n workflow_object.data = formdata_to_model(workflow_object, visitor.data)\n workflow_object.save()\n db.session.commit()\n\n resume.delay(workflow_object.id)\n\n return render_template('authors/forms/new_review_accepted.html',\n approved=True)", "def save_movie_and_review(name, fi_name, imdb_id, reviewer, review):\n db = __get_session()\n movie_rec = db.query(Movie).filter_by(imdb_id=imdb_id).first()\n\n if not movie_rec:\n\n movie_rec = Movie(name=name,\n imdb_id=imdb_id,\n fi_name=fi_name)\n\n db.add(movie_rec)\n db.commit()\n\n movie_id = movie_rec.id\n\n review_rec = Review(reviewer=reviewer,\n review_txt=review,\n timestamp=datetime.datetime.now(),\n movie_id=movie_id)\n\n db.add(review_rec)\n db.commit()\n review_id = review_rec.id\n db.close()\n\n return review_id", "def create(self, request, *args, **kwargs):\n request.data[\"shop\"] = 1\n self.is_review_body_valid(self.get_serializer(data=request.data)) # checks if body data is valid\n\n shop_pk = self.get_shop_pk(request.data.pop(\"shop_link\"))\n request.data[\"shop\"] = shop_pk\n\n return super().create(request, *args, **kwargs)", "def submit_obj_for_review(selenium, obj, reviewer):\n review_comment = string_utils.StringMethods.random_string()\n _get_ui_service(selenium, obj).submit_for_review(\n obj, reviewer.email, review_comment)\n obj.update_attrs(\n review=entities_factory.ReviewsFactory().create(reviewers=reviewer))\n exp_comment = entities_factory.CommentsFactory().create(\n description=element.Common.REVIEW_COMMENT_PATTERN.format(\n # reviewers emails in review comment message need to be sorted\n # as they are displayed in UI in random order\n emails=', '.join(sorted(obj.review[\"reviewers\"])),\n comment=review_comment))\n exp_comment.created_at = rest_service.ObjectsInfoService().get_comment_obj(\n paren_obj=obj, comment_description=review_comment).created_at\n obj.comments = [exp_comment.repr_ui()]\n return obj", "def setUp(self):\n self.new_review = Review(title = \"\")", "def post_review(place_id=None):\n if place_id:\n if storage.get(Place, place_id) is None:\n abort(404)\n\n is_json = request.get_json()\n if is_json is None:\n abort(400, description=\"Not a Json\")\n\n if is_json.get('user_id') is None:\n abort(400, description=\"Missing user_id\")\n\n if storage.get(User, is_json.get('user_id')) is None:\n abort(404)\n\n if is_json.get('text') is None:\n abort(400, description=\"Missing text\")\n\n new_review = Review(**is_json)\n new_review.place_id = place_id\n new_review.save()\n return(jsonify(new_review.to_dict())), 201\n\n abort(404)", "def edit_review(review_id):\n form = EditReviewForm()\n try:\n review = Review.from_mongo(**mongo.db.reviews.find_one({\"_id\": ObjectId(review_id)}))\n except Exception as e:\n raise 
Exception(e)\n else:\n game = Game.from_mongo(**mongo.db.games.find_one({\"_id\": ObjectId(str(review.game_id))}))\n user_name = session.get('username')\n if user_name == review.author_ref['author_name']:\n user = User.from_mongo(**mongo.db.users.find_one({\"name\": user_name}))\n\n if form.validate_on_submit():\n review.name = form.title.data\n review.text = form.review_text.data\n review_ref = review.create_review_ref()\n review.update_review()\n for game_review in game.reviews:\n if game_review.get('review_pub_date') == review.pub_date:\n game.reviews.remove(game_review)\n game.reviews.append(review_ref)\n game.update_game()\n for user_review in user.reviews:\n if user_review.get('review_pub_date') == review.pub_date:\n user.reviews.remove(user_review)\n user.reviews.append(review_ref)\n user.update_user()\n return redirect(url_for('review', review_id=review_id))\n\n elif request.method == \"GET\":\n form.title.data = review.name\n form.review_text.data = review.text\n\n return render_template('edit_review.html.jinja',\n title='Edit Review',\n review_id=review_id,\n form=form\n )", "def addReview(self, review):\n if isinstance(review, Review):\n if not any(other.__dict__ == review.__dict__ for other in self.anime_reviews):\n self.anime_reviews.append(review)\n else:\n print(\"DUPLICATE DICT\")\n else:\n raise ValueError(\"object is not instance of Review\")", "def review(book_id):\n\n # User id from current session\n user_id = session[\"user_id\"]\n # Form data\n try:\n rating = request.form.get('rating')\n text = request.form.get('review-text')\n except ValueError:\n return error('Something went wrong with submission.', 400)\n\n # Has user already submitted a review for this book\n book_id_duplicates = db.execute(\n \"SELECT user_id from reviews \"\n \"WHERE book_id = :book_id \"\n \"AND user_id = :user_id\",\n {'book_id': book_id, 'user_id': user_id}).fetchone()\n if book_id_duplicates is not None:\n return error('Only one submission per book allowed!', 403)\n\n _review = {\n \"user_id\": user_id,\n \"book_id\": int(book_id),\n \"rating\": int(rating),\n \"text\": text.rstrip() # Should user leave new line in textarea\n }\n\n # Save user review\n db.execute(\n \"INSERT INTO reviews (user_id, book_id, rating, text)\"\n \"VALUES (:user_id, :book_id, :rating, :text)\", _review)\n db.commit()\n\n # Reload the page, rendering their review\n return redirect(url_for(\"book\", book_id=book_id))", "def create_rating(user, movie, score):\n\n # pass in user object, movie object, score integer\n # To test this function in the interactive mode, \n # create the user and movie objects and then pass \n # in those objects as the arguments\n rating = Rating(user=user, movie=movie, score=score)\n\n db.session.add(rating)\n db.session.commit()\n\n return rating", "def add_review(self, review):\n review_issue = IParentGetter(review).get_parent_object_of_type(\"Issue\")\n if review_issue is None:\n review_issue = IParentGetter(review).get_parent_object_of_type(\"Volume\")\n if self.current_issue != review_issue:\n if self.current_issue:\n self.finish_issue()\n self.current_issue = review_issue\n self.reviews_xml.append(review.restrictedTraverse(self.xml_view_name)())", "def reviews(self, reviews: object):\n\n self._reviews = reviews", "def post_review(self, form):\n comments_file = form.cleaned_data.get('comments', None)\n return_code = form.cleaned_data.get('return_code', None)\n\n # Update the review\n self.object.post_review(comments_file, return_code=return_code)\n if return_code:\n 
self.revision.return_code = return_code\n\n verb = None\n # If every reviewer has posted comments, close the reviewers step\n if self.object.role == 'reviewer':\n qs = Review.objects \\\n .filter(document=self.document) \\\n .filter(revision=self.revision.revision) \\\n .filter(role='reviewer') \\\n .exclude(closed_on=None)\n if qs.count() == self.revision.reviewers.count():\n self.revision.end_reviewers_step(save=False)\n verb = Activity.VERB_CLOSED_REVIEWER_STEP\n\n # If leader, end leader step\n elif self.object.role == 'leader':\n self.revision.end_leader_step(save=False)\n verb = Activity.VERB_CLOSED_LEADER_STEP\n\n # If approver, end approver step\n elif self.object.role == 'approver':\n self.revision.end_review(save=False)\n verb = Activity.VERB_CLOSED_APPROVER_STEP\n\n self.revision.save(update_document=True)\n\n if verb:\n activity_log.send(verb=verb,\n target=self.revision,\n sender=do_batch_import,\n actor=self.request.user)", "def insert(self, movie_name, year_released, genre, rating, review, reviewer):\n params = {'movie_name': movie_name, 'year_released': year_released,'genre':genre, 'rating': rating, 'review': review, 'reviewer': reviewer}\n self.movie_reviews.append(params)\n return True", "def test_create_review(self):\n yield self.nodes[0].overlay.create_project(\"test\", \"specpointer\", \"01-02-03\", 300, \"EUR\", 5)\n yield self.deliver_messages()\n project = self.nodes[1].overlay.persistence.get_projects()[0]\n yield self.nodes[1].overlay.create_submission(project['public_key'].decode('hex'), project['id'], 'test')\n yield self.deliver_messages()\n\n # Do a review\n submission = self.nodes[0].overlay.persistence.get_submissions_for_project(project['public_key'].decode('hex'), project['id'])[0]\n yield self.nodes[0].overlay.create_review(submission['public_key'].decode('hex'), submission['id'], 'test')\n yield self.deliver_messages()\n\n self.assertTrue(self.nodes[1].overlay.persistence.get_reviews(submission['public_key'].decode('hex'), submission['id']))", "def add_movie_review(request):\n print (json.loads(request.body))\n serializer = MovieReviewsSerializer(data=json.loads(request.body))\n temp = json.loads(request.body)\n movie_rev = MovieReviews.objects.filter(user_id=temp['user_id'], movie_id = temp['movie_id'])\n if len(movie_rev) > 0:\n movie = Movie.objects.filter(pk=temp['movie_id'])\n serializer2 = MovieSerializer(movie, many=True)\n old = MovieReviewsSerializer(movie_rev, many=True).data[0]['rating']\n initial = serializer2.data[0]['rating']\n num = serializer2.data[0]['no_of_reviews']\n new_rating = ((initial*num)+(temp['rating']-old))/num\n MovieReviews.objects.filter(user_id=temp['user_id'], movie_id = temp['movie_id']).update(description=temp['description'], rating=temp['rating'])\n Movie.objects.filter(pk=temp['movie_id']).update(rating=new_rating)\n else:\n if serializer.is_valid():\n serializer.save()\n movie = Movie.objects.filter(pk=serializer.data['movie_id'])\n serializer2 = MovieSerializer(movie, many=True)\n initial = serializer2.data[0]['rating']\n num = serializer2.data[0]['no_of_reviews']\n print (num)\n if num == 0:\n Movie.objects.filter(pk=serializer.data['movie_id']).update(rating=serializer.data['rating'], no_of_reviews=1)\n else:\n new_val = ((initial*num)+serializer.data['rating'])/(num+1)\n Movie.objects.filter(pk=serializer.data['movie_id']).update(rating=new_val, no_of_reviews=num+1)\n serializer2 = MovieSerializer(movie, many=True)\n else: #return HttpResponse(\"done\")\n return Response(serializer.errors, 
status=status.HTTP_400_BAD_REQUEST)\n MovieReviews.objects.filter(user_id=temp['user_id'], movie_id = temp['movie_id']).update(positivity=func(temp['description']))\n reviews = MovieReviews.objects.filter(user_id=temp['user_id'], movie_id=temp['movie_id'])\n serializer3 = MovieReviewsSerializer(reviews, many=True)\n return Response(serializer3.data, status=status.HTTP_201_CREATED)", "def submit_review():\n \n reviewer = request.form.get('reviewer')\n review = request.form.get('review')\n name = request.form.get('name')\n fi_name = request.form.get('fi_name')\n imdb_id = request.form.get('imdb_id')\n year = request.form.get('year')\n timestamp = request.form.get('timestamp')\n\n # Save review and movie first, if no record yet\n review_id = save_movie_and_review(name, fi_name, imdb_id, reviewer, review)\n if review_id:\n return \"Thank you, \" + reviewer + \". Your review was saved!\"\n else:\n return \"Something went wrong!\"", "def get_reviews(review_id):\n if review_id:\n review = storage.get(Review, review_id) # retrieves obj\n if review is None:\n return jsonify({'error': 'Not found'}), 404\n if request.method == 'DELETE':\n storage.delete(review) # deletes\n storage.save()\n return jsonify({}), 200\n elif request.method == 'PUT':\n js = request.get_json()\n if js is None:\n return jsonify({'error': 'Not a JSON'}), 400\n js.pop('id', None)\n js.pop('user_id', None)\n js.pop('place_id', None)\n js.pop('created_at', None)\n js.pop('updated_at', None)\n for key, value in js.items():\n setattr(review, key, value) # updates\n review.save()\n return jsonify(review.to_dict()), 200\n else:\n return jsonify(review.to_dict()), 200\n\n if request.method == 'POST':\n js = request.get_json()\n if js is None:\n return jsonify({'error': 'Not a JSON'}), 400\n if js.get('user_id', None) is None:\n return jsonify({'error': 'Missing user_id'}), 400\n if js.get('text', None) is None:\n return jsonify({'error': 'Missing text'}), 400\n obj = Review(**js) # creates\n obj.save()\n return jsonify(obj.to_dict()), 201\n\n reviews = []\n reviews_obj = storage.all('Review') # retrieves list obj\n for obj in reviews_obj:\n reviews.append(reviews_obj[obj].to_dict())\n return jsonify(reviews)", "def holdingpenreview():\n objectid = request.values.get('objectid', 0, type=int)\n approved = request.values.get('approved', False, type=bool)\n ticket = request.values.get('ticket', False, type=bool)\n if not objectid:\n abort(400)\n workflow_object = workflow_object_class.get(objectid)\n workflow_object.extra_data[\"approved\"] = approved\n workflow_object.extra_data[\"ticket\"] = ticket\n workflow_object.save()\n db.session.commit()\n\n resume.delay(workflow_object.id)\n\n return render_template('authors/forms/new_review_accepted.html',\n approved=approved)", "def test_attributes_Review(self):\n obj = Review()\n self.assertIsInstance(obj.place_id, str)\n self.assertIsInstance(obj.user_id, str)\n self.assertIsInstance(obj.text, str)", "def create_rating(input_user_id, input_rating, input_movie_id):\n \n rating = Rating(user_id=input_user_id, rating=input_rating, movie_id=input_movie_id)\n \n db.session.add(rating)\n db.session.commit()\n\n return rating", "def submitToReview(self, obj):\n self.wftool.doActionFor(obj, \"submit\")", "def add_review(request, product_id):\n product = get_object_or_404(Product, pk=product_id)\n\n if request.method == 'POST': \n review_form = ReviewForm(request.POST)\n if review_form.is_valid():\n review = review_form.save(commit=False)\n review.product = product\n review.user = request.user\n 
review.save()\n messages.info(request, \"Your review has been received! Thank you for your interest.\")\n return redirect(reverse('product_detail', args=[product_id]))\n else:\n print(review_form.errors)\n \n return redirect(reverse('product_detail', args=[product_id]))", "def new_review(request):\n user_profile = UserProfile.objects.get(user=request.user)\n\n if request.user.is_authenticated:\n if request.method == 'POST':\n review_form = ReviewForm(request.POST)\n if review_form.is_valid():\n if len(request.POST[\"review_content\"]) <= 0 or len(\n request.POST[\"product\"]) <= 0:\n messages.error(\n request, \"You haven't completed the review form! \\\n Please add content and try again.\")\n return redirect(reverse(\"gallery\"))\n new_review = review_form.save(commit=False)\n new_review.user_profile = user_profile\n review_form.save()\n messages.success(request, 'Your review has \\\n been added.')\n return redirect(reverse(\"gallery\"))\n else:\n messages.error(request, 'Your review could not be added. \\\n Please check that your review is valid.')\n\n template = 'gallery/gallery.html'\n context = {\n 'review_form': review_form,\n }\n\n return render(request, template, context)", "def test_instance_Review(self):\n self.assertIsInstance(self.review, Review)", "def put(id, rid):\n\n try:\n beer = Beer.objects.get(id=id)\n except mongoengine.DoesNotExist:\n return flask.Response('No beer with id {} found'.format(id), 404)\n except:\n return flask.Resposne('Invalid beer id {}'.format(id), 400)\n\n try:\n review = Review.objects.get(id=rid, beer=beer)\n except mongoengine.DoesNotExist:\n return flask.Response('No review with id {} found'.format(rid), 404)\n except:\n return flask.Response('Invalid review id {}'.format(id), 400)\n\n data = flask.request.get_json()\n\n # update an of our simple fields\n props = ['aroma', 'appearance', 'taste', 'palate', 'bottle_style']\n for item in props:\n if item in data:\n setattr(review, item, data[item])\n\n review.calculate()\n\n try:\n review.save()\n except mongoengine.ValidationError as exp:\n return flask.Response('{}'.format(exp), 400)\n\n beer.rating = Review.objects.all().filter(beer=beer).average('overall')\n beer.save()\n\n return JSONResponse(review.to_json())", "def review(user_id, item_id, text, rating):\n if Review.objects.filter(user=user_id, item=item_id):\n return \"You already wrote a review!\"\n\n form = ReviewForm({\n 'user': user_id,\n 'item': item_id,\n 'text': text,\n 'rating': rating,\n 'agrees': 0,\n 'thanks': 0\n })\n if form.is_valid():\n form.save()\n return False\n return \"Something was wrong with the review you submitted!\"", "def create(title, head, base='master', message=''):\n review_info = {\n 'title': title,\n 'body': message,\n 'head': head,\n 'base': base,\n }\n\n data = json_encode(review_info)\n review = parse(gh_request('POST', '/repos/:user/:repo/pulls', body=data))\n printers.print_review_created(review)", "def review_add(request):\n result = {}\n\n u = request.user\n\n p = Product.objects.get_by_sku(request.POST['sku'])\n\n if p is None:\n result[\"result\"] = '0'\n elif TransactionLineItem.objects.filter(transaction__party=u, product=p).count() > 0:\n # need to check if I bought this item\n\n r, created = Review.objects.get_or_create(reviewer=u, product=p)\n r.content =request.POST['content']\n r.rating=int(request.POST['rating'])\n\n # reply to review request\n rto = request.POST.get('reply_to', None)\n if rto:\n rev_request = ReviewRequest.objects.get(id=int(rto))\n r.reply_to.add(rev_request)\n # change wish 
item review status to review=2\n for w in Wishlist.objects.filter(product=p, party=rev_request.requester):\n w.review = Wishlist.REVIEW_RESPONDED\n w.save()\n \n r.public = bool(request.POST['public'])\n r.save() \n\n # add a feed\n f = Feed(actor=u, action=Feed.REVIEWED, product=p) \n f.save()\n \n result[\"result\"] = str(r.id)\n else:\n result['result'] = '-1'\n\n return JSONHttpResponse(result)", "def review_by_id(review_id):\n review = storage.get(\"Review\", review_id)\n if review is None:\n abort(404)\n return jsonify(review.to_json())", "def test_add_remove_review(self):\n\n user1 = User.objects.create_user('John')\n self.book.reviews.create(\n user=user1,\n rating=5,\n notes=\"It's so awesome\"\n )\n\n user2 = User.objects.create_user('Jane')\n review = self.book.reviews.create(\n user=user2,\n rating=4,\n notes=\"Love it\"\n )\n\n # need to reload from database for updated rating value in book\n book = Book.objects.get(id=self.book.id)\n self.assertAlmostEqual(book.rating, 4.5)\n\n review.delete()\n\n book = Book.objects.get(id=self.book.id)\n self.assertAlmostEqual(book.rating, 5)", "def test_create_game_review(self):\n\n url = \"/reviews\"\n data = {\n \"content\": \"This game is literally so cool!\",\n \"game_id\": 1\n }\n\n self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token)\n response = self.client.post(url, data, format='json')\n\n json_response = json.loads(response.content)\n \n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n self.assertEqual(json_response[\"content\"], \"This game is literally so cool!\")\n self.assertEqual(json_response[\"game_id\"], 1)", "def new_review_view(request):\n data = {'success': False, 'msg': ''}\n if request.method == 'POST':\n # check if the user has already logged in.\n # if user has not logged in, return an error msg to frontend.\n # if user has logged in, let user create a new review\n if not request.session.get('login_flag', None):\n data['msg'] = 'user does not log in'\n return JsonResponse(data)\n # else use is logged in\n user_name = request.session.get('name', None)\n # return user_obj by user_name from login.models.User database\n try:\n user_obj = login.models.User.objects.get(name=user_name)\n except ObjectDoesNotExist:\n data['msg'] = 'does not have user: ' + str(user_name)\n return JsonResponse(data)\n\n req = simplejson.loads(request.body)\n movie_id = req.get('movie_id', None)\n review_comment = req.get('review_comment', None)\n rating_number = req.get('rating_number', None)\n\n # check if either movie_id, review_comment, rating_number, is empty\n if movie_id is None or review_comment is None or rating_number is None:\n data['msg'] = 'movie_id, review_comment, rating_number are required'\n return JsonResponse(data)\n\n # check movie_id is a positive integer and rating_number is a positive number\n try:\n movie_id = int(movie_id)\n rating_number = float(rating_number)\n if not (movie_id > 0 and rating_number >= 0):\n data['msg'] = 'movie_id must be a positive integer, ' + \\\n 'review_comment must be a string, ' + \\\n 'rating_number must be a positive number'\n return JsonResponse(data)\n except:\n data['msg'] = 'movie_id must be a positive integer, ' + \\\n 'review_comment must be a string, ' + \\\n 'rating_number must be a positive number'\n return JsonResponse(data)\n\n # return movie_obj by movie_id from models.Movie database\n try:\n movie_obj = models.Movie.objects.get(mid=movie_id)\n except ObjectDoesNotExist:\n data['msg'] = 'does not have movie with movie_id: ' + str(movie_id)\n 
return JsonResponse(data)\n\n date = datetime.datetime.now(timezone.utc)\n\n try:\n # create a new record for the new review in database.\n models.Review.objects.create(user=user_obj, movie=movie_obj, review_comment=review_comment,\n rating_number=rating_number, date=date)\n # update the average_rating and votecount for the movie.\n update_movie_rating_record(movie_id, float(rating_number), 'new')\n except IntegrityError:\n data['msg'] = 'each user can only leave one review for a movie, but reviews are editable'\n return JsonResponse(data)\n else:\n data['success'] = True\n data['msg'] = 'successfully create a new review'\n return JsonResponse(data)\n else:\n data['msg'] = 'please use POST'\n return JsonResponse(data)", "def review():\r\n\r\n # Ensure isbn_number is submitted\r\n if not request.form.get(\"isbn_number\"):\r\n return apology(\"Invalid book\", 403)\r\n\r\n # Ensure review is submitted\r\n if not request.form.get(\"review\"):\r\n return apology(\"Text is not submitted\", 403)\r\n\r\n # Check if book exist, if not error out\r\n\r\n # add review to db\r\n\r\n return redirect(url_for(details, isbn_number=request.form.get(\"isbn_number\")))", "def mutate(self, info, input):\n user = info.context.user\n\n # Check if study exists\n _, study_id = from_global_id(input[\"study\"])\n try:\n study = Study.objects.get(pk=study_id)\n except Study.DoesNotExist:\n raise GraphQLError(f\"Study {study_id} not found.\")\n\n # Check permissions\n # Must have general add permission for any study OR\n # permission to add review to user's studies\n if not (\n user.has_perm(\"data_reviews.add_datareview\")\n or (\n user.has_perm(\"data_reviews.add_my_study_datareview\")\n and user.studies.filter(kf_id=study.kf_id).exists()\n )\n ):\n raise GraphQLError(\"Not allowed\")\n\n # Create data_review\n with transaction.atomic():\n data_review = DataReview(\n **{\n k: input[k]\n for k in input\n if k not in {\"study\", \"versions\"}\n }\n )\n data_review.study = study\n data_review.creator = user\n data_review.save()\n\n # Check files in review\n review_version_ids = check_review_files(input, data_review)\n\n # Review files are valid and they've changed\n if review_version_ids:\n # Update versions\n data_review.versions.set(review_version_ids)\n\n # Clear the data review's validation results if they exist\n try:\n data_review.validation_resultset.delete()\n except ValidationResultset.DoesNotExist:\n pass\n\n # Start review if we have files\n data_review.start()\n data_review.save()\n\n return CreateDataReviewMutation(data_review=data_review)", "def product_review_form(request):\n if request.method=='POST':\n service = ServiceProvider.objects.filter(creator=request.user).first()\n product = Product.objects.get(created_by=service)\n form=ReviewCreationForm(request.POST)\n form.instance.created_by = request.user\n# This is for service provider reviews it self not product so no need for it\n# form.instance.review_of=service\n form.instance.product= product\n form.save()\n return redirect('public:product_detail', product.pk)\n form=ReviewCreationForm()\n return render(request, 'product_detail.html', {'form':form})", "def rate_review_for_user():\n values = flask.request.values\n review_id = values.get('review_id')\n voted_helpful = values.get('voted_helpful')\n review_type = values.get('review_type')\n\n uc_review = None\n filtered_courses = m.UserCourse.objects(id=review_id)\n if len(filtered_courses) > 0:\n uc = filtered_courses[0]\n if review_type == 'course':\n uc_review = uc.course_review\n else:\n uc_review 
= uc.professor_review\n else:\n filtered_courses = m.MenloCourse.objects(id=review_id)\n if len(filtered_courses) > 0:\n uc = filtered_courses[0]\n uc_review = uc.professor_review\n\n vote_added_response = api_util.jsonify({\n 'success': True\n })\n voted_already_response = api_util.jsonify({\n 'already_voted': True\n })\n\n user = _get_user_require_auth()\n if review_type == 'course':\n if review_id in user.voted_course_review_ids:\n return voted_already_response\n user.voted_course_review_ids.append(review_id)\n elif review_type == 'prof':\n if review_id in user.voted_prof_review_ids:\n return voted_already_response\n user.voted_prof_review_ids.append(review_id)\n user.save()\n\n if uc_review:\n if voted_helpful == 'true':\n uc_review.num_voted_helpful += 1\n else:\n uc_review.num_voted_not_helpful += 1\n uc.save()\n\n return vote_added_response", "def test_Review(self):\n my_Review = Review()\n my_Review.name = \"LA\"\n self.assertEqual(my_Review.name, 'LA')", "def test_instance(self):\n self.assertIsInstance(self.new_review, Review)", "def save(self):\n if self.id:\n self.update()\n else:\n self.create()", "def review_by_id(review_id):\n obj = storage.get(\"Review\", review_id)\n if obj is None:\n abort(404)\n return jsonify(obj.to_dict())", "def test_reviews_model(self):\n\n self.user.save()\n query_user = User.query.filter_by(email='[email protected]').first()\n\n business = Business('CosmasTech', 'Technology', 'Nairobi',\n 'AI is transforming human life', query_user.id)\n business.save()\n query_business = Business.query.filter_by(name='cosmastech').first()\n\n review = Reviews('The business will really save the world!',\n query_business.id, query_user.id)\n review.save()\n query_reviews = Reviews.query.filter_by(id=1).first()\n\n self.assertEqual(\n query_reviews.review, 'The business will really save the world!')", "def create_rating(user, wine, rating):\n\n rating = Rating(user=user, wine=wine, rating=rating)\n # rating = Rating(user=user, wine=wine, rating=rating, date=datetime.datetime.now())\n #rating = Rating(user_id=user_id, wine_id=wine_id, rating_id=rating_id)\n #above, can do that instead of passing object\n\n db.session.add(rating)\n db.session.commit()\n\n return rating", "def submit_comment(book_id):\n \n #Information for inserting\n score = request.form.get(\"score\")\n comment = request.form.get(\"comment\")\n\n if score is None or comment is None:\n return render_template(\"error.html\",message=\"Please submit the complete information.\")\n\n #Inserte a new review\n db.execute(\"INSERT INTO reviewer (id_book, id_user, comment, score_user) VALUES (:id_book, :id_user, :comment, :score_user)\",\n {\"id_book\":book_id, \"id_user\":session[\"user_id\"], \"comment\": comment, \"score_user\": score})\n \n db.commit()\n\n #Get the info of the book\n book = db.execute(\"SELECT * FROM book WHERE id = :book_id\",{\"book_id\": book_id}).fetchone()\n if book is None:\n return render_template(\"error.html\", message = \"No such Book.\")\n\n #Get the reviews joined with the name of the user\n stmt = \"SELECT user_library.*, reviewer.* FROM user_library INNER JOIN reviewer ON user_library.id=reviewer.id_user WHERE id_book = :book_id\"\n reviews = db.execute(stmt,{\"book_id\": book_id}).fetchall()\n\n #Get the user_review info\n user_review = db.execute(\"SELECT * FROM reviewer WHERE id_book = :book_id AND id_user = :user_id\",\n {\"book_id\": book_id, \"user_id\": session[\"user_id\"]}).fetchone()\n\n #If this info not exist we could add a comment, else we can not.\n is_commented = 
True\n if user_review is None:\n is_commented = False\n\n #Insert a new score if a new comment is introduced\n average_score = db.execute(\"SELECT AVG(score_user) FROM reviewer WHERE id_book = :book_id\",{\"book_id\":book_id}).fetchone()\n average_score = average_score.items()\n average_score = average_score[0]\n average_score = float(average_score[1])\n\n db.execute(\"UPDATE book SET score = :average_score WHERE id = :book_id\", {\"average_score\":average_score, \"book_id\": book_id}) \n db.commit()\n\n #Get the info of the book\n book = db.execute(\"SELECT * FROM book WHERE id = :book_id\",{\"book_id\": book_id}).fetchone()\n if book is None:\n return render_template(\"error.html\", message = \"No such Book.\")\n\n #Proccess for rating count of Goofreaders\n goodreader_info = requests.get(\"https://www.goodreads.com/book/review_counts.json\", params={\"key\": KEY, \"isbns\": book.isbn })\n goodreader_info = goodreader_info.json()\n goodreader_info = goodreader_info[\"books\"]\n\n average_rating = goodreader_info[0][\"average_rating\"]\n ratings_counts = goodreader_info[0][\"ratings_count\"]\n\n return render_template(\"book_info.html\",book=book, reviews = reviews, is_commented = is_commented\n , average_rating = average_rating, ratings_counts = ratings_counts )", "def add_review(product_id):\n if request.method == 'POST':\n \"\"\"\n Gets the next search perameter from the URL. Code is from https://\n blog.tecladocode.com/handling-the-next-url-when-logging-in-with-flask/\n \"\"\"\n next_url = request.form.get('next')\n\n \"\"\"\n Gets the product's ratings from the database and counts the number of\n reviews in the database for the product. Count method is from https://\n docs.mongodb.com/manual/reference/method/db.collection.count/\n \"\"\"\n product_ratings = mongo.db.products.find_one(\n {\"_id\": ObjectId(product_id)}, product_ratings_query())\n\n product_count = mongo.db.reviews.count(\n {\"product\": product_ratings['name']})\n\n \"\"\"\n Adds the details entered into the form to a dictionary. 
Datetime\n method is from https://www.w3schools.com/python/python_datetime.asp\n \"\"\"\n review = {\n \"overall_rating\": int(request.form.get('overall_rating')),\n \"performance_rating\": int(request.form.get('performance_rating')),\n \"usability_rating\": int(request.form.get('usability_rating')),\n \"price_rating\": int(request.form.get('price_rating')),\n \"quality_rating\": int(request.form.get('quality_rating')),\n \"review_title\": request.form.get('review_title'),\n \"review\": request.form.get('review'),\n \"product\": product_ratings['name'],\n \"date_added\": datetime.datetime.now(),\n \"reviewed_by\": \"{} {}\".format(session['user']['first_name'],\n session['user']['last_name'])\n }\n\n \"\"\"\n Calculates the product's new ratings and updates them in the database.\n Update one method is from https://docs.mongodb.com/manual/\n reference/method/db.collection.updateOne/\n \"\"\"\n new_ratings = add_ratings(product_ratings, product_count, review)\n\n mongo.db.products.update_one(\n {'_id': ObjectId(product_id)}, {\"$set\": new_ratings})\n\n mongo.db.products.update_one(\n {'_id': ObjectId(product_id)},\n star_rating(new_rating=int(request.form.get('overall_rating'))))\n\n # Adds the review to the database\n mongo.db.reviews.insert_one(review)\n\n \"\"\"\n Code for message categories is from https://flask.palletsprojects.com/\n en/1.1.x/patterns/flashing/\n \"\"\"\n flash(\"Review Successfully Added\", \"success\")\n\n return redirect(next_url)\n\n else:\n \"\"\"\n Aborts the request and returns a 400 status code if the URL does not\n contain a next search perameter. Code is from https://www.kite.com/\n python/answers/how-to-get-parameters-from-a-url-using-flask-in-python\n and https://flask.palletsprojects.com/en/1.1.x/api/#flask.abort\n \"\"\"\n if request.args.get('next') is None:\n abort(400)\n\n \"\"\"\n Gets the product's details from the products databse and aborts the\n request and returns a 404 status code if the product does not exist.\n Code is from https://flask.palletsprojects.com/en/1.1.x/api\n /#flask.abort\n \"\"\"\n product = mongo.db.products.find_one({'_id': ObjectId(product_id)})\n\n if product is None:\n abort(404)\n\n return render_template(\"add_review.html\", page_title=\"Add Review\",\n product_id=product_id)", "def add_restaurant_review():\n username = sign_up.get_username()\n if username:\n add_var = dict(user=username, restaurant_name=\"\", restaurant_address=\"\",\n restaurant_item=\"\", item_comments=\"\", item_price=\"\",\n restaurant_ranking=\"\", restaurant_rating=\"\",\n restaurant_rating_reason=\"\", address=\"\", restaurant_chosen=\"\",\n address_chosen=\"\")\n return bottle.template('add_review', add_var=add_var)\n else:\n return bottle.template('login',\n dict(user_error=\"Sorry, you need to be logged in to submit a review, please log below:\", pw_error=\"\"))", "def post_review(place_id):\n required_data = {\"text\", \"user_id\"}\n return post(cls, parent_cls, place_id, required_data)", "def up_vote():\n review_id = review_id = request.form.get('review_id')\n\n mongo.db.reviews.update_one(\n {'_id': ObjectId(review_id)}, {\"$inc\": {\"up_vote\": 1}})\n\n up_vote = mongo.db.reviews.find_one({\"_id\": ObjectId(review_id)},\n {\"up_vote\": 1, \"_id\": 0})\n\n return jsonify({\"up_vote\": up_vote['up_vote'], \"success\": True})", "def update(self, request, pk=None):\n order_product = Order_Products.objects.get(pk=pk)\n product = Product.objects.get(pk=request.data['product_id'])\n order = Order.objects.get(pk=request.data['order_id'])\n 
order_product.review = request.data['review']\n order_product.product = product\n order_product.order = order\n order_product.save()\n \n return Response({}, status=status.HTTP_204_NO_CONTENT)", "def service_review_form(request):\n if request.method == 'POST':\n service = ServiceProvider.objects.filter(creator=request.user).first()\n form = ReviewCreationForm(request.POST)\n form.instance.created_by = request.user\n\n form.instance.review_of=service\n# this is for product not service so no need for it\n# form.instance.product = Product.objects.get(created_by=service)\n form.save()\n return redirect('public:service_detail', service.pk)\n form = ReviewCreationForm()\n return render(request, 'service_detail.html', {'form': form})", "def create(self, request, pk):\n # can only create on base resource\n if pk is not None:\n raise NotImplemented('POST')\n\n # create new object of model and update\n self._object_update(self.model(), request.data)", "def practices_create():\n practice = Practice()\n form = PracticeCreateForm()\n if form.validate_on_submit():\n\n form.populate_obj(practice)\n db.session.add(practice)\n db.session.commit()\n return redirect(url_for('practices.home'))\n return render_template('practices/create.html', form=form)", "def post(self, request, *args, **kwargs):\n rating = request.data['rating']\n if rating < 0 or rating > 5:\n return Response({'detail': 'Invalid rating!'}, status.HTTP_400_BAD_REQUEST)\n\n data = {\n 'igdb': request.data['igdb'],\n 'name': request.data['name'],\n 'slug': request.data['slug'],\n 'cover_id': request.data['cover_id'],\n 'backdrop_id': request.data['backdrop_id']\n }\n game, _ = Game.objects.get_or_create(**data)\n user = CustomUser.objects.get(id=request.user.id)\n\n r, _ = Ratings.objects.get_or_create(game=game, user=user)\n r.rating = rating\n r.save()\n\n serializer = RatingSerializer(r).data\n\n return Response(serializer)", "def test_database(self):\n review = Review(project=self.new_project, user=self.new_user, design=7, usability=6, content=5, comment=\"This is a nice website.\")\n review.save()\n reviews = Review.objects.all()\n\n self.assertTrue(len(reviews) > 0)", "def edit_review(review_id):\n if request.method == 'POST':\n \"\"\"\n Gets the next search perameter from the URL. Code is from https://\n blog.tecladocode.com/handling-the-next-url-when-logging-in-with-flask/\n \"\"\"\n next_url = request.form.get('next')\n\n # Gets the review's and product's ratings from the database\n user_ratings = mongo.db.reviews.find_one(\n {'_id': ObjectId(review_id)}, user_ratings_query())\n\n product_ratings = mongo.db.products.find_one(\n {\"name\": user_ratings['product']}, product_ratings_query())\n\n \"\"\"\n Counts the number of reviews in the database for the product.\n Count method is from https://docs.mongodb.com/manual/\n reference/method/db.collection.count/\n \"\"\"\n product_count = mongo.db.reviews.count(\n {\"product\": user_ratings['product']})\n\n \"\"\"\n Adds the details entered into the form to a dictionary. 
Datetime method\n is from https://www.w3schools.com/python/python_datetime.asp\n \"\"\"\n review = {\n \"overall_rating\": int(request.form.get('overall_rating')),\n \"performance_rating\": int(request.form.get('performance_rating')),\n \"usability_rating\": int(request.form.get('usability_rating')),\n \"price_rating\": int(request.form.get('price_rating')),\n \"quality_rating\": int(request.form.get('quality_rating')),\n \"review_title\": request.form.get('review_title'),\n \"review\": request.form.get('review'),\n \"date_added\": datetime.datetime.now(),\n }\n\n \"\"\"\n Calculates the product's new ratings and updates them in the database.\n Update one method is from https://docs.mongodb.com/manual/reference\n /method/db.collection.updateOne/\n \"\"\"\n new_ratings = edit_ratings(\n user_ratings, product_ratings, product_count, review)\n\n mongo.db.products.update_one(\n {'_id': product_ratings['_id']}, {\"$set\": new_ratings})\n\n if (int(request.form.get('overall_rating')) != user_ratings\n ['overall_rating']):\n\n mongo.db.products.update_one({\"_id\": review_id}, star_rating(\n request.form.get('overall_rating'), user_ratings\n ['overall_review']))\n\n mongo.db.reviews.update_one(\n {'_id': ObjectId(review_id)}, {\"$set\": review})\n\n \"\"\"\n Code for message categories is from https://flask.palletsprojects.com/\n en/1.1.x/patterns/flashing/\n \"\"\"\n flash(\"Review Successfully Updated\", \"success\")\n\n return redirect(next_url)\n\n else:\n \"\"\"\n Aborts the request and returns a 400 status code if the URL does not\n contain a next search perameter. Code is from https://www.kite.com/\n python/answers/how-to-get-parameters-from-a-url-using-flask-in-python\n and https://flask.palletsprojects.com/en/1.1.x/api/#flask.abort\n \"\"\"\n if request.args.get('next') is None:\n abort(400)\n\n \"\"\"\n Gets the product's details from the products databse and aborts the\n request and returns a 404 status code if no review is found or\n a 403 status if the review author is not the user currently signed in.\n Code is from https://flask.palletsprojects.com/en/1.1.x/api\n /#flask.abort and https://docs.mongodb.com/manual/tutorial/\n project-fields-from-query-results/\n \"\"\"\n review = mongo.db.reviews.find_one(\n {\"_id\": ObjectId(review_id)}, {\"reviewed_by\": 1, \"_id\": 0})\n\n if review is None:\n return abort(404)\n\n elif \"{} {}\".format(session['user']['first_name'], session['user']\n ['last_name']) != review['reviewed_by']:\n return abort(403)\n\n else:\n # Gets the review from the database\n review = mongo.db.reviews.find_one({'_id': ObjectId(review_id)})\n\n return render_template('edit_review.html',\n page_title='Edit Review', review=review)", "def test_instance_Review(self):\n obj = Review()\n self.assertIsInstance(obj, BaseModel)", "def add_review(db_id):\r\n\r\n if request.args['collection'] == 'recipe':\r\n # validates request form\r\n form = request.form\r\n error_list = validate_form(form, 'review')\r\n\r\n if error_list == []:\r\n # adds review to recipe\r\n mongo.db.recipes.update(\r\n {'_id': ObjectId(db_id)}, {'$push': {\r\n 'reviews': request.form.get('review')}\r\n }\r\n )\r\n\r\n # redirects to the recipe\r\n return redirect(url_for(\r\n 'view',\r\n db_id=db_id,\r\n collection='recipes')\r\n )\r\n\r\n else:\r\n # initializes page title\r\n page_title = 'View a recipe'\r\n\r\n # sends error list back to the form to correct mistakes\r\n return render_template(\r\n 'view.html',\r\n recipe=mongo.db.recipes.find_one({'_id': ObjectId(db_id)}),\r\n errors=error_list, 
form=form,\r\n page_title=page_title\r\n )\r\n\r\n elif request.args['collection'] == 'appliance':\r\n # validates request form\r\n form = request.form\r\n error_list = validate_form(form, 'review')\r\n\r\n if error_list == []:\r\n # adds review to the appliance\r\n mongo.db.appliances.update({'_id': ObjectId(db_id)}, {'$push': {\r\n 'reviews': request.form.get('review')}}\r\n )\r\n\r\n # redirects to the appliance\r\n return redirect(url_for(\r\n 'view',\r\n db_id=db_id,\r\n collection='appliances')\r\n )\r\n\r\n else:\r\n # initializes page title\r\n page_title = 'View an appliance'\r\n\r\n # sends error list back to the form to correct mistakes\r\n return render_template(\r\n 'view.html',\r\n appliance=mongo.db.appliances.find_one(\r\n {'_id': ObjectId(db_id)}),\r\n errors=error_list,\r\n form=form\r\n )\r\n\r\n else:\r\n # returns an error message on incorrect argument\r\n return render_template(\r\n 'error.html',\r\n msg='Bad argument error! (/add_review)'\r\n )", "def edit_review_view(request):\n data = {'success': False, 'msg': ''}\n if request.method == 'POST':\n # check if the user has already logged in.\n # if user has not logged in, return an error msg to frontend.\n # if user has logged in, let user edit review\n if not request.session.get('login_flag', None):\n data['msg'] = 'user does not log in'\n return JsonResponse(data)\n # else use is logged in\n user_name = request.session.get('name', None)\n # return user_obj by user_name from login.models.User database\n try:\n user_obj = login.models.User.objects.get(name=user_name)\n except ObjectDoesNotExist:\n data['msg'] = 'does not have user: ' + str(user_name)\n return JsonResponse(data)\n\n req = simplejson.loads(request.body)\n movie_id = req.get('movie_id', None)\n review_comment = req.get('review_comment', None)\n rating_number = req.get('rating_number', None)\n\n # check if either movie_id, review_comment, rating_number, is empty\n if movie_id is None or review_comment is None or rating_number is None:\n data['msg'] = 'movie_id, review_comment, rating_number are required'\n return JsonResponse(data)\n\n # check movie_id is a positive integer and rating_number is a positive number\n try:\n movie_id = int(movie_id)\n rating_number = float(rating_number)\n if not (movie_id > 0 and rating_number >= 0):\n data['msg'] = 'movie_id must be a positive integer, ' + \\\n 'review_comment must be a string, ' + \\\n 'rating_number must be a positive number'\n return JsonResponse(data)\n except:\n data['msg'] = 'movie_id must be a positive integer, ' + \\\n 'review_comment must be a string, ' + \\\n 'rating_number must be a positive number'\n return JsonResponse(data)\n\n # return movie_obj by movie_id from models.Movie database\n try:\n movie_obj = models.Movie.objects.get(mid=movie_id)\n except ObjectDoesNotExist:\n data['msg'] = 'does not have movie with movie_id: ' + str(movie_id)\n return JsonResponse(data)\n\n # return review_obj, from models.Review database, by giving user_obj, movie_obj\n try:\n review_obj = models.Review.objects.get(user=user_obj, movie=movie_obj)\n except ObjectDoesNotExist:\n data['msg'] = \"the current user didn't leave a review for movie_id: \" + str(movie_id)\n return JsonResponse(data)\n else:\n # get the previous rating_number\n prev_rating_number = review_obj.rating_number\n # update review_obj\n date = datetime.datetime.now(timezone.utc)\n review_obj.review_comment = review_comment\n review_obj.rating_number = rating_number\n review_obj.date = date\n review_obj.save()\n # update the average_rating and 
votecount for the movie.\n update_movie_rating_record(movie_id, float(rating_number - prev_rating_number), 'edit')\n # return msg\n data['success'] = True\n data['msg'] = 'successfully edit review'\n return JsonResponse(data)\n\n else:\n data['msg'] = 'please use POST'\n return JsonResponse(data)", "def review_vote_put_handler(review_id, user):\n def fetch_params():\n placet = Parser.bool('json', 'placet')\n return placet\n review = Review.query.get_or_404(str(review_id))\n if review.is_archived is True:\n raise NotFound\n placet = fetch_params()\n if review.user_id == user.id:\n raise InvalidRequest(desc='You cannot rate your own review.')\n if user.is_vote_limit_exceeded is True and user.has_voted(review) is False:\n raise LimitExceeded('You have exceeded your limit of votes per day.')\n if placet is True and user.user_type not in review.review_class.upvote:\n raise InvalidRequest(desc='You are not allowed to upvote this review.')\n if placet is False and user.user_type not in review.review_class.downvote:\n raise InvalidRequest(desc='You are not allowed to downvote this review.')\n Vote.create(user, review, placet) # overwrites an existing vote, if needed\n return jsonify(message='Request processed successfully')", "def add_review(self, rid, review, exts, w2v, threshold):\n self.rids.append(rid)\n self.reviews.append(review)\n cur_exts = []\n for ext in exts:\n if len(ext.strip()) < 1:\n continue\n opn, asp, att, pol = ext.split(\",\")\n ext_obj = Extraction(opn, asp, att, pol, w2v, threshold)\n if ext_obj.is_valid and ext_obj.emb is not None:\n cur_exts.append(ext_obj)\n self.exts.append(cur_exts)", "def review(self) -> object:\n return self._review", "def update_chart_review(self, request):\n data = request.data\n project_id = data.get('project_id', None)\n patient_id = data.get('patient_id', None)\n crf_template_id = data.get('crf_template_id', None)\n cohort_id = data.get('cohort_id', None)\n instance = None\n try:\n instance = PatientChartReview.objects.get(\n project_id=project_id, patient_id=patient_id, crf_template_id=crf_template_id, cohort_id=cohort_id)\n except PatientChartReview.DoesNotExist:\n raise Http404\n serializer = self.get_serializer(instance, data=data)\n if not serializer.is_valid():\n return Response({'errors': serializer.errors}, status=status.HTTP_400_BAD_REQUEST)\n\n updated_obj = serializer.save()\n chart_review = PatientChartReviewRetrieveSerializer(updated_obj)\n return Response(chart_review.data, status=status.HTTP_200_OK)", "def update_review(self, new_review):\n new_review = new_review.convert_review_to_dict()\n self.review[\"status\"] = new_review[\"status\"]\n if new_review[\"last_reviewed_by\"]:\n self.review[\"last_reviewed_by\"] = new_review[\"last_reviewed_by\"]\n self._upd_reviewers(new_review[\"reviewers\"])\n self.update_attrs(review_status=new_review[\"status\"])", "def perform_create(self, serializer):\n # required for perform_create(); creates the score object in database\n score = serializer.save()\n\n # trigger update function for engine (bayes update if adaptive)\n log.debug(\"Triggering engine update from score\")\n engine = get_engine()\n engine.update_from_score(score.learner, score.activity, score.score)" ]
[ "0.7153411", "0.6968481", "0.6635294", "0.6537341", "0.65146786", "0.6502004", "0.6494259", "0.64685404", "0.6442921", "0.64043444", "0.64022094", "0.630013", "0.6273182", "0.62641233", "0.62522465", "0.6238617", "0.6231669", "0.62089", "0.62036234", "0.62017196", "0.6173334", "0.6168386", "0.6154029", "0.6145187", "0.61419266", "0.6126825", "0.6079386", "0.6055918", "0.60305315", "0.60248023", "0.6005152", "0.5999101", "0.5991105", "0.59835577", "0.5980728", "0.59579426", "0.59005755", "0.58906096", "0.58751357", "0.5847081", "0.58297646", "0.5818238", "0.5791114", "0.5734569", "0.5719163", "0.57086885", "0.5677793", "0.56692755", "0.56636566", "0.56477046", "0.5630343", "0.5626879", "0.5620331", "0.5615129", "0.5602657", "0.55790496", "0.55723083", "0.5550905", "0.55486083", "0.5533023", "0.55316633", "0.5501717", "0.5493033", "0.54601955", "0.5456072", "0.5448152", "0.54452366", "0.5442015", "0.5419411", "0.5406953", "0.53887486", "0.5376381", "0.5372586", "0.5370106", "0.5369318", "0.53667617", "0.5345049", "0.53390294", "0.53179526", "0.531404", "0.5313988", "0.52827686", "0.527977", "0.52446604", "0.5241565", "0.5231871", "0.52297", "0.52265775", "0.522579", "0.52240944", "0.522207", "0.52159256", "0.52102625", "0.52056384", "0.5203399", "0.5192515", "0.5191138", "0.5182768", "0.51721007", "0.5147915" ]
0.6188052
20
Plot the new state of the system
def plotSate(s,i,seed):
    fig, ax = plt.subplots()
    im = ax.imshow(s)
    plt.xticks([i for i in range(dim)], "")
    plt.yticks([i for i in range(dim)], "")
    fig.tight_layout()
    plt.savefig("Systems/" + str(dim) + "_" + str(seed) + "/Images/" + str(i) + ".jpeg",quality=80,optimize=True, dpi=80,progressive=True,transparent=True)
    fig.clear()
    plt.close(fig)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot(self) -> None:\n if self.__fig is None:\n self.__fig = plt.figure()\n\n xv = []\n yv = []\n for x in np.arange(self.state_min(), self.state_max(), self.state_step()):\n xv.append(x)\n yv.append(self.reward(x))\n ax = self.__fig.gca()\n ax.set_xlabel('X (State)')\n ax.set_ylabel('Y (Reward)')\n ax.set_title('Reward Function')\n ax.plot(xv, yv)\n plt.pause(self.__plot_pause)\n plt.show(block=False)\n return", "def update_plot():\n pass", "def plot(self):\n\t\tself.plotOfHeatingCurrent().plot()", "def plot_ins_state(time, state):\n pylab.ion()\n\n plot_trajectory(state[:,0], state[:,1], state[:,2])\n\n\n # Plot position vs. time\n\n\n pylab.figure()\n pylab.subplot(311)\n pylab.plot(time, state[:,0],'r')\n pylab.xlabel('time (s)')\n pylab.ylabel('$\\\\phi$, rad')\n pylab.title('Latitude')\n pylab.grid(True)\n\n pylab.subplot(312)\n pylab.plot(time, state[:,1],'g')\n pylab.xlabel('time (s)')\n pylab.ylabel('$\\\\lambda$, rad')\n pylab.title('Longitude')\n pylab.grid(True)\n\n pylab.subplot(313)\n pylab.plot(time, state[:,2],'b')\n pylab.xlabel('time, s')\n pylab.ylabel('$h$, m')\n pylab.title('Altitude')\n pylab.grid(True)\n pylab.show()\n\n\n # Plot velocity vs. time\n pylab.figure()\n pylab.plot(time, state[:,3:6])\n pylab.xlabel('time, s')\n pylab.ylabel('Vn, Ve, Vd')\n pylab.title('Velocity vs. time')\n\n pylab.grid(True)\n pylab.show()\n\n # Plot acceleration vs. time\n pylab.figure()\n pylab.plot(time, state[:,6:9])\n pylab.xlabel('time, s')\n pylab.ylabel('an, ae, ad')\n pylab.title('Acceleration vs. time')\n\n pylab.grid(True)\n pylab.show()\n pylab.ioff()\n\n # Plot quaternions vs. time\n pylab.figure()\n pylab.plot(time, state[:,9:])\n pylab.xlabel('time, s')\n pylab.ylabel('q0, q1, q2, q3')\n pylab.title('Quaternion vs. time')\n\n pylab.grid(True)\n pylab.show()\n pylab.ioff()", "def store(self, state):\n if self.interactive:\n self._fig.clear()\n fig = self._fig\n else:\n fig = plt.figure()\n\n self._plot_function(fig, copy_state(state))\n\n fig.canvas.draw()\n if not self.interactive:\n plt.show()", "def draw(self):\r\n dt = m.get_instance().dt\r\n self.perception_history = m.get_instance().larvae[0].history\r\n t = np.arange(0,len(self.perception_history)*dt,dt)\r\n plt.plot(t,self.perception_history)\r\n plt.title('Perception History')\r\n plt.xlabel('Time (s)')\r\n plt.ylabel('Perception (uM)')\r\n plt.show()", "def plot(self):\n pass", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='darkgreen')\n self.canvas.draw()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='darkgreen')\n self.canvas.draw()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='darkgreen')\n self.canvas.draw()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='darkgreen')\n 
self.canvas.draw()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='darkgreen')\n self.canvas.draw()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='red')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='gray')\n self.canvas.draw()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.canvas.draw()", "def plot_graph(self) -> None:", "def plot(self):\n fig, ax = plt.subplots()\n ax.set_title(\"Covid-19 Progression Simulation\")\n ax.set_xlabel(\"X Position\")\n ax.set_ylabel(\"Y Position\")\n\n x_values = np.array([])\n y_values = np.array([])\n color_values = np.array([])\n\n for p in self.persons:\n x_values = np.append(x_values, p.position[0])\n y_values = np.append(y_values, p.position[1])\n color_values = np.append(color_values, self.color(p.state))\n\n colors = [\"green\", \"red\", \"blue\", \"black\"]\n\n scatter = ax.scatter(x_values, y_values,\n c=color_values, vmin=0, vmax=100)\n\n ax.legend(handles=self.legend_elements, loc='upper right')\n\n self.anim = manim.FuncAnimation(\n fig, self.animate, interval=self.update_interval, fargs=(self, ax, scatter))\n\n plt.tight_layout()\n plt.show()", "def visualize_environment(self,env_state):\n fig=plt.figure(figsize=self.figsize)\n ax=plt.subplot(111)\n #Plot the targets\n plt.plot([i[0] for i in self.coordinates__targets],\\\n [i[1] for i in self.coordinates__targets],\\\n marker='x',markersize=15,linestyle='None',color='k',label='Target')\n plot_target_values = True\n if plot_target_values:\n for i ,t in enumerate(self.coordinates__targets):\n plt.text(t[0],t[1],self.target_values[i])\n #Plot the towers\n tower_colors = ['r','b','g']\n for tk in xrange(self.N_tower_kinds):\n plt.plot([i[0] for i in self.coordinates__tower_sites[tk]],\\\n [i[1] for i in self.coordinates__tower_sites[tk]],\\\n marker='o',markersize=10,linestyle='None',color=tower_colors[tk],alpha=.5,label='Tower {} Sites'.format(tk+1))\n if env_state == 'solved':\n for tk in xrange(self.N_tower_kinds):\n plt.plot([i[0] for i in self.coordinates__solved_towers[tk]],\\\n [i[1] for i in self.coordinates__solved_towers[tk]],\\\n marker='^',markersize=20,linestyle='None',color=tower_colors[tk],label='Tower {} Placed'.format(tk+1))\n for x,y,w,h in self.coordinates__obstacles:\n r = plt.Rectangle((x,y),w,h,fc='c')\n ax.add_patch(r)\n plt.xlim(0,self.map_dimensions[1])\n plt.ylim(0,self.map_dimensions[0])\n plt.legend(numpoints=1,loc='best')\n savename = 'SolvedMap.png' if env_state == 'solved' else 'InitialMap.png'\n plt.savefig(savename)", "def plot_state(self, **options):\n f = plt.gcf()\n if len(f.axes) < 2:\n f, _ = plt.subplots(1, 2, figsize=(\n 13, 6), sharex='row', sharey='row')\n\n gp = self.target_model\n\n # Draw the GP surface\n visin.draw_contour(\n gp.predict_mean,\n gp.bounds,\n self.target_model.parameter_names,\n title='GP target surface',\n points=gp.X,\n axes=f.axes[0],\n **options)\n\n # Draw the latest acquisitions\n if options.get('interactive'):\n point = gp.X[-1, :]\n if len(gp.X) > 1:\n f.axes[1].scatter(*point, color='red')\n\n displays = 
[gp.instance]\n\n if options.get('interactive'):\n from IPython import display\n displays.insert(\n 0,\n display.HTML('<span><b>Iteration {}:</b> Acquired {} at {}</span>'.format(\n len(gp.Y), gp.Y[-1][0], point)))\n\n # Update\n visin._update_interactive(displays, options)\n\n acq_index = self._get_acquisition_index(self.state['n_batches'])\n\n def acq(x):\n return self.acquisition_method.evaluate(x, acq_index)\n\n # Draw the acquisition surface\n visin.draw_contour(\n acq,\n gp.bounds,\n self.target_model.parameter_names,\n title='Acquisition surface',\n points=None,\n axes=f.axes[1],\n **options)\n\n if options.get('close'):\n plt.close()", "def plot(self):\n\t\tself.plotOfTF().plot()", "def plot(self):\n\t\tself.plotOfLoopVoltage()", "def show():\n\tplt.show()", "def plot():\n pass", "def plot_states(self,X,**kwargs):\n import numpy as np\n import matplotlib.pyplot as plt\n\n assert (np.shape(X)[0] in [6,8]) \\\n and (np.shape(X)[1] == len(self.time)) \\\n and (str(type(X)) == \"<class 'numpy.ndarray'>\"), \\\n \"X must be a (6,N) or (8,N) numpy.ndarray, where N is the length of t.\"\n\n\n Return = kwargs.get(\"Return\",False)\n assert type(Return)==bool, \"Return must be either True or False.\"\n\n InputString = kwargs.get(\"InputString\",None)\n assert InputString is None or type(InputString)==str, \"InputString must either be None or a str.\"\n\n NumStates = np.shape(X)[0]\n X[:6,:] = 180*X[:6,:]/np.pi # converting to deg and deg/s\n X[0,:] -= 180 # centering joint angle at 0 deg.\n if NumStates == 6:\n NumColumns = 2\n NumRows = 3\n else:\n NumColumns = 4\n NumRows = 2\n\n ColumnNumber = [el%2 for el in np.arange(0,NumStates,1)]\n RowNumber = [int(el/2) for el in np.arange(0,NumStates,1)]\n Units = [\n \"(Deg)\",\"(Deg/s)\",\n \"(Deg)\",\"(Deg/s)\",\n \"(Deg)\",\"(Deg/s)\",\n \"(N)\",\"(N)\"]\n if InputString is None:\n DescriptiveTitle = \"Plotting States vs. 
Time\"\n else:\n assert type(InputString)==str, \"InputString must be a string\"\n DescriptiveTitle = InputString + \" Driven\"\n if NumRows == 1:\n FigShape = (NumColumns,)\n else:\n FigShape = (NumRows,NumColumns)\n Figure = kwargs.get(\"Figure\",None)\n assert (Figure is None) or \\\n ( (type(Figure)==tuple) and \\\n (str(type(Figure[0]))==\"<class 'matplotlib.figure.Figure'>\") and\\\n (np.array([str(type(ax))==\"<class 'matplotlib.axes._subplots.AxesSubplot'>\" \\\n for ax in Figure[1].flatten()]).all()) and \\\n (Figure[1].shape == FigShape)\\\n ),\\\n (\"Figure can either be left blank (None) or it must be constructed from data that has the same shape as X.\\ntype(Figure) = \" + str(type(Figure)) + \"\\ntype(Figure[0]) = \" + str(type(Figure[0])) + \"\\nFigure[1].shape = \" + str(Figure[1].shape) + \" instead of (\" + str(NumRows) + \",\" + str(NumColumns) + \")\" + \"\\ntype(Figure[1].flatten()[0]) = \" + str(type(Figure[1].flatten()[0])))\n if Figure is None:\n fig, axes = plt.subplots(NumRows,NumColumns,figsize=(3.5*NumColumns,2*NumRows + 2),sharex=True)\n plt.subplots_adjust(top=0.85,bottom=0.15,left=0.075,right=0.975)\n plt.suptitle(DescriptiveTitle,Fontsize=20,y=0.975)\n for j in range(NumStates):\n axes[RowNumber[j],ColumnNumber[j]].spines['right'].set_visible(False)\n axes[RowNumber[j],ColumnNumber[j]].spines['top'].set_visible(False)\n axes[RowNumber[j],ColumnNumber[j]].plot(self.time,X[j,:])\n if not(RowNumber[j] == RowNumber[-1] and ColumnNumber[j]==0):\n plt.setp(axes[RowNumber[j],ColumnNumber[j]].get_xticklabels(), visible=False)\n # axes[RowNumber[j],ColumnNumber[j]].set_xticklabels(\\\n # [\"\"]*len(axes[RowNumber[j],ColumnNumber[j]].get_xticks()))\n else:\n axes[RowNumber[j],ColumnNumber[j]].set_xlabel(\"Time (s)\")\n axes[RowNumber[j],ColumnNumber[j]].set_title(r\"$x_{\" + str(j+1) + \"}$ \"+ Units[j])\n # if NumStates%5!=0:\n # [fig.delaxes(axes[RowNumber[-1],el]) for el in range(ColumnNumber[-1]+1,5)]\n else:\n fig = Figure[0]\n axes = Figure[1]\n for i in range(NumStates):\n if NumRows != 1:\n axes[RowNumber[i],ColumnNumber[i]].plot(self.time,X[i,:])\n else:\n axes[ColumnNumber[i]].plot(self.time,X[i,:])\n X[0,:] += 180 # returning to original frame\n X[:6,:] = np.pi*X[:6,:]/180 # returning to radians\n if Return == True:\n return((fig,axes))\n else:\n plt.show()", "def update(self):\n\t\tprint(\"Plotting \" + str(str(self.values[\"Trial\"][1]) + \" at \" + str(self.values[\"Trial\"][0]) + \"\\n\"))\n\t\tif self.clear:\n\t\t\tself.stream.write(dict(x=[], y=[]))\n\t\telse:\n\t\t\tself.stream.write(dict(x=self.values[\"Trial\"][0], y=self.values[\"Trial\"][1]))", "def visualize(self, time, pred, true):\n plt.plot(time, true, label='Actual')\n plt.plot(time, pred, label='Predicted')\n plt.xlabel('Time')\n plt.ylabel('Price ($)')\n plt.legend(bbox_to_anchor=(0.1, 1), loc=2, borderaxespad=0.,\n prop={'size': 14})\n plt.show()", "def _update_plot(self):\n\n self.T_ex[:-1] = self.T_ex[1:]\n self.T_ex[-1] = self.ensemble.T_ex\n self.plot_T_ex[0].set_ydata(self.T_ex)\n self.T_kin[:-1] = self.T_kin[1:]\n self.T_kin[-1] = self.ensemble.T_kin\n self.plot_T_kin[0].set_ydata(self.T_kin)\n self.canvas.draw()\n\n renderer = self.canvas.get_renderer()\n raw_data = renderer.tostring_rgb()\n surf = pygame.image.fromstring(raw_data,\n (self.plot_width, self.disp_height),\n \"RGB\")\n self.game_display.blit(surf, (self.disp_width, 0))", "def update_plot(self,ax):\n self.replot(ax)", "def display_state(self):\n # self.__display(self.state)\n self.__draw(self.state)", "def 
overview(self, minState=5):\n n = 600\n \n ### first plot: the RTOFFSETs and STATES\n plt.figure(10)\n plt.clf()\n plt.subplots_adjust(hspace=0.05, top=0.95, left=0.05,\n right=0.99, wspace=0.00, bottom=0.1)\n ax1 = plt.subplot(n+11)\n try:\n print self.insmode+' | pri:'+\\\n self.getKeyword('OCS PS ID')+' | sec:'+\\\n self.getKeyword('OCS SS ID')\n \n plt.title(self.filename+' | '+self.insmode+' | pri:'+\n self.getKeyword('OCS PS ID')+' | sec:'+\n self.getKeyword('OCS SS ID'))\n except:\n pass\n plt.plot(self.raw['OPDC'].data.field('TIME'),\n self.raw['OPDC'].data.field('FUOFFSET')*1e3,\n color=(1.0, 0.5, 0.0), label=self.DLtrack+' (FUOFFSET)',\n linewidth=3, alpha=0.5)\n plt.legend(prop={'size':9})\n plt.ylabel('(mm)')\n plt.xlim(0)\n \n plt.subplot(n+12, sharex=ax1) # == DDL movements\n \n plt.plot(self.raw['DOPDC'].data.field('TIME'),\n 1e3*self.raw['DOPDC'].data.field(self.DDLtrack),\n color=(0.0, 0.5, 1.0), linewidth=3, alpha=0.5,\n label=self.DDLtrack)\n plt.plot(self.raw['DOPDC'].data.field('TIME'),\n 1e3*self.raw['DOPDC'].data.field('PSP'),\n color=(0.0, 0.5, 1.0), linewidth=1, alpha=0.9,\n label='PSP', linestyle='dashed')\n plt.legend(prop={'size':9})\n plt.ylabel('(mm)')\n plt.xlim(0)\n \n plt.subplot(n+13, sharex=ax1) # == states\n plt.plot(self.raw['OPDC'].data.field('TIME'),\n self.raw['OPDC'].data.field('STATE'),\n color=(1.0, 0.5, 0.0), label='OPDC')\n plt.plot(self.raw['DOPDC'].data.field('TIME'),\n self.raw['DOPDC'].data.field('STATE'),\n color=(0.0, 0.5, 1.0), label='DOPDC')\n plt.legend(prop={'size':9})\n plt.ylabel('STATES')\n yl=plt.ylim()\n plt.ylim(yl[0]-1, yl[1]+1)\n plt.xlim(0)\n ### fluxes\n plt.subplot(n+14, sharex=ax1)\n try:\n fsua_dark = self.fsu_calib[('FSUA', 'DARK')][0,0]\n fsub_dark = self.fsu_calib[('FSUB', 'DARK')][0,0]\n fsua_alldark = self.fsu_calib[('FSUA', 'DARK')].sum(axis=1)[0]\n fsub_alldark = self.fsu_calib[('FSUB', 'DARK')].sum(axis=1)[0]\n except:\n print 'WARNING: there are no FSUs calibrations in the header'\n fsua_dark = 0.0\n fsub_dark = 0.0\n fsua_alldark = 0.0\n fsub_alldark = 0.0\n\n M0 = 17.5\n fluxa = (self.raw['IMAGING_DATA_FSUA'].data.field('DATA1')[:,0]+\n self.raw['IMAGING_DATA_FSUA'].data.field('DATA2')[:,0]+\n self.raw['IMAGING_DATA_FSUA'].data.field('DATA3')[:,0]+\n self.raw['IMAGING_DATA_FSUA'].data.field('DATA4')[:,0]-\n fsua_alldark)/\\\n (4*self.getKeyword('ISS PRI FSU1 DIT'))\n print 'FLUX FSUA (avg, rms):', round(fluxa.mean(), 0), 'ADU/s',\\\n round(100*fluxa.std()/fluxa.mean(), 0), '%'\n print ' -> pseudo mag = '+str(M0)+' - 2.5*log10(flux) =',\\\n round(M0-2.5*np.log10(fluxa.mean()),2)\n fluxb = (self.raw['IMAGING_DATA_FSUB'].data.field('DATA1')[:,0]+\n self.raw['IMAGING_DATA_FSUB'].data.field('DATA2')[:,0]+\n self.raw['IMAGING_DATA_FSUB'].data.field('DATA3')[:,0]+\n self.raw['IMAGING_DATA_FSUB'].data.field('DATA4')[:,0]-\n fsub_alldark)/\\\n (4*self.getKeyword('ISS PRI FSU2 DIT'))\n print 'FLUX FSUB (avg, rms):', round(fluxb.mean(), 0), 'ADU/s',\\\n round(100*fluxb.std()/fluxb.mean(), 0), '%'\n print ' -> pseudo mag = '+str(M0)+' - 2.5*log10(flux) =',\\\n round(M0-2.5*np.log10(fluxb.mean()),2)\n plt.plot(self.raw['IMAGING_DATA_FSUA'].data.field('TIME'),\\\n fluxa/1000, color='b', alpha=0.5, label='FSUA')\n plt.plot(self.raw['IMAGING_DATA_FSUB'].data.field('TIME'),\\\n fluxb/1000, color='r', alpha=0.5, label='FSUB')\n\n plt.ylim(1)\n plt.legend(prop={'size':9})\n plt.ylabel('flux - DARK (kADU)')\n plt.xlim(0)\n plt.subplot(n+15, sharex=ax1)\n try:\n # -- old data version\n 
plt.plot(self.raw['IMAGING_DATA_FSUA'].data.field('TIME'),\n self.raw['IMAGING_DATA_FSUA'].data.field('OPDSNR'),\n color='b', alpha=0.5, label='FSUA SNR')\n plt.plot(self.raw['IMAGING_DATA_FSUB'].data.field('TIME'),\n self.raw['IMAGING_DATA_FSUB'].data.field('OPDSNR'),\n color='r', alpha=0.5, label='FSUB SNR')\n except:\n plt.plot(self.raw['IMAGING_DATA_FSUA'].data.field('TIME'),\n self.raw['IMAGING_DATA_FSUA'].data.field(self.OPDSNR),\n color='b', alpha=0.5, label='FSUA SNR')\n plt.plot(self.raw['IMAGING_DATA_FSUB'].data.field('TIME'),\n self.raw['IMAGING_DATA_FSUB'].data.field(self.OPDSNR),\n color='r', alpha=0.5, label='FSUB SNR')\n plt.legend(prop={'size':9})\n \n A = (self.raw['IMAGING_DATA_FSUA'].data.field('DATA1')[:,0]-\n self.fsu_calib[('FSUA', 'DARK')][0,0])/\\\n (self.fsu_calib[('FSUA', 'FLAT')][0,0]-\n 2*self.fsu_calib[('FSUA', 'DARK')][0,0])\n B = (self.raw['IMAGING_DATA_FSUA'].data.field('DATA2')[:,0]-\n self.fsu_calib[('FSUA', 'DARK')][0,1])/\\\n (self.fsu_calib[('FSUA', 'FLAT')][0,1]-\n 2*self.fsu_calib[('FSUA', 'DARK')][0,1])\n C = (self.raw['IMAGING_DATA_FSUA'].data.field('DATA3')[:,0]-\n self.fsu_calib[('FSUA', 'DARK')][0,2])/\\\n (self.fsu_calib[('FSUA', 'FLAT')][0,2]-\n 2*self.fsu_calib[('FSUA', 'DARK')][0,2])\n D = (self.raw['IMAGING_DATA_FSUA'].data.field('DATA4')[:,0]-\n self.fsu_calib[('FSUA', 'DARK')][0,3])/\\\n (self.fsu_calib[('FSUA', 'FLAT')][0,3]-\n 2*self.fsu_calib[('FSUA', 'DARK')][0,3])\n snrABCD_a = ((A-C)**2+(B-D)**2)\n snrABCD_a /= ((A-C).std()**2+ (B-D).std()**2)\n #plt.plot(self.raw['IMAGING_DATA_FSUA'].data.field('TIME'),\n # snrABCD_a, color='b', alpha=0.5, linestyle='dashed')\n \n A = (self.raw['IMAGING_DATA_FSUB'].data.field('DATA1')[:,0]-\n self.fsu_calib[('FSUB', 'DARK')][0,0])/\\\n (self.fsu_calib[('FSUB', 'FLAT')][0,0]-\n 2*self.fsu_calib[('FSUB', 'DARK')][0,0])\n B = (self.raw['IMAGING_DATA_FSUB'].data.field('DATA2')[:,0]-\n self.fsu_calib[('FSUB', 'DARK')][0,1])/\\\n (self.fsu_calib[('FSUB', 'FLAT')][0,1]-\n 2*self.fsu_calib[('FSUB', 'DARK')][0,1])\n C = (self.raw['IMAGING_DATA_FSUB'].data.field('DATA3')[:,0]-\n self.fsu_calib[('FSUB', 'DARK')][0,2])/\\\n (self.fsu_calib[('FSUB', 'FLAT')][0,2]-\n 2*self.fsu_calib[('FSUB', 'DARK')][0,2])\n D = (self.raw['IMAGING_DATA_FSUB'].data.field('DATA4')[:,0]-\n self.fsu_calib[('FSUB', 'DARK')][0,3])/\\\n (self.fsu_calib[('FSUB', 'FLAT')][0,3]-\n 2*self.fsu_calib[('FSUB', 'DARK')][0,3])\n \n snrABCD_b = ((A-C)**2+(B-D)**2)\n snrABCD_b /= ((A-C).std()**2+ (B-D).std()**2)\n #plt.plot(self.raw['IMAGING_DATA_FSUB'].data.field('TIME'),\n # snrABCD_b, color='r', alpha=0.5, linestyle='dashed') \n \n # -- SNR levels:\n #plt.hlines([self.getKeyword('INS OPDC OPEN'),\n # self.getKeyword('INS OPDC CLOSE'),\n # self.getKeyword('INS OPDC DETECTION')],\n # self.raw['IMAGING_DATA_FSUB'].data.field('TIME').min(),\n # self.raw['IMAGING_DATA_FSUB'].data.field('TIME').max(),\n # color=(1.0, 0.5, 0.0))\n #plt.hlines([self.getKeyword('INS DOPDC OPEN'),\n # self.getKeyword('INS DOPDC CLOSE'),\n # self.getKeyword('INS DOPDC DETECTION')],\n # self.raw['IMAGING_DATA_FSUB'].data.field('TIME').min(),\n # self.raw['IMAGING_DATA_FSUB'].data.field('TIME').max(),\n # color=(0.0, 0.5, 1.0))\n # -- plot thresholds\n plt.ylabel('SNR')\n plt.xlim(0)\n \n if self.getKeyword('OCS DET IMGNAME')=='PACMAN_OBJ_ASTRO_':\n # == dual FTK\n plt.subplot(n+16, sharex=ax1)\n plt.ylabel('PRIMET ($\\mu$m)')\n #met = interp1d(np.float_(self.raw['METROLOGY_DATA'].\\\n # data.field('TIME')),\\\n # self.raw['METROLOGY_DATA'].data.field('DELTAL'),\\\n # kind 
= 'linear', bounds_error=False, fill_value=0.0)\n met = lambda x: np.interp(x,\n np.float_(self.raw['METROLOGY_DATA'].data.field('TIME')),\n self.raw['METROLOGY_DATA'].data.field('DELTAL'))\n metro = met(self.raw['DOPDC'].data.field('TIME'))*1e6\n n_ = min(len(self.raw['DOPDC'].data.field('TIME')),\n len(self.raw['OPDC'].data.field('TIME')))\n\n plt.plot(self.raw['DOPDC'].data.field('TIME'),\n metro, color=(0.5,0.5,0.), label='A-B')\n\n w1 = np.where((self.raw['OPDC'].data.field('STATE')[:n_]>=minState)*\\\n (self.raw['OPDC'].data.field('STATE')[:n_]<=7))\n try:\n print 'OPDC FTK stat:', round(100*len(w1[0])/float(n_), 1), '%'\n except:\n print 'OPDC FTK stat: 0%'\n\n w1 = np.where((self.raw['DOPDC'].data.field('STATE')[:n_]>=minState)*\\\n (self.raw['DOPDC'].data.field('STATE')[:n_]<=7))\n try:\n print 'DOPDC FTK stat:', round(100*len(w1[0])/float(n_), 1), '%'\n except:\n print 'DOPDC FTK stat: 0%'\n\n w = np.where((self.raw['DOPDC'].data.field('STATE')[:n_]>=minState)*\\\n (self.raw['DOPDC'].data.field('STATE')[:n_]<=7)*\\\n (self.raw['OPDC'].data.field('STATE')[:n_]>=minState)*\\\n (self.raw['OPDC'].data.field('STATE')[:n_]<=7))\n try:\n print 'DUAL FTK stat:', round(100*len(w[0])/float(n_),1), '%'\n except:\n print 'DUAL FTK stat: 0%'\n\n plt.xlim(0)\n plt.plot(self.raw['DOPDC'].data.field('TIME')[w],\n metro[w], '.g', linewidth=2,\n alpha=0.5, label='dual FTK')\n #plt.legend()\n if len(w[0])>10 and False:\n coef = np.polyfit(self.raw['DOPDC'].data.field('TIME')[w],\n metro[w], 2)\n plt.plot(self.raw['DOPDC'].data.field('TIME'),\n np.polyval(coef, self.raw['DOPDC'].\n data.field('TIME')),\n color='g')\n plt.ylabel('metrology')\n\n print 'PRIMET drift (polyfit) :', 1e6*coef[1], 'um/s'\n slope, rms, synth = NoisySlope(self.raw['DOPDC'].\n data.field('TIME')[w],\n metro[w], 3e6)\n plt.figure(10)\n yl = plt.ylim()\n plt.plot(self.raw['DOPDC'].data.field('TIME')[w],\n synth, color='r')\n plt.ylim(yl)\n print 'PRIMET drift (NoisySlope):',\\\n slope*1e6,'+/-', rms*1e6, 'um/s'\n else:\n # == scanning\n plt.subplot(n+16, sharex=ax1)\n fringesOPDC = \\\n self.raw['IMAGING_DATA_'+self.primary_fsu].data.field('DATA1')[:,0]-\\\n self.raw['IMAGING_DATA_'+self.primary_fsu].data.field('DATA3')[:,0]\n \n fringesDOPDC =\\\n self.raw['IMAGING_DATA_'+self.secondary_fsu].data.field('DATA1')[:,0]-\\\n self.raw['IMAGING_DATA_'+self.secondary_fsu].data.field('DATA3')[:,0]\n \n plt.plot(self.raw['IMAGING_DATA_'+self.primary_fsu].data.field('TIME'),\n scipy.signal.wiener(fringesOPDC/fringesOPDC.std()),\n color=(1.0, 0.5, 0.0), alpha=0.6,\n label=self.primary_fsu+'/OPDC')\n plt.plot(self.raw['IMAGING_DATA_'+self.secondary_fsu].data.field('TIME'),\n scipy.signal.wiener(fringesDOPDC/fringesDOPDC.std()),\n color=(0.0, 0.5, 1.0), alpha=0.6,\n label=self.secondary_fsu+'/DOPDC')\n plt.legend(prop={'size':9})\n plt.ylabel('A-C')\n plt.xlabel('time stamp ($\\mu$s)')\n return", "def redraw(self, state: EngineeringState) -> None:\n pass", "def show( self ):\n if self.changed:\n self._update_ax() \n self.changed = False", "def update(self):\n self.plot.draw()\n \n func=str(self.edit1b.currentText())\n if self.win.test()==0:\n x=np.linspace(0,10,200)\n elif self.win.test()==1:\n x=np.linspace(0,0.40,200)\n \n pattern1=r'Steel'\n pattern2=r'Aluminium'\n pattern3=r'[\\d]+'\n \n if (func!='Comparison Chart'):\n self.edit2b.setDisabled(False)\n self.edit3b.setDisabled(False)\n self.edit4b.setDisabled(False)\n if (func=='Quenched/Tempered Steel'):\n alpha = 0.0025\n elif (func=='Annealed Steel'):\n alpha = 0.01\n elif 
(func=='Steel (input Su)'):\n S = str(self.edit2b.text())\n if (self.win.test()==0):\n S = str(float(S)/6.895)\n alpha = notch.alpha(eval(S))\n elif (func=='Aluminium Alloy 356.0 as cast'):\n rho = 0.08\n elif (func=='Aluminium Alloy 6061'):\n rho = 0.025\n elif (func=='Aluminium Alloy 7075'):\n rho = 0.015\n elif (func=='Material dropdown'):\n pass\n \n y1=[]\n if re.search(pattern1,func):\n Su=notch.su_s(alpha)\n if (self.win.test()==0):\n Su = Su*6.895\n for i in range(len(x)):\n y1.append(notch.nsp(alpha,x[i],self.win.test()))\n y=np.asarray(y1)\n if (re.search(pattern3,str(self.edit3b.text()))):\n r=eval(str(self.edit3b.text()))\n self.edit4b.setText(str(notch.nsp(alpha,r,self.win.test())))\n elif re.search(pattern2,func):\n Su=notch.su_a(rho)\n if (self.win.test()==0):\n Su = Su*6.895\n for i in range(len(x)):\n y1.append(notch.nsn(rho,x[i],self.win.test()))\n y=np.asarray(y1)\n if (re.search(pattern3,str(self.edit3b.text()))):\n r=eval(str(self.edit3b.text()))\n self.edit4b.setText(str(notch.nsn(rho,r,self.win.test())))\n \n self.edit2b.setText(str(Su))\n func1 = 'Steel (Su='+str(self.edit2b.text())+')'\n if (func!='Steel (input Su)'):\n self.plot.redraw(x,y,func, self.xlabel)\n elif (func=='Steel (input Su)'):\n self.plot.redraw(x,y,func1, self.xlabel)\n \n elif (func=='Comparison Chart'):\n self.edit2b.setText(\"\")\n self.edit2b.setDisabled(True)\n self.edit3b.setText(\"\")\n self.edit3b.setDisabled(True)\n self.edit4b.setText(\"\")\n self.edit4b.setDisabled(True)\n self.plot.draw_comp(self.xlabel, self.win.test())", "def __draw(self, state:dict):\n _, ax = plt.subplots()\n ax.set_axis_off()\n tb = Table(ax, bbox=[0,0,1,1])\n\n width = height = 1.0 /9 \n\n\n for key in self.state.keys():\n # Add cells\n i,j = self.__display_table_map[key]\n tb.add_cell(i, j, width, height, text='{}'.format(state[key]), \n loc='center',facecolor= self.__color_map[key])\n\n ax.add_table(tb)\n plt.show()", "def plot_internal_controller_states(self, plot='z', **kwargs):\n \n # Check if trajectory is already computed\n if self.traj == None:\n self.compute_trajectory()\n \n plotter = graphical.TrajectoryPlotter( self )\n plotter.plot( self.traj, plot, **kwargs)", "def plot_history(self, filename):\r\n plt.figure(figsize=(12, 9))\r\n plt.plot(self.Objective_value)\r\n plt.xlabel('Iteration')\r\n plt.ylabel('Value')\r\n plt.title('Objective Function Values')\r\n # plt.savefig(filename)\r\n plt.show()\r\n return", "def plot_state(smac, model, x_points, y_points, x_smac, y_smac, step=None):\n from smac.optimizer.acquisition import EI\n\n # cost all points for x\n step = step or len(x_smac)\n x_smac_ = np.array([[x] for x in x_smac[:step]])\n y_smac_ = np.array([[y] for y in y_smac[:step]])\n # as an alternative, we could extract the points from the runhistory again\n # but these points will be scaled to a unit-hypercube\n # X, Y = smac.solver.rh2EPM.transform(runhistory)\n\n model.train(x_smac_, y_smac_)\n\n acq_func = EI(model=model)\n acq_func.update(model=model, eta=np.min(y_smac))\n\n x_points_ = np.array([[x] for x in x_points])\n acq_values = acq_func._compute(X=x_points_)[:, 0]\n\n # plot acquisition function\n y_mean, y_var = model.predict(x_points_)\n y_mean = y_mean[:, 0]\n y_std = np.sqrt(y_var)[:, 0]\n\n fig1 = plt.figure()\n ax1 = fig1.add_subplot(111)\n ax1.plot(x_points, acq_values)\n plt.title(\"Aquisition Function\")\n\n plt.savefig('fig%da.pdf' % step)\n\n # plot uncertainties\n fig1 = plt.figure()\n ax1 = fig1.add_subplot(111)\n ax1.plot(x_points, y_mean)\n ax1.fill_between(x_points, 
y_mean - y_std,\n y_mean + y_std, alpha=0.5)\n ax1.plot(x_smac[:step], y_smac[:step], 'bo')\n ax1.plot(x_smac[:step], y_smac[:step], 'ro')\n ax1.plot(x_points, y_points, '--')\n plt.title(\"Uncertainty Predictions\")\n\n plt.savefig('fig%db.pdf' % step)", "def show(self):\n plt.show()", "def plot(self):\n self.plotsite()\n self.plotbond()\n plt.show()", "def __init__(self, plot, parameters, states, state):\n _PlotInteraction.__init__(self, plot)\n self.parameters = parameters\n StateMachine.__init__(self, states, state)", "def __init__(self, n = 1, m = 1, p = 1):\n \n #############################\n # Parameters\n #############################\n\n # Dimensions\n self.n = n \n self.m = m \n self.p = p\n \n # Labels\n self.name = 'ContinuousDynamicSystem'\n self.state_label = []\n self.input_label = []\n self.output_label = []\n \n # Units\n self.state_units = []\n self.input_units = []\n self.output_units = []\n \n # Default Label and units\n for i in range(n):\n self.state_label.append('State '+str(i))\n self.state_units.append('')\n for i in range(m):\n self.input_label.append('Input '+str(i))\n self.input_units.append('')\n for i in range(p):\n self.output_label.append('Output '+str(i))\n self.output_units.append('')\n \n # Default state and input domain\n self.x_ub = np.zeros(self.n) +10 # States Upper Bounds\n self.x_lb = np.zeros(self.n) -10 # States Lower Bounds\n self.u_ub = np.zeros(self.m) +1 # Control Upper Bounds\n self.u_lb = np.zeros(self.m) -1 # Control Lower Bounds\n \n # Default state and inputs values \n self.xbar = np.zeros(self.n)\n self.ubar = np.zeros(self.m)\n \n # Plot params\n self.domain = [ (-10,10) , (-10,10) , (-10,10) ]\n self.linestyle = 'o-'\n self.linestyle_plus = '--'\n self.linescolor = 'b'\n self.linescolor_plus = 'r'\n self.lines_plus = True # Bool to active second graphic outpout\n self.is_3d = False # Use 2d plot by default\n \n ################################\n # Variables\n ################################\n \n # Initial value for simulations\n self.x0 = np.zeros(self.n) \n \n # Result of last simulation\n self.traj = None\n \n # Cost function for evaluation\n # default is a quadratic cost function with diag Q and R matrices\n self.cost_function = costfunction.QuadraticCostFunction.from_sys(self)", "def render(self, mode='human', close=False):\n plt.figure(figsize=(20,12))\n plt.plot(self.history)\n plt.show()", "def plot_one_state_evolution(self, value, option='density'):\n\n fig = plt.figure(figsize=(16, 8), dpi=100)\n ax = fig.add_subplot(111)\n if option == 'density':\n ax.plot(np.squeeze(np.array(self.est_density[value, :])), 'r--', label='Estimated')\n elif option == 'flow':\n if value == 0:\n # plot qin\n index = self.x_index['qin']\n elif value == self.num_cells:\n # plot qout\n index = self.x_index['qout']\n elif value in self.x_index['onramp'].keys():\n # plot onramp flow\n index = self.x_index['onramp'][value]\n elif value in self.x_index['offramp'].keys():\n # plot offramp flow\n index = self.x_index['offramp'][value]\n else:\n raise Exception('KeyError: value must be the cell id')\n\n ax.plot(np.squeeze(np.array(self.est_state_all[index, :])), 'r--', label='Estimated')\n\n plt.title('{0} at {1}'.format(option, value))\n plt.xlabel('Time (step)')\n plt.ylabel('Value')\n\n plt.grid(True)\n plt.legend()\n\n plt.draw()", "def update_figure(self):\n\n self.draw()", "def scree_plot(self, ev):\n plt.scatter(range(1,len(ev)+1), ev)\n plt.plot(range(1,len(ev)+1), ev)\n plt.title(\"Scree Plot\")\n plt.xlabel(\"Factors\")\n 
plt.ylabel(\"Eigenvalue\")\n plt.grid()\n plt.show()", "def plot(self):\n\t\tself.plotOfSpect()", "async def plot(self, new=False) -> None:\n self._logger.debug(\"running\")\n self.figure.clear()\n self.figure.set_tight_layout(True)\n num_plots = len(self._plots)\n axes = None\n for i in range(num_plots):\n plot = self._plots[i]\n name = plot[0]\n active = plot[2]\n if active:\n if i == 0:\n axes = self.figure.add_subplot(1, 1, 1)\n axes.tick_params(axis='x', labelrotation=30)\n axes.set_ylabel(name, color='#1f77b4')\n await sleep(.001)\n if not new:\n await create_task(self.plot_device_data(axes, name))\n else:\n alt_axes = axes.twinx()\n alt_axes.set_ylabel(name, color='#ff7f0e')\n alt_axes.tick_params(axis='y', labelcolor='#ff7f0e')\n alt_axes.set_yticks(np.arange(0, 6, step=1))\n await sleep(.001)\n if not new:\n await create_task(self.plot_device_data(alt_axes, name))\n\n if not new:\n self.add_vert_lines()\n await sleep(.001)\n self.figure.canvas.draw()\n self._logger.debug(\"done\")", "def plot(self):\n\t\tself.plotOfSpect().plot()", "def learning_viz(self) :\n self.train\n history = self.history\n plot_loss(history)", "def _plot_update(self):\n omit_log = ['sens_log']\n for log_group, log_arrays in self.qbpm.log_names.items():\n for log_array in log_arrays:\n if log_array not in omit_log:\n self.curves[log_array].setData(self.qbpm.log_time, self.qbpm.log_arrays[log_array],clear=True)\n # self.fill.setCurves(self.curves['posz_sens_low_log'], self.curves['posz_sens_high_log'])", "def plot(self):\n\t\tself.plotOfIP().plot()", "def show_plot(self):\r\n\t\tself.generate_plot()\r\n\t\tplt.show()", "def plotLoss():\n # ssr\n ssr = np.log(gradientDescent(X, y)[1])\n # number of iterations \n iterations = np.log(np.arange(1, len(ssr) + 1, 1))\n # plot reduction of ssr\n plt.plot(iterations, ssr)\n # xlabel\n plt.xlabel(\"Iteration\")\n # ylabel\n plt.ylabel(\"SSR\")\n # title\n plt.title(\"Reduction of SSR by number of Iterations\")\n # show plot \n plt.show()", "def plot(self, iteration=None, stateVectorConv=None): \n r = [\"{0}\".format(self.__class__.__name__)]\n if iteration is not None:\n r.append(\"i: {0}\".format(iteration))\n fmt = lambda a : \", \".join([\"{0:.4g}\".format(float(v)) for v in a])\n r.append(\"stateVector: {0}\".format(fmt(self.stateVector)))\n if stateVectorConv is not None:\n r.append(\"stateVectorConv: {0:.4g}\".format(stateVectorConv))\n \n s = \"; \".join(r)\n \n if iteration is not None and self.verbose > 0:\n print(s)\n \n if self.verbose > 4:\n nplot = 2 + len(self.stateVector)\n fig = pyplot.figure()\n fig.subplots_adjust(left=0.17, bottom=0.09, right=0.98, \n top=0.92, wspace=0.12, hspace=0.2)\n ax = fig.add_subplot(nplot,1,1)\n ax.set_title(s)\n ax.set_ylabel(\"$R [sr^{-1}]$\")\n ax.plot(self.independentVariable, self.observation, 'k', \n label='measurement')\n ax.plot(self.independentVariable, self.modelCalculation, 'r', \n label='model')\n ax.legend(loc='lower right')\n \n l = fig.add_subplot(nplot,1,2)\n l.plot(self.independentVariable, \n (self.observation-self.modelCalculation)/self.observationError, \n 'k', label=\"err\")\n l.set_ylabel(\"$\\Delta R/\\sigma$\")\n \n color = ['k-', 'r-', 'b-', 'g-', 'k--', 'r--', 'b--', 'g--', 'k-.', \n 'r-.', 'b-.', 'g-.', 'k:', 'r:', 'b:', 'g:']\n for i in range(len(self.stateVector)):\n name = self.parameterNames[i]\n k = fig.add_subplot(nplot,1,3+i)\n k.plot(self.independentVariable, self.Jacobian[:, i], 'k')\n k.set_ylabel(r\"$\\partial R/\\partial ({0})$\".format(name.replace(\"_\", \" \")))\n \n 
k.set_xlabel(\"$\\lambda [nm]$\")\n \n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n \n if self.verbose > 1:\n fig.show()\n else:\n fig.savefig(\"{0}_{1}_{2}.pdf\".format(r[0], \n r[1].split()[1][:-1], \n (\"{0:02d}\".format(iteration) \n if iteration is not None \n else \"final\")), transparent=True)", "def s_plot(self):\n # We have to make sure the slot is right before truing to plot it\n error = self.check(self.machine.rotor)\n\n if error: # Error => Display it\n QMessageBox().critical(self, self.tr(\"Error\"), error)\n else: # No error => Plot the machine\n self.machine.plot()", "def show():\n plt.show()", "def show():\n plt.show()", "def show():\n plt.show()", "def force_draw(self):\n import matplotlib.pyplot as plt\n\n plt.show()", "def plot_state(mu, sigma, landmarks, timestep, observedLandmarks, z, window):\n\n plt.clf()\n plt.grid('on')\n \n draw_probe_ellipse(mu[:2], sigma[:2,:2], 0.6, 'r')\n plt.plot(landmarks['x'], landmarks['y'], 'k+', markersize=10, linewidth=5)\n\n for i in range(len(observedLandmarks)):\n\tif observedLandmarks[i]:\n\t plt.plot(mu[2*i + 3],mu[2*i + 4], 'bo', fillstyle='none', markersize=10, linewidth=5)\n \t draw_probe_ellipse(mu[2*i + 3:2*i+ 5], sigma[2*i + 3:2*i+ 5,2*i + 3:2*i + 5], 0.6, 'b')\n\n for i in range(len(z)):\n\tmX = mu[2*z[i]['id'] + 3]\n\tmY = mu[2*z[i]['id'] + 4]\n \tplt.plot([mu[0], mX], [mu[1], mY], color='k', linewidth=1)\n\n drawrobot(mu[:3], 'r', 3, 0.3, 0.3)\n plt.xlim([-2., 12.])\n plt.ylim([-2., 12.])\n\n if window:\n plt.draw()\n plt.pause(0.1)\n else:\n filename = '../ekf_%03d.png'.format(timestep)\n plt.savefig(filename)", "def visualize(self):\r\n self.aggregator.plot_loss()\r\n self.save_figure()", "def generate_plot(self):\r\n\t\tx, y = zip(*[p.p for p in self.universe])\r\n\t\tself.ax.cla()\r\n\t\tself.ax.plot(x, y, '.')\r\n\t\tself.ax.set_title('Universe at time: %d' % self.universe.time)\r\n\t\tself.ax.set_xlim([P_MU-4*P_STD, P_MU+4*P_STD])\r\n\t\tself.ax.set_ylim([P_MU-4*P_STD, P_MU+4*P_STD])", "def updateToplogy(self):\r\n raise NotImplementedError", "def updateState(self):\n\t\t# ask for current pose data\n\t\tcomm.write(b'id1 mav.pose_sensor get_local_data \\n')\n\t\t# update x value\n\t\tcomm.read_until(b'\"x\": ') # b'' as Telnet needs a bytes object instead of string since Python3\n\t\tread = comm.read_until(b',') # returns read values + finishing ','\n\t\tread = read[:-1] # cut that ','\n\t\tcurrent_state.x = float(read)\n\t\tself.state_x_label.set_text(\"%0.2f\" % current_state.x)\n\t\t# update y value\n\t\tcomm.read_until(b'\"y\": ')\n\t\tread = comm.read_until(b',')\n\t\tread = read[:-1]\n\t\tcurrent_state.y = float(read)\n\t\tself.state_y_label.set_text(\"%0.2f\" % current_state.y)\n\t\t# update z value\n\t\tcomm.read_until(b'\"z\": ')\n\t\tread = comm.read_until(b',')\n\t\tread = read[:-1]\n\t\tcurrent_state.z = float(read)\n\t\tself.state_z_label.set_text(\"%0.2f\" % current_state.z)\n\t\t# update yaw value\n\t\tcomm.read_until(b'\"yaw\": ')\n\t\tread = comm.read_until(b',')\n\t\tread = read[:-1]\n\t\tcurrent_state.psi = float(read)\n\t\tself.state_psi_label.set_text(\"%0.2f\" % current_state.psi)\n\t\t# update pitch value\n\t\tcomm.read_until(b'\"pitch\": ')\n\t\tread = comm.read_until(b',')\n\t\tread = read[:-1]\n\t\tcurrent_state.theta = float(read)\n\t\tself.state_theta_label.set_text(\"%0.2f\" % current_state.theta)\n\t\t# update roll value\n\t\tcomm.read_until(b'\"roll\": ')\n\t\tread = comm.read_until(b'}')\n\t\tread = read[:-1]\n\t\tcurrent_state.phi = 
float(read)\n\t\tself.state_phi_label.set_text(\"%0.2f\" % current_state.phi)\n\n\t\t# ask for current velocity data\n\t\tcomm.write(b'id1 mav.velocity_sensor get_local_data \\n')\n\t\t# update p value\n\t\tcomm.read_until(b'\"angular_velocity\": [')\n\t\tread = comm.read_until(b',')\n\t\tread = read[:-1]\n\t\tcurrent_state.p = float(read)\n\t\tself.state_p_label.set_text(\"%0.2f\" % current_state.p)\n\t\t# update q value\n\t\tread = comm.read_until(b',')\n\t\tread = read[:-1]\n\t\tcurrent_state.q = float(read)\n\t\tself.state_q_label.set_text(\"%0.2f\" % current_state.q)\n\t\t# update r value\n\t\tread = comm.read_until(b']')\n\t\tread = read[:-1]\n\t\tcurrent_state.r = float(read)\n\t\tself.state_r_label.set_text(\"%0.2f\" % current_state.r)\n\n\t\t# update x_dot value\n\t\tcomm.read_until(b'\"world_linear_velocity\": [')\n\t\tread = comm.read_until(b',')\n\t\tread = read[:-1]\n\t\tcurrent_state.x_dot = float(read)\n\t\tself.state_x_dot_label.set_text(\"%0.2f\" % current_state.x_dot)\n\t\t# update y_dot value\n\t\tread = comm.read_until(b',')\n\t\tread = read[:-1]\n\t\tcurrent_state.y_dot = float(read)\n\t\tself.state_y_dot_label.set_text(\"%0.2f\" % current_state.y_dot)\n\t\t# update z_dot value\n\t\tread = comm.read_until(b']')\n\t\tread = read[:-1]\n\t\tcurrent_state.z_dot = float(read)\n\t\tself.state_z_dot_label.set_text(\"%0.2f\" % current_state.z_dot)\n\n\t\t# update first waypoint for trajectory in GUI\n\t\twaypoints_gui[0] = [current_state.x, current_state.y, current_state.z, current_state.psi]\n\n\t\treturn GLib.SOURCE_CONTINUE", "def single_high_a_state(\n shape=(50, 20),\n time_step=0.01,\n num_changed_states=1,\n number_timesteps=3000,\n snap_shot_rate=100,\n initial_value=0.14\n):\n time_array = np.arange(0, number_timesteps * time_step, time_step)\n\n deviation_a = np.zeros(shape)\n deviation_b = np.zeros(shape)\n\n for _ in range(num_changed_states):\n index = np.random.randint(shape[0]), np.random.randint(shape[1])\n while deviation_a[index] > 0:\n index = np.random.randint(shape[0]), np.random.randint(1)\n deviation_a[index] = initial_value\n\n for num, _ in enumerate(time_array):\n deviation_a_update, _ = react_diff(\n deviation_a,\n deviation_b,\n diffusion_coef=1.,\n dt=time_step,\n is_1d=False\n )\n deviation_b_update, _ = react_diff(\n deviation_a,\n deviation_b,\n diffusion_coef=3.,\n is_a_substance=False,\n dt=time_step,\n is_1d=False\n )\n deviation_a = deviation_a_update\n deviation_b = deviation_b_update\n\n plt.figure(1)\n plt.xlabel('X direction')\n plt.ylabel('Y direction')\n plt.title('Two dimensional reaction diffusion system w/ \\n'\n 'single high value for species A. Plot species A')\n\n plt.figure(2)\n plt.xlabel('X direction')\n plt.ylabel('Y direction')\n plt.title('Two dimensional reaction diffusion system w/ \\n'\n 'single high value for species A. 
Plot species B')\n if num % snap_shot_rate == 0:\n plt.figure(1)\n plt.imshow(deviation_a)\n plt.pause(0.1)\n plt.draw()\n\n plt.figure(2)\n plt.imshow(deviation_b)\n plt.pause(0.1)\n plt.draw()\n\n plt.show()", "def show():\n setup()\n plt.show()", "def plot_initial_state(input_file_name='initial_state.nc',\n output_file_name='initial_state.png'):\n\n # load mesh variables\n chunks = {'nCells': 32768, 'nEdges': 32768}\n ds = xarray.open_dataset(input_file_name, chunks=chunks)\n nCells = ds.sizes['nCells']\n nEdges = ds.sizes['nEdges']\n nVertLevels = ds.sizes['nVertLevels']\n\n fig = plt.figure()\n fig.set_size_inches(16.0, 12.0)\n plt.clf()\n\n print('plotting histograms of the initial condition')\n print('see: init/initial_state/initial_state.png')\n d = datetime.datetime.today()\n txt = \\\n 'MPAS-Ocean initial state\\n' + \\\n 'date: {}\\n'.format(d.strftime('%m/%d/%Y')) + \\\n 'number cells: {}\\n'.format(nCells) + \\\n 'number cells, millions: {:6.3f}\\n'.format(nCells / 1.e6) + \\\n 'number layers: {}\\n\\n'.format(nVertLevels) + \\\n ' min val max val variable name\\n'\n\n plt.subplot(3, 3, 2)\n varName = 'maxLevelCell'\n var = ds[varName]\n maxLevelCell = var.values - 1\n xarray.plot.hist(var, bins=nVertLevels - 4)\n plt.ylabel('frequency')\n plt.xlabel(varName)\n txt = '{}{:9.2e} {:9.2e} {}\\n'.format(txt, var.min().values,\n var.max().values, varName)\n\n plt.subplot(3, 3, 3)\n varName = 'bottomDepth'\n var = ds[varName]\n xarray.plot.hist(var, bins=nVertLevels - 4)\n plt.xlabel(varName)\n txt = '{}{:9.2e} {:9.2e} {}\\n'.format(txt, var.min().values,\n var.max().values, varName)\n\n cellsOnEdge = ds['cellsOnEdge'].values - 1\n cellMask = np.zeros((nCells, nVertLevels), bool)\n edgeMask = np.zeros((nEdges, nVertLevels), bool)\n for k in range(nVertLevels):\n cellMask[:, k] = k <= maxLevelCell\n cell0 = cellsOnEdge[:, 0]\n cell1 = cellsOnEdge[:, 1]\n edgeMask[:, k] = np.logical_and(np.logical_and(cellMask[cell0, k],\n cellMask[cell1, k]),\n np.logical_and(cell0 >= 0,\n cell1 >= 0))\n cellMask = xarray.DataArray(data=cellMask, dims=('nCells', 'nVertLevels'))\n edgeMask = xarray.DataArray(data=edgeMask, dims=('nEdges', 'nVertLevels'))\n\n plt.subplot(3, 3, 4)\n varName = 'temperature'\n var = ds[varName].isel(Time=0).where(cellMask)\n xarray.plot.hist(var, bins=100, log=True)\n plt.ylabel('frequency')\n plt.xlabel(varName)\n txt = '{}{:9.2e} {:9.2e} {}\\n'.format(txt, var.min().values,\n var.max().values, varName)\n\n plt.subplot(3, 3, 5)\n varName = 'salinity'\n var = ds[varName].isel(Time=0).where(cellMask)\n xarray.plot.hist(var, bins=100, log=True)\n plt.xlabel(varName)\n txt = '{}{:9.2e} {:9.2e} {}\\n'.format(txt, var.min().values,\n var.max().values, varName)\n\n plt.subplot(3, 3, 6)\n varName = 'layerThickness'\n var = ds[varName].isel(Time=0).where(cellMask)\n xarray.plot.hist(var, bins=100, log=True)\n plt.xlabel(varName)\n txt = '{}{:9.2e} {:9.2e} {}\\n'.format(txt, var.min().values,\n var.max().values, varName)\n\n plt.subplot(3, 3, 7)\n varName = 'rx1Edge'\n var = ds[varName].isel(Time=0).where(edgeMask)\n maxRx1Edge = var.max().values\n xarray.plot.hist(var, bins=100, log=True)\n plt.ylabel('frequency')\n plt.xlabel('Haney Number, max={:4.2f}'.format(maxRx1Edge))\n txt = '{}{:9.2e} {:9.2e} {}\\n'.format(txt, var.min().values,\n var.max().values, varName)\n\n font = FontProperties()\n font.set_family('monospace')\n font.set_size(12)\n print(txt)\n plt.subplot(3, 3, 1)\n plt.text(0, 1, txt, verticalalignment='top', fontproperties=font)\n plt.axis('off')\n\n 
plt.tight_layout(pad=4.0)\n\n plt.savefig(output_file_name, bbox_inches='tight', pad_inches=0.1)", "def updatePlot(self):\n if len(self.baslin):\n X = list(t[0] for t in self.baslin)\n Y = list(t[1] for t in self.baslin)\n self.BLplt.set_xdata(X)\n self.BLplt.set_ydata(Y)\n if self.BLtyp == 'S':\n if self.BL is None:\n self.BL, = self.axes.plot(self.data[0], self.data[2], linestyle='-', color='green')\n else:\n self.BL.set_ydata(self.data[2])\n self.canvas.draw()", "def plot(self, *args, **kwargs):\n pass", "def plot_active_state(signal, active_state, labels=[]):\n import matplotlib.collections as col\n\n if signal.ndim == 1:\n channels = 1\n height = 0.9\n else:\n points, channels = signal.shape\n height = 0.9/channels\n\n fig = plt.figure()\n\n yprops = dict(rotation=0,\n horizontalalignment='right',\n verticalalignment='center',\n x=-0.01)\n\n axprops = dict(yticks=[])\n\n for i in range(channels):\n\n sig = signal[:, i]\n act_state = active_state[:, i]\n # [left, bottom, width, height] bottom and height are parameters!!\n ax = fig.add_axes([0.1, height * i+0.05, 0.8, height], **axprops)\n ax.plot(sig)\n\n collection = col.BrokenBarHCollection.span_where(x=range(len(sig)),\n ymin=min(sig),\n ymax=max(sig),\n where=act_state > 0,\n facecolor='green')\n\n ax.add_collection(collection)\n if labels:\n ax.set_ylabel(labels[i], **yprops)\n if i == 0:\n axprops['sharex'] = ax\n axprops['sharey'] = ax\n else:\n plt.setp(ax.get_xticklabels(), visible=False)", "def update(self):\n\t\tprint(\"Plotting \" + str(str(self.values[\"Trial1\"][1]) + \" at \" + str(self.values[\"Trial1\"][0]) + \"\\n\"))\n\t\tif self.clear:\n\t\t\tself.stream1.write(dict(x=[], y=[]))\n\t\t\tself.stream2.write(dict(x=[], y=[]))\n\t\t\tself.stream3.write(dict(x=[], y=[]))\n\t\telse:\n\t\t\tself.stream1.write(dict(x=self.values[\"Trial1\"][0], y=self.values[\"Trial1\"][1]))#, trace=Bar)\n\t\t\tself.stream2.write(dict(x=self.values[\"Trial2\"][0], y=self.values[\"Trial2\"][1]))\n\t\t\tself.stream3.write(dict(x=self.values[\"Trial3\"][0], y=self.values[\"Trial3\"][1]))", "def visualize_signal(self):\n plt.figure()\n plt.title('Accelerometer Signal')\n plt.plot(range(len(self.data)), self.data[1])", "def plot_state(\n self, highlightRobot=None, plotRobotIDs=True,\n returnax=True, figname=\"kaijuGrid.pdf\"\n ):\n from kaiju.utils import plotOne\n if returnax:\n ax = plotOne(\n step=0, robotGrid=self, isSequence=False,\n plotRobotIDs=plotRobotIDs,\n highlightRobot=highlightRobot, returnax=True\n )\n return ax\n else:\n plotOne(\n step=0, robotGrid=self, figname=figname, isSequence=False,\n plotRobotIDs=plotRobotIDs,\n highlightRobot=highlightRobot\n )", "def plot(self):\n\t\tself.plotOfXray().plot()", "def plot_control_law(self, i=0, j=1, k=0, t=0, n = 10, sys = None):\n \n # Extract sys info\n \n if sys is not None:\n xname = sys.state_label[i] + ' ' + sys.state_units[i]\n yname = sys.state_label[j] + ' ' + sys.state_units[j]\n xmax = sys.x_ub[i]\n xmin = sys.x_lb[i]\n ymax = sys.x_ub[j]\n ymin = sys.x_lb[j]\n xbar = sys.xbar\n \n else:\n xname = 'state x[%i]'%i\n yname = 'state x[%i]'%j\n xmax = 10\n xmin = -10\n ymax = 10\n ymin = -10\n xbar = np.zeros( self.p )\n \n # Computing\n \n x = np.linspace( xmin , xmax , n )\n y = np.linspace( ymin , ymax , n )\n \n X, Y = np.meshgrid( x, y)\n \n U = np.zeros((n,n)) # control action table\n \n for l in range(n):\n for m in range(n):\n \n # Actual states\n x = np.copy( xbar ) # default value for all states\n x[ i ] = X[l, m]\n x[ j ] = Y[l, m]\n \n # Control action\n u = self.cbar( x 
, t ) \n \n U[l, m] = u[k] # extract control input element k\n \n \n # Ploting\n fig = plt.figure(figsize=(3, 2),dpi=300, frameon=True)\n \n fig.canvas.manager.set_window_title('Control law for u[%i]'%i)\n ax = fig.add_subplot(1,1,1)\n \n ax.tick_params('both',labelsize = 5 )\n plt.ylabel(yname, fontsize = 5 )\n plt.xlabel(xname, fontsize = 5 )\n \n im1 = plt.pcolormesh( X , Y , U, shading='gouraud' , cmap = 'bwr')\n \n cbar = plt.colorbar(im1)\n cbar.ax.tick_params(labelsize=5)\n \n \n plt.axis([xmin,xmax,ymin,ymax])\n\n plt.grid(True)\n plt.tight_layout() \n plt.show()", "def visualize(self):\n import matplotlib.pyplot as plt\n import numpy as np\n\n plt.figure()\n sw_ = np.linspace(0.0, 1.0, 50)\n plt.plot(sw_, self.krw(sw_), label=\"Water\")\n plt.plot(sw_, self.kro(sw_), label=\"Oil\")\n plt.xlabel(\"Water saturation\")\n plt.ylabel(\"Relative permeability\")\n plt.legend()", "def __init__(self, state_0):\n self.state = state_0\n self.s_dot = 0\n self.hist = []\n self.time = 0.0\n control_frequency = 200 # Hz for attitude control loop\n self.dt = 1.0 / control_frequency\n self.desired_state = 0", "def plot(self):\n x = np.arange(5)\n # labels = ['temp', 'humi', 'mais', 'o2', 'co2']\n plt.bar(x - 0.35/2, self.data, 0.35, label='actual')\n plt.bar(x + 0.35/2, self.desired_values, 0.35, label='desired')\n plt.ylim(-5, 80)\n plt.legend()\n\n plt.draw()\n plt.pause(0.000001)\n plt.clf()", "def _UpdatePlot( self ):\n self._BusyDoOp( self._UpdatePlotImpl )", "def plot_solution(self):\n\n plt.plot(self.x_values, self.analytical(self.x_values, self.C,self.D), label = \"Analytical\")\n plt.plot(self.x_values, self.numerical, label = \"Numerical\")\n plt.title(\"Numerical vs. Analytical Solution\")\n plt.xlabel(\"x\")\n plt.ylabel(\"u(x)\")\n plt.legend()\n plt.show()", "def correct(self):\n self.parent.copyCurrentWinState(self.pltw)\n self.pltw.blklst[self.blkno][self.ypos] = self.data[1] - self.data[2]\n self.pltw.updatePlot()\n self.pltw.dirty = True\n self.pltw.activecurv = self.cpos\n self.parent.updateUI()\n self.hide()", "def update_visualization(self) -> None:\n pass", "def update_plot(axes):\n axes.clear()\n\n i = C.i\n C.i += di # globale Zählvariable erhöhen\n if C.i >= len(tt):\n time.sleep(2)\n C.i = 0\n\n t = tt[i]\n q1 = qq1[i]\n q2 = qq2[i]\n q3 = qq3[i]\n CCframe(q1, q2, q3)\n\n # Ausgabe der aktuellen Zeit\n pl.text(0.06, 0.05, \"t = %3.2fs\" % t, transform = axes.transAxes)\n pl.axis([-3, 3, -3, 3])\n axes.figure.canvas.draw()", "def show_plot() :\n logger.info(\"Show plot\")\n pylab.axis('equal')\n pylab.xlabel(\"Longitud\")\n pylab.ylabel(\"Latitud\")\n pylab.grid(True)\n pylab.title(\"Product tiles and product source\")\n pylab.show()", "def plotTime(self):\n plt.figure()\n t = [i for i in range(len(self.nodes_infected))]\n print(t)\n plt.title('Nodos infectados vs Tiempo')\n plt.xlabel('Instantes de tiempo')\n plt.ylabel('# de nodos infectados')\n plt.plot(t, self.nodes_infected)\n plt.grid(True)\n plt.show()", "def _plot(self, step, rewards, losses):\n plt.figure(figsize=(20, 5))\n plt.subplot(131)\n plt.title('Total Episode Reward')\n plt.plot(rewards)\n plt.subplot(132)\n plt.title('MSE Loss')\n plt.plot(losses)\n plt.show()", "def plot_motion(x, y):\n plt.xlabel(\"X Position (m)\")\n plt.ylabel(\"Y Position (m)\")\n plt.plot(x, y)\n plt.show()", "def ion():\n plt.ion()", "def plot(self):\n\t\tself.plotOfCos1().plot()", "def updateGraphics(board, step, caxes):\r\n boardArray= numpy.transpose(numpy.asarray(board))\r\n caxes.set_data(boardArray)\r\n plt.title('Step ' + 
str(step))\r\n plt.pause(constants.BLINK)\r\n plt.show()", "def display_state(self):\r\n\r\n print('\\n')\r\n print('>>CURRENT STATE')\r\n ct = 0\r\n for i in self.state:\r\n for j in i:\r\n if j == -1:\r\n val = 'X'\r\n else:\r\n val = str(ct)\r\n if len(val) == 1:\r\n print(' ' + val + ' ', end='')\r\n else:\r\n print(val + ' ', end='')\r\n ct += 1\r\n print('\\n')", "def UpdateState( self, **kwargs ):\n if bool( self ):\n if 'scale_mode' in kwargs:\n kwargs[ 'replot' ] = True\n\n kwargs = self._UpdateStateValues( **kwargs )\n redraw = kwargs.get( 'redraw', False )\n replot = kwargs.get( 'replot', False )\n\n if self.logger.isEnabledFor( logging.DEBUG ):\n self.logger.debug(\n '%s: redraw=%s, replot=%s',\n\t self.GetTitle(), str( redraw ), str( replot )\n\t )\n\n if replot:\n self._UpdateDataSetValues()\n self._UpdatePlot()\n\n elif redraw:\n self._DoUpdateRedraw()\n self.canvas.draw()", "def plot_data(self):", "def updatePlot(self,cost):\r\n timer = time.clock() - self.start_timer\r\n # Add new values to plot data set \r\n self.lines.set_xdata(np.append(self.lines.get_xdata(), timer))\r\n self.lines.set_ydata(np.append(self.lines.get_ydata(), cost))\r\n # Re-scale\r\n self.axCost.relim()\r\n self.axCost.autoscale_view()\r\n # Update plot\r\n self.canvasPlot.draw()\r\n self.canvasPlot.flush_events()", "def update(self, index: Optional[int] = None) -> None:\n super().update(index)\n self.ax.clear()\n if self._matrix is not None:\n df = pd.DataFrame(data=np.matmul(self.simulation.history.to_numpy(), self._matrix),\n columns=self.categories,\n index=self.simulation.history.index)\n else:\n df = self.simulation.history\n df.plot(ax=self.ax)\n\n self.ax.set_yscale(self.yscale)\n if self.yscale in ['symlog', 'log']:\n self.ax.set_ylim(0, 2 * self.simulation.simulator.n)\n else:\n self.ax.set_ylim(0, 1.1 * self.simulation.simulator.n)\n\n # rotate the x labels if they are time units\n if self.simulation.time_units:\n for tick in self.ax.get_xticklabels():\n tick.set_rotation(45)\n self.fig.tight_layout()\n self.fig.canvas.draw()", "def new_plot_dmstep(self):\n self._reset_ploting_area()\n self.plot_dmstep()", "def plot_states(F, U, X_hat, x0=np.array([-0.72, -0.64])):\n n = len(U)\n\n X = [x0]\n for i in range(n):\n X.append(F(X[-1], u(i)))\n X = np.array(X)\n\n fig, ax = plt.subplots()\n ax.plot(X[:, 0], X[:, 1], '.', color='blue')\n ax.plot(X_hat[:, 0], X_hat[:, 1], '+', color='black')\n ax.set_xlim(-2, 1)\n ax.set_ylim(-2, 1)\n\n return fig, ax", "def update(self):\n print(\"sensorState Update\")", "def plot_refresh():\n figure.canvas.draw()", "def display(self):\n ob = self._convert_state(self._env.reset())\n done = False\n while not done:\n ac, _ = self._act(ob, stochastic=False)\n ob, rew, done, _ = self._env.step(ac)\n ob = self._convert_state(ob)\n self._env.render()\n self._env.close()", "def _PlotGraph(self, event):\n self._rcvLock.acquire()\n for j in event.data[0].keys():\n data = event.data[0][j]\n #print data\n line = []\n for k in data.keys():\n if k in COLORS.keys():\n c = COLORS[k]\n else:\n c = 'black'\n line.append(plot.PolyLine(data[k], colour=c, width=1,\n legend=\"Node %d\"%(k,)))\n # To draw markers: default colour = black, size = 2\n # shapes = 'circle', 'cross', 'square', 'dot', 'plus'\n #marker = plot.PolyMarker(event.data[1], marker='triangle')\n\n # set up text, axis and draw\n if j == ERRORPLOT:\n t = \"Synchronization Error\"\n xa = \"Time [s]\"\n ya = \"Error [ms]\"\n elif j == TEMPPLOT:\n t = \"Temperature Index\"\n xa = \"Time [s]\"\n ya = \"Index\"\n elif j == 
SKEWPLOT:\n t = \"Frequency Error\"\n xa = \"Time [s]\"\n ya = \"Frequency Error [ppm]\"\n gc = plot.PlotGraphics(line, t, xa, ya)\n # Draw graphs for each plot\n self.plotter[j].Draw(gc, xAxis=(self._x_lower,\n self._x_upper), yAxis=(float(self._y_lower[j]),\n float(self._y_upper[j])))\n self._rcvLock.release()", "def plot_graph(self) -> None:\n\n nodes_on_graph = self.dw_graph.get_all_v()\n for k, v in nodes_on_graph.items():\n if v.position is None:\n x_rand = random.uniform(0.5, self.dw_graph.v_size())\n y_rand = random.uniform(0.5, self.dw_graph.v_size())\n v.position = (x_rand, y_rand)\n x_vals = []\n y_vals = []\n n = list(nodes_on_graph.keys())\n for k, v in nodes_on_graph.items(): # draw nodes\n x_vals.append(v.position[0])\n y_vals.append(v.position[1])\n\n fig, ax = plt.subplots()\n plt.plot(x_vals, y_vals, 'ro', markersize=5, data=\"d\")\n\n for p, txt in enumerate(n):\n ax.annotate(n[p], (x_vals[p]+0.00003, y_vals[p]), color='g')\n\n for n in nodes_on_graph:\n n1 = self.dw_graph.get_nodes(n)\n x = n1.position[0]\n y = n1.position[1]\n for r in self.dw_graph.all_out_edges_of_node(n):\n dx = self.dw_graph.get_nodes(r).position[0]\n dy = self.dw_graph.get_nodes(r).position[1]\n ax.quiver(x, y, dx-x, dy-y, angles='xy', scale_units='xy', scale=1)\n #plt.arrow(x, y, dx - x, dy - y, head_width=0.0009, width=0.00005, length_includes_head=True)\n\n\n plt.xlabel(\"x axis \")\n plt.ylabel(\"y axis \")\n plt.title(\"The title of the graph\")\n plt.show()" ]
[ "0.7050977", "0.70377105", "0.6921945", "0.6876264", "0.68268037", "0.6729921", "0.66884774", "0.6483927", "0.6483927", "0.6483927", "0.6483927", "0.6483927", "0.64827865", "0.64666325", "0.6431012", "0.64294916", "0.63720846", "0.6361515", "0.63082445", "0.6291267", "0.6290817", "0.62862056", "0.6275899", "0.62640166", "0.62494546", "0.6238472", "0.62365025", "0.6211897", "0.6204306", "0.6190134", "0.616414", "0.6158491", "0.6154597", "0.6109359", "0.6081149", "0.6069158", "0.6068709", "0.60593456", "0.6057527", "0.6056808", "0.60564476", "0.6045829", "0.6041681", "0.60415363", "0.6020289", "0.6001973", "0.59981805", "0.59901714", "0.59763783", "0.59675467", "0.5955031", "0.5937508", "0.5933611", "0.59286207", "0.59189165", "0.59189165", "0.59189165", "0.5916791", "0.5912795", "0.5907591", "0.59052706", "0.5897974", "0.5883314", "0.58718973", "0.586635", "0.5864829", "0.58634806", "0.5839689", "0.58367026", "0.58325064", "0.5819847", "0.58194685", "0.58128047", "0.58082503", "0.57966256", "0.57962227", "0.57923144", "0.57919884", "0.5787086", "0.57774436", "0.57763374", "0.5765108", "0.5764798", "0.5760538", "0.5758361", "0.57579", "0.574796", "0.57428485", "0.572797", "0.57273465", "0.57271385", "0.5726249", "0.572267", "0.57133573", "0.57089245", "0.570002", "0.5699856", "0.56894946", "0.5683206", "0.5680388", "0.5669856" ]
0.0
-1
Density of gas assuming ideal gas law in kg/m^3.
def density(self): return (1e-3*self.molar_mass) * self.pressure / (gas_constant * self.temperature) # kg/m^3
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fGasDensity(GasGravity, Temperature, Pressure):\n\tGasConstant = 8.314\n\tPress = Pressure / 145.038 # MPa\n\tTemp = Temperature + 273.16 # Deg K\n\tPr = Press / (4.892 - (0.4048 * GasGravity))\n\tTr = Temp / (94.72 + (170.75 * GasGravity))\n\tA = 0.03 + 0.00527 * ((3.5 - Tr)**3)\n\tB = (0.642 * Tr) - (0.007 * (Tr**4)) - 0.52\n\tC = 0.109 * ((3.85 - Tr)**2)\n\tD = exp(-((0.45 + (8 * ((0.56 - (1 / Tr))**2))) * ((Pr**1.2) / Tr)))\n\tZ = (A * Pr) + B + (C * D)\n\treturn (28.8 * GasGravity * Press) / (Z * GasConstant * Temp)", "def density_of_air(self) -> float:\n\n return self.pressure / (SPECIFIC_GAS_CONSTANT_OF_AIR * self.ambient_temperature)", "def electron_density_per_m3(self):\n return self.electron_density * 1e6", "def density_g_cm3(self):\n return self._density_kg_m3 / 1000.0", "def air_density(altitude):\n p = pressure(altitude) # psf\n t = temperature(altitude) # R\n rho = p/(gas_constant*t) # lb/ft3\n return rho", "def getGasDensity(grid=None, ppar=None):\n mesh = np.meshgrid(grid.x, grid.y, grid.z, indexing='ij')\n if ppar['crd_sys'] == 'sph':\n rr = mesh[0]\n tt = mesh[1]\n pp = mesh[2]\n xx = rr * np.sin(tt) * np.sin(pp)\n yy = rr * np.sin(tt) * np.cos(pp)\n zz = rr * np.cos(tt)\n cyrr = np.sqrt(xx**2. + yy**2)\n elif ppar['crd_sys'] == 'car':\n xx = mesh[0]\n yy = mesh[1]\n zz = mesh[2]\n rr = np.sqrt(xx**2 + yy**2 + zz**2)\n cyrr = np.sqrt(xx**2. + yy**2.)\n else:\n raise ValueError('crd_sys not specified in ppar')\n\n # calculate scale height\n hh = ppar['Ht'] * (cyrr / ppar['Rt'])**ppar['qheight']\n\n # calculate surface density\n nflat = len(ppar['Rin'])\n flat = cyrr * 0.\n for ii in range(nflat):\n flatii = fn_getflat(cyrr, ppar['Rin_w'][ii], ppar['Rin'][ii], \n ppar['Rout'][ii], ppar['Rout_w'][ii], \n ppar['sigp'][ii], ppar['sig0'][ii])\n flat = flat + flatii\n\n nring = len(ppar['ring_r'])\n ring = cyrr * 0\n for ii in range(nring):\n ringii = fn_getring(cyrr, ppar['ring_r'][ii], \n ppar['ring_win'][ii],ppar['ring_wout'][ii], \n ppar['ring_a'][ii])\n ring = ring + ringii\n\n sig = flat + ring\n\n rhogas = sig / np.sqrt(2.*np.pi) / hh * np.exp(-0.5 * (zz / hh)**2)\n reg = rhogas < ppar['cutgdens']\n rhogas[reg] = ppar['cutgdens']\n\n return rhogas", "def electron_density(self):\n return N_avo * self.num_electrons * self.density / self.molar_mass", "def density_by_ideal_gas_law(\n p: tf.Tensor,\n r: tf.Tensor,\n t: tf.Tensor,\n ) -> tf.Tensor:\n return p / r / t", "def density(self, alt):\n (Z, T, CN2, CO2, CO, CAr, CHe, CH, CM, WM) = self.altitude_profile(alt)\n\n # using eqn(42) of COESA for multiple gases\n M_i = [wmN2, wmO2, wmO, wmAr, wmHe, wmH] << (u.g / u.mol)\n n_i = [\n CN2.to_value(u.m**-3),\n CO2.to_value(u.m**-3),\n CO.to_value(u.m**-3),\n CAr.to_value(u.m**-3),\n CHe.to_value(u.m**-3),\n CH.to_value(u.m**-3),\n ] << (1 / u.m**3)\n rho = (n_i @ M_i) / Na\n return rho.to(u.kg / u.m**3)", "def computeChargeDensity(self):\n \n self.rho = np.zeros((self.ni, self.nj, self.nk))\n \n for species in self.speciesList:\n if species.charge!=0:\n self.rho += species.charge*species.den", "def test_density(self):\n sol = Mader(p_cj=3.0e11, d_cj=8.0e5, gamma=3.0, u_piston=0.0)\n r = np.array([0.7, 0.8])\n t = 6.25e-6\n solrt = sol(r, t)\n np.testing.assert_allclose(solrt.density[0], 2.26666666666663)", "def density_kg_m3(self):\n return self._density_kg_m3", "def mass_tot_rho(self):\n\n dm = np.zeros(self.nzon)\n dm[0] = 4. * np.pi / 3. * (self.r[0] ** 3 - self.r_cen ** 3) * self.rho[0]\n for i in range(1, self.nzon):\n dm[i] = 4. / 3. 
* np.pi * (self.r[i] ** 3 - self.r[i - 1] ** 3) * self.rho[i]\n # print(f' M_tot(Density) = {np.sum(dm)/phys.M_sun:.3f}')\n return np.sum(dm)", "def getDensity(h, R_w, R_sun): # k is a fitting constant\n\n R = np.sqrt(R_w**2+h**2)\n r = R/R_sun # units need to be in solar radii \n a = 77.1\n b = 31.4\n c = 0.954\n d = 8.30\n e = 0.550\n f = 4.63\n\n return (a*r**(-b) + c*r**(-d) + e*r**(-f))*10**8 #[cm-3]", "def molar_mass_dry_air():\n return 28.9647", "def mean_free_path(self):\n return self.dynamic_viscosity / self.density * np.sqrt(np.pi * 1e-3*self.molar_mass / (2*gas_constant * self.temperature))", "def compute_energy_density(kT):\n h=u.planck\n c=u.speed_of_light\n pi=np.pi\n return (8*pi/(h*c)**3)*((pi*kT)**4/15)", "def c_gas(j,rhoA,PA):\n return np.sqrt( PA[j] / rhoA[j] )", "def air_density(self):\n return self.flow_field.air_density", "def density(temp,pres):\n g_p = liq_g(0,1,temp,pres)\n dliq = g_p**(-1)\n return dliq", "def test_density(self):\n earth = CoreMantleCrustModel()\n assert earth.density(0) == 14\n assert earth.density(1e6) == 14\n assert earth.density(3.464e6) == 14\n assert earth.density(3.5e6) == 3.4\n assert earth.density(5e6) == 3.4\n assert earth.density(6.338e6) == 3.4\n assert earth.density(6.378e6) == 2.9", "def getDensityEstimate(self):\n return self.density", "def density(self):\n return self.get_density()", "def population_density(self) -> float:\n return self.population / self.area", "def density( self ) :\n return self.__density", "def density( self ) :\n return self.__density", "def density( self ) :\n return self.__density", "def density(self):\n return self.num_arcs() / (self.nframes / FRATE)", "def fOilDensity(APIGravity, GasOilRatioOFU, GasGravity, Temperature, Pressure):\t\n\tT = Temperature\n\tP = Pressure / 145.038 # converts psia to MPa.\n\tGasOilRatio=GasOilRatioOFU*(28.3168/158.9873) # Converts scf/bbl to l/l\n\n\t# A reference density that can be used to characterize an oil Rho_0 is measured\n\t# at 15.6 degC and atmospheric pressure.\n\tRho_0 = 141.5 / (APIGravity + 131.5)\n\n\t# B_0 is a volume factor derived by Standing (1962)\n\tB_0 = 0.972 + 0.00038 * ((2.4 * GasOilRatio * ((GasGravity/Rho_0)**0.5) + T + 1.78)**1.175)\n\n\t# True densities of live oils are also calculated using B_0, but\n\t# the mass of dissolved gas must be included.\n\tRho_G = (Rho_0 + 0.0012*GasGravity*GasOilRatio) / B_0\n\n\t# The pressure dependence is comparatively small and the published data for density at\n\t# pressure pp can be described by the polynomial\n\tRho_GP = Rho_G + (0.00277*P - 1.71e-7*(P**3)) * ((Rho_G - 1.15)**2) + (3.49e-4*P)\n\n\t# The effect of temperature is larger, and one of the most\n\t# common expressions used to calculate the in-situ density\n\t# was developed by Dodson and Standing (1945).\n\t# Rho_T = Rho_P / (0.972 + 0.000381 * ((T + 17.78) ** 1.175))\n\t# This is accounted for in the B_0 and Rho_G terms which collapse when GasOilRation = 0\n\n\treturn Rho_GP", "def pseudoDensity(\n self,\n Tk=None,\n Tc=None,\n ):\n Tk = getTk(Tc, Tk)\n self.checkPropertyTempRange(\"pseudoDensity\", Tk)\n inv_Tk = 1.0 / getTk(Tc, Tk)\n rho_kgPerM3 = 1.15675e03 * inv_Tk**2 + 3.43413e02 * inv_Tk + 2.99731e-03\n return rho_kgPerM3 / G_PER_CM3_TO_KG_PER_M3", "def gas_zfactor(T_pr, P_pr):\n # T_pr : calculated pseudoreduced temperature\n # P_pr : calculated pseudoreduced pressure \n from scipy.optimize import fsolve # non-linear solver\n import numpy as np\n\n a1 = 0.3265; a2 = -1.0700; a3 = -0.5339; a4 = 0.01569; a5 = -0.05165; a6 = 0.5475\n a7 = -0.7361; 
a8 = 0.1844; a9 = 0.1056; a10 = 0.6134; a11 = 0.7210\n\n def f(y):\n rho_pr, z = y\n c1 = a1 + (a2/T_pr) + (a3/(T_pr**3))+ (a4/(T_pr**4))+ (a5/(T_pr**5))\n c2 = a6 + (a7/T_pr) + (a8/(T_pr**2))\n c3 = a9*((a7/T_pr) + (a8/(T_pr**2)))\n c4 = (a10)*(1+(a11*(rho_pr**2)))*((rho_pr**2)/(T_pr**3))*(np.exp(-a11*(rho_pr**2)))\n\n f1 = z + (c3*(rho_pr**5)) - (c2*(rho_pr**2)) - (c1*(rho_pr**1)) - c4 - 1\n f2 = rho_pr - ((0.27 * P_pr) / (z * T_pr))\n return[f1, f2]\n\n solve = fsolve(f, [1, 1]) # initial guess\n return(solve[0], solve[1]) # result is density, z-factor", "def calculate_density(composition):\n density = 0.0\n\n for z, fraction in composition.items():\n density += fraction / ep.mass_density_kg_m3(z)\n\n return 1.0 / density", "def _density(self):\n fraction = np.array([0.]+[m.value for m in self.fraction])\n # TODO: handle invalid fractions using penalty functions\n # S = sum(fraction)\n # scale = S/100 if S > 100 else 1\n # fraction[0] = 100 - S/scale\n # penalty = scale - 1\n fraction[0] = 100 - sum(fraction)\n if (fraction < 0).any():\n return NaN\n volume = self._volume(fraction)\n density = np.array([m.density() for m in [self.base]+self.material])\n return np.sum(volume*density)", "async def air_density(self, temperature, station_pressure):\n if temperature is not None and station_pressure is not None:\n kelvin = temperature + 273.15\n pressure = station_pressure\n r_specific = 287.058\n decimals = 2\n\n air_dens = (pressure * 100) / (r_specific * kelvin)\n\n if self._unit_system == UNITS_IMPERIAL:\n air_dens = air_dens * 0.06243\n decimals = 4\n\n return round(air_dens, decimals)\n\n _LOGGER.error(\"FUNC: air_density ERROR: Temperature or Pressure value was reported as NoneType. Check the sensor\")", "def number_density(self) -> u.m**-3:\n return self._number_density", "def density(self, psi):\n return np.square(np.abs(psi))", "def dh(z,h=0.7,omegalambda=0.7,omegam=0.3,omegak=0.0):\n return distcalc(z,h,omegalambda,omegam,omegak)['dh']", "def dc(z,h=0.7,omegalambda=0.7,omegam=0.3,omegak=0.0):\n return distcalc(z,h,omegalambda,omegam,omegak)['dc']", "def density(self):\n return self.nnz / self.size", "def omega_plasma(number_density, mass):\n return np.sqrt(4 * np.pi * number_density * cgs.e**2 / mass)", "def get_density(element):\n return pt.elements.isotope(element).density", "def D(z):\n k=0.01 #Our choice of large-scale mode\n mPk=cosmo.pk(k,z)\n mPk_norm=cosmo.pk(k,0) #Normalize at z=0\n D=np.sqrt(mPk/mPk_norm)\n return D", "def get_f_s_gas(p: float, h: float) -> float:\n return 5.823109493752840 * 10 ** (-2) * p ** 4 \\\n - 3.309666523931270 * 10 ** (-1) * p ** 3 \\\n + 7.700179914440890 * 10 ** (-1) * p ** 2 \\\n - 1.311726004718660 * p \\\n + 1.521486605815750 * 10 ** (-9) * h ** 4 \\\n - 2.703698863404160 * 10 ** (-6) * h ** 3 \\\n + 1.793443775071770 * 10 ** (-3) * h ** 2 \\\n - 5.227303746767450 * 10 ** (-1) * h \\\n + 1.100368875131490 * 10 ** (-4) * p ** 3 * h \\\n + 5.076769807083600 * 10 ** (-7) * p ** 2 * h ** 2 \\\n + 1.202580329499520 * 10 ** (-8) * p * h ** 3 \\\n - 7.278049214744230 * 10 ** (-4) * p ** 2 * h \\\n - 1.449198550965620 * 10 ** (-5) * p * h ** 2 \\\n + 5.716086851760640 * 10 ** (-3) * p * h \\\n + 5.818448621582900 * 10", "def calculate(self, density):\n if density not in self.potential_memo:\n\n if density == 0:\n self.potential_memo[density] = 0\n else:\n a = self.a\n x_0 = self.x_0\n b = self.b\n c = self.c\n x = self.wigner_seitz_radius(density)**(1/2)\n x_x = x**2 + b * x + c\n x_x_0 = x_0**2 + b * x_0 + c\n q = (4 * c - b**2)**(1/2)\n\n 
self.potential_memo[density] = a * (log(x**2 / x_x) + (2 * b / q) * atan(q / (2 * x + b))\n - (b * x_0 / x_x_0) * (log((x - x_0)**2 / x_x) + (2 * (b + 2 * x_0) / q) * atan(q / (2 * x + b)))) \\\n - (a / 3) * ((1 + x_0 * x) / (1 + x_0 * x + b * x**2 + c * x**3))\n\n return self.potential_memo[density]", "def density_from_pressure(temperature, pressure, RH):\n # R = specific gas constant , J/(kg*degK) = 287.05 for dry air\n Rd = 287.05\n # http://www.baranidesign.com/air-density/air-density.htm\n # http://wahiduddin.net/calc/density_altitude.htm\n # Evaporation into the Atmosphere, Wilfried Brutsaert, p37\n # saturation vapor pressure is a polynomial developed by Herman Wobus\n e_so = 6.1078\n c0 = 0.99999683\n c1 = -0.90826951e-2\n c2 = 0.78736169e-4\n c3 = -0.61117958e-6\n c4 = 0.43884187e-8\n c5 = -0.29883885e-10\n c6 = 0.21874425e-12\n c7 = -0.17892321e-14\n c8 = 0.11112018e-16\n c9 = -0.30994571e-19\n \n p = (c0 + temperature*(\n c1 + temperature*(\n c2 + temperature*(\n c3 + temperature*(\n c4 + temperature*(\n c5 + temperature*(\n c6 + temperature*(\n c7 + temperature*(\n c8 + temperature*(\n c9)))))))))) \n \n sat_vp = e_so / p**8\n Pv = sat_vp * RH\n density = (pressure / (Rd * temperature)) * (1 - (0.378 * Pv / pressure))\n return density", "def test_densities():\n\n actual, r, wt = GridGenerator.make_grid(400)\n grid = 4*pi*r**2*wt\n\n data = AtomData()\n\n print(\"\\nINTEGRATED DENSITY TEST\")\n print(\"=======================\")\n for a in list(data.nuclear_charge.keys()):\n atom = Atom(a)\n Nel = data.electron_count[a]\n d0, d1, g0, g1, t0, t1, l0, l1 = atom.get_densities(r)\n\n # Count electrons per spin channel\n s_occ = AtomData.s_occ.get(a, [0, 0])\n p_occ = AtomData.p_occ.get(a, [0, 0])\n d_occ = AtomData.d_occ.get(a, [0, 0])\n f_occ = AtomData.f_occ.get(a, [0, 0])\n nela = np.sum(s_occ[0])+np.sum(p_occ[0])+np.sum(d_occ[0])+np.sum(f_occ[0])\n nelb = np.sum(s_occ[1])+np.sum(p_occ[1])+np.sum(d_occ[1])+np.sum(f_occ[1])\n assert(nela+nelb == Nel)\n\n id0 = np.dot(d0, grid)\n id1 = np.dot(d1, grid)\n\n diff_0 = id0 - nela\n percent_diff_0 = 100*diff_0/nela\n\n # Check to catch for Hydrogen having no beta electrons\n if nelb > 0.0:\n diff_1 = id1 - nelb\n percent_diff_1 = 100*diff_1/nelb\n else:\n diff_1 = 0.0\n percent_diff_1 = 0.0\n\n print(\"{:>3} - N_0 = ({:4.1f}) {:+2.6e}%, N_1 = ({:4.1f}) {:+2.6e}%, {:}\".format(a, id0, percent_diff_0, id1, percent_diff_1, \"PASSED\" if max(abs(diff_0), abs(diff_1)) < 1e-4 else \"FAILED - \"))\n\n print(\"\\nINTEGRATED KINETIC TEST\")\n print(\"=======================\")\n for a in list(data.ke_test.keys()):\n atom = Atom(a)\n t_bm = data.ke_test[a]\n d0, d1, g0, g1, t0, t1, l0, l1 = atom.get_densities(r)\n\n it0 = np.dot(t0, grid)\n it1 = np.dot(t1, grid)\n itot = it0 + it1\n\n diff = itot - t_bm\n print(\"{:>3} - T = {:+.6e}%, {:}\".format(a, 100*diff/t_bm, \"PASSED\" if abs(100*diff/t_bm) < 1e-2 else \"FAILED - \"))\n\n\n # The integral of the Laplacian over all space should be 0. 
Check that.\n print(\"\\nINTEGRATED LAPLACIAN TEST\")\n print(\"=========================\")\n for a in list(AtomData.ke_test.keys()):\n atom = Atom(a)\n\n d0, d1, g0, g1, t0, t1, l0, l1 = atom.get_densities(r)\n\n il0 = np.dot(grid, l0)\n il1 = np.dot(grid, l1)\n print(\"{:>3} - L_0 = {:+.6e}, L_1 = {:+.6e}, {:}\".format(a, il0, il1, \"PASSED\" if max(abs(il0), abs(il1)) < 1e-6 else \"FAILED - \"))\n\n\n print(\"\\nFINITE DIFFERENCE GRADIENT TEST\")\n print(\"===============================\")\n print(\"Testing gradient evaluation function against finite difference estimate...\")\n ne = Atom(\"Ne\") # Let's use \"the guvnor\"\n # We only need to test a few points around the core\n fdh = 1e-8\n fdr = np.arange(0.9, 0.9+fdh*10, fdh)\n d0, d1, g0, g1, t0, t1, l0, l1 = ne.get_densities(fdr)\n\n # First the first central difference\n fdiff = (d0[2:]-d0[:-2])/(2*fdh) # Construct the central difference\n if np.allclose(fdiff, g0[1:-1], atol=1e-1): # finite difference is not perfect, so lenient tollerance\n print(\"Gradient: PASSED\")\n else:\n print(\"Gradient: FAILED -\")\n\n print(\"\\nELEMENT COLOR FUNCTIONS TEST\")\n print(\"===========================\")\n test_obj = [Atom(\"H\"), Atom(\"C\"), Atom(\"O\")]\n test_str = [\"H\", \"C\", \"O\"]\n ref = np.array([[1., 1., 1.], [0.565, 0.565, 0.565], [1. , 0.051, 0.051]])\n\n if np.allclose( np.array(get_colors_for_elements(test_obj)), ref):\n print(\"\\nColor from objects: PASSED\")\n else:\n print(\"\\nColor from objects: FAILED -\")\n\n if np.allclose( np.array(get_colors_for_elements(test_str)), ref):\n print(\"Color from strings: PASSED\")\n else:\n print(\"Color from strings: FAILED -\")\n\n if HAVE_LIBXC:\n test_functional='GGA_X_PBE'\n print(\"\\nATOMIC EXCHANGE ENERGIES WITH {}\".format(test_functional))\n print(\"============================================\")\n for a in list(data.ke_test.keys()):\n atom = Atom(a)\n nE, vrho, vsigma, vtau, vlapl = atom.libxc_eval(r, functional=test_functional, restricted=False)\n Exc = (np.dot(nE, grid)).item()\n print('{:3s} {:.10f}'.format(a, Exc))\n else:\n print(\"\\nNot doing energy calculations due to lack of libxc.\\n\")", "def density(self):\n return self.nnz/self.dim", "def fWaterDensity(Salinity, GasWaterRatio, Temperature, Pressure):\n\tTemp = Temperature\n\tPress = Pressure / 145.038\n\tSal = Salinity / 1000\n\tA = (-80 * Temp) + (-3.3 * (Temp**2)) + (0.00175 * (Temp**3))\n\tB = (489 * Press) + (-2 * Temp * Press) + (0.016 * (Temp**2) * Press)\n\tC = (-0.000013 * (Temp**3) * Press) + (-0.333 * (Press**2)) + (0.002 * Temp * (Press ** 2))\n\tPureWaterDensity = 1 + ((A + B + C) * 1e-6)\n\tA = 80 + (3 * Temp) + (-3300 * Sal) + (-13 * Press) + (47 * Press * Sal)\n\tB = (300 * Press) + (-2400 * Press * Sal)\n\tC = 0.000001 * (B + (Temp * A))\n\tD = 0.668 + (0.44 * Sal)\n\treturn PureWaterDensity + (Sal * (D + C))", "def density(self):\n return self._density", "def current_density(self, xyz):\n return self.sigma * self.electric_field(xyz)", "def current_density(self, xyz):\n return self.sigma * self.electric_field(xyz)", "def Density(material):\n if material == \"mild\":\n return 7850.0\n else:\n if material == \"al\":\n return 2700.0\n else:\n raise ValueError(\"Invalid material `\"+material+\"'\")", "def dilutionneeded(self) -> float:\n return self.stock*1.0/self.final", "def da(z,h=0.7,omegalambda=0.7,omegam=0.3,omegak=0.0):\n return distcalc(z,h,omegalambda,omegam,omegak)['da']", "def fGasAcousticVelocity(GasGravity, Temperature, Pressure):\n\tGasBulkModulus = fGasBulkModulus(GasGravity, 
Temperature, Pressure) # Pascals\n\tGasDensity = fGasDensity(GasGravity, Temperature, Pressure) * 1000 # Kg\n\treturn (GasBulkModulus / GasDensity)**0.5 # m/s", "def dm(z,h=0.7,omegalambda=0.7,omegam=0.3,omegak=0.0):\n return distcalc(z,h,omegalambda,omegam,omegak)['dm']", "def quasiparticle_weight(omegas, sigma):\n dw = omegas[1] - omegas[0]\n win = (-dw <= omegas) * (omegas <= dw)\n dsigma = np.polyfit(omegas[win], sigma.real[win], 1)[0]\n z = 1/(1 - dsigma)\n if z < 0.01:\n z = 0\n return z", "def dry_snow_density(self):\n return (self.rho - self.h2o_vol * RHO_W0) / \\\n (1 - self.h2o_vol * RHO_W0 / RHO_ICE)", "def debye_length_m(electron_density, electron_temperature):\n return 0.069 * np.sqrt(electron_temperature / electron_density)", "def compute_density_geo(cur, N):\n ## geometric average cardinality\n avg_card = 1\n\n for n in range(N):\n card = get_parameter(cur, par=(\"card_B%d\" % n))\n avg_card *= card\n avg_card = math.pow(avg_card, 1.0/N)\n\n ## average mass\n if avg_card == 0:\n return (-1, 0)\n else:\n return (get_parameter(cur, par=\"B_mass\") / avg_card, avg_card)", "def energy(n):\n return (n * pi * hbar / (2 * a)) ** 2 / (2 * m)", "def solid_surface_density(M, a, delta_a):\n sigma_solid = (M*gen.Mearth*1e3)/(2.*np.pi*(a*gen.AU)*(delta_a*gen.AU))\n return sigma_solid", "def mass(self):\n\t\treturn self.volume*self.density", "def density(x, kind=\"geopotential\"):\n\n rho = table(x, kind)[3]\n return rho", "def get_density(self, asset=None):\n if asset is None or 'pc:density' not in asset.properties:\n return self.item.properties.get('pc:density')\n else:\n return asset.properties.get('pc:density')", "def dynamic_viscosity_of_air(self) -> float:\n\n return (1.458 * (10 ** (-6)) * (self.ambient_temperature**1.5)) / (\n self.ambient_temperature + 110.4\n )", "def current_density(self, xyz):\n\n j = self.electric_field(xyz) / self.rho\n return j", "def Test_Diameter(Graph):\n\n Durchmesser = M_Graph.get_Diameter(Graph)\n KPS = float(sum(Durchmesser)) / float(len(Durchmesser))\n\n return KPS", "def get_diameter(self):\n\n if self.no_dist is False:\n dist = self.distance\n diam = dist * self.ang_size / 60. * np.pi/180. 
* ct._kpc_over_pc_\n self.diam = diam\n else:\n self.diam = -1 # use -1 to indicate unknown diameter\n\n return self.diam", "def get_diameter(self) -> float:\r\n \r\n return (self.box[3] - self.box[1] + self.box[2] - self.box[0]) / 2", "def calculate_solvent_density(solvent_file):\n num_solvent_molecules = get_num_solvent_molecules(solvent_file)\n moles_solvent_molecules = num_solvent_molecules/6.02e23\n box_volume_cubic_angstroms = get_box_volume(solvent_file)\n if box_volume_cubic_angstroms == None:\n print(\"ERROR: The box volume wasn't calculated correctly.\")\n exit()\n liter_conversion = 1.0e-27\n box_volume_liters = liter_conversion*box_volume_cubic_angstroms\n solvent_density = moles_solvent_molecules/box_volume_liters\n return(solvent_density)", "def get_fiber_density():\n return Global_Module.global_fiber_density", "def number_densities(self) -> u.m**-3:\n try:\n return (self.n_elem * self.ionic_fractions).to(u.m**-3)\n except Exception: # noqa: BLE001\n return np.full(self.atomic_number + 1, np.nan) * u.m**-3", "def density(self, arg):\n out = 0\n for weight, mean, std in zip(self.weights, self.means, self.stds):\n scale = std * self.data['maturity']**.5\n loc = ((mean - self.data['riskfree']) *\n self.data['maturity'] - scale**2)\n out += weight * scs.norm(loc, scale).pdf(arg)\n return out", "def angular_diameter_distance(self, z, z0 = 0.0):\n\n\n dm2 = self.comoving_distance_transverse(z)\n if z0 == 0.0:\n return(dm2/(1.+z))\n else:\n if self.k0 < 0.0:\n raise cex.CosmologyUnapplicable(\"Not for Omega_k < 0\")\n dm1 = self.comoving_distance_transverse(z0)\n d_h_0 = self.hubble_distance_z(0.0)\n term1 = dm1 * np.sqrt(1.+self.k0*(dm2/d_h_0)**2.)\n term2 = dm2 * np.sqrt(1.+self.k0*(dm1/d_h_0)**2.)\n da12 = (term2-term1)/(1.+z) # only for Omega_k > 0\n return(da12)", "def _ion_densities(self):\n nD = self.ne_in*(6-self.zeff_in)/(5.)\n nC = self.ne_in*(self.zeff_in-1)/(30.)\n nC[np.where(nC<0)]=0.\n print(\"nC/nD: \"+str(np.mean(nC/nD)*100.)+\" %\")\n self.ni_in[0,:] = nD\n self.ni_in[1,:] = nC", "def calc_gasSD_inside_half_mass(galObj, gas_m, gas_pos, halfMassR='gas'):\n if halfMassR == 'gas':\n half_mass_radius = galObj.radii['gas_half_mass'].in_units('kpc')\n elif halfMassR == 'stars':\n half_mass_radius = galObj.radii['stellar_half_mass'].in_units('kpc')\n\n extent = np.sqrt(gas_pos[:, 0]**2 + gas_pos[:, 1]**2 + gas_pos[:, 2]**2)\n mask = extent <= half_mass_radius\n gasSD = np.sum(gas_m[mask])/np.pi/(half_mass_radius*1.e3)**2\n print(\"gas SD from particles within half-mass: \")\n print(gasSD)\n print(\"gas SD from global gas mass: \")\n print(galObj.masses['gas'] / np.pi / (half_mass_radius*1.e3)**2)\n\n print(galObj.masses['gas'])\n print(np.sum(gas_m[mask]))\n # hmmm.....\n # import pdb; pdb.set_trace()\n return gasSD", "def density(self):\n self.convert_window(\"Density\", \"kilograms/liter\", [\"grains/gallon(UK)\", \"grains/gallon(US)\", \"grams/cubic centimeters\", \"grams/liter\", \"grams/millimeters\", \"kilograms/cubic meters\", \"kilograms/liter\", \"megagrams/cubic meter\", \"milligrams/liter\", \"milligrams/millimeters\", \"ounces/cubic inch\", \"ounces/gallon(UK)\", \"ounces/gallon(US)\", \"pounds/cubic foot\", \"pounds/cubic inch\", \"pounds/gallon(UK)\", \"pounds/gallon(US)\", \"slugs/cubic foot\", \"tonnes/cubic meter\", \"tons(UK)/cubic yard\", \"tons(US)/cubic yard\"])", "def density(self, arg):\n mean = - self.sigma**2 * self.data['maturity']\n std = self.sigma * self.data['maturity']**.5\n return scs.norm(mean, std).pdf(arg)", "def compute_sigma(\n T,\n 
S,\n z,\n **kwargs,\n):\n return compute_rho(T, S, z, **kwargs) - 1000", "def density(self, arg):\n return self.gb2_density(np.exp(arg)) * np.exp(arg)", "def density(self) -> float:\n if self.is_directed():\n factor = 1\n else:\n factor = 2\n\n num_e = self._Impl.number_of_edges(directed_edges=True)\n num_v = self._Impl.number_of_vertices()\n\n density = (factor * num_e) / (num_v * (num_v - 1))\n return density", "def get_flux_density(self):\n if self.no_flux is False:\n return self.snu_at_1GHz\n else:\n return -1", "def _q_z(self):\n D = self.latt_par['D'].value\n lambda_r = self.latt_par['lambda_r'].value\n gamma = self.latt_par['gamma'].value\n return 2*np.pi*(self.h/D - self.k/lambda_r/np.tan(gamma))", "def density(self):\r\n return self.count_ones() / float(self.xspan * self.yspan)", "def _epsilon(vds) -> np.ndarray:\n return vds[\"rhod_tot\"] / vds[\"rho\"]", "def compute_density_ari(cur, N):\n ## arithmetic average cardinality\n avg_card = 0\n for n in range(N):\n card = get_parameter(cur, par=(\"card_B%d\" % n))\n avg_card += card / float(N)\n\n ## average mass\n if avg_card == 0:\n return (-1, 0)\n else:\n return (get_parameter(cur, par=\"B_mass\") / avg_card, avg_card)", "def current_density(self, xyz_m, xyz_n=None):\n\n j = self.electric_field(xyz_m, xyz_n=xyz_n) / self.rho\n return j", "def rho(u,param):\n \n c=param[0]\n tau=param[1]\n rho=0\n \n K=len(c)\n \n for k in range(1,K+1): ## Sum up the K Derivatives of Gaussian\n rho=rho+c[k-1,0]*DoG(u,k,tau)\n \n return rho", "def get_fiber_density_average():\n return Global_Module.global_fiber_density_with_average", "def testDensityCalculation(self):\n known_densities = np.array([1.76776695e-01, 1.76776695e-01, 1.76776695e-01,\n 4.59619433e-01, 4.59619433e-01, 1.76776695e-01, 5.00000000e-01, \n 8.84538011e-02, 3.40206909e-02, 2.26040275e-04])\n densities = nb._get_local_densities() \n np.testing.assert_allclose(densities, known_densities)", "def calculations():\r\n\t\r\n\tpayload, avionics, booster = weight_input()\r\n\r\n\tdrogue_size, drogue_force = drogue_calc()\r\n\tmain_size, main_force = main_calc(avionics, booster, drogue_force) #total mass, payload detaches\r\n\r\n\tprint(\"Drogue is diameter is \" + str(drogue_size) + \" inches\")\r\n\tprint(\"Main is diameter is \" + str(main_size) + \" inches\")", "def density(wair,pres,entr=None,temp=None,airf=None,dhum=None,\n chkvals=False,chktol=_CHKTOL,airf0=None,temp0=None,dhum0=None,\n chkbnd=False,mathargs=None):\n airf, temp, dhum = eq_wpte(wair,pres,entr=entr,temp=temp,airf=airf,\n dhum=dhum,chkvals=chkvals,chktol=chktol,airf0=airf0,temp0=temp0,\n dhum0=dhum0,chkbnd=chkbnd,mathargs=mathargs)\n h_p = iceair_h(0,0,1,wair,pres,temp=temp,airf=airf,dhum=dhum)\n dens = h_p**(-1)\n return dens", "def magnetic_flux_density(self, xyz):\n return self.mu * self.magnetic_field(xyz)", "def magnetic_flux_density(self, xyz):\n return self.mu * self.magnetic_field(xyz)", "def rho(SA, CT, p):\n\n SA = np.maximum(SA, 0)\n\n xs = np.sqrt(sfac * SA + soffset)\n ys = CT * 0.025\n z = p * 1e-4\n\n specific_volume = (v000\n + xs * (v100 + xs * (v200 + xs * (v300 + xs * (v400 + xs * (v500\n + xs * v600)))))\n + ys * (v010\n + xs * (v110 + xs * (v210 + xs * (v310 + xs * (v410 + xs * v510))))\n + ys * (v020 + xs * (v120 + xs * (v220 + xs * (v320 + xs * v420)))\n + ys * (v030 + xs * (v130 + xs * (v230 + xs * v330))\n + ys * (v040 + xs * (v140 + xs * v240)\n + ys * (v050 + xs * v150 + ys * v060)))))\n + z * (v001\n + xs * (v101 + xs * (v201 + xs * (v301 + xs * (v401 + xs * v501))))\n + ys * (v011 + xs * 
(v111 + xs * (v211 + xs * (v311 + xs * v411)))\n + ys * (v021 + xs * (v121 + xs * (v221 + xs * v321))\n + ys * (v031 + xs * (v131 + xs * v231)\n + ys * (v041 + xs * v141 + ys * v051))))\n + z * (v002\n + xs * (v102 + xs * (v202 + xs * (v302 + xs * v402)))\n + ys * (v012 + xs * (v112 + xs * (v212 + xs * v312))\n + ys * (v022 + xs * (v122 + xs * v222)\n + ys * (v032 + xs * v132 + ys * v042)))\n + z * (v003\n + xs * (v103 + xs * v203)\n + ys * (v013 + xs * v113 + ys * v023)\n + z * (v004 + xs * v104 + ys * v014\n + z * (v005 + z * v006))))))\n\n return 1. / specific_volume", "def spherearea(dia):\n r = dia*1e-4 # convert to cm\n return(4*np.pi*r**2)", "def area(diam):\n radius = diam / 2\n return(pi * radius * radius)", "def __density(self, x):\n\n z = np.power(self.rate, x) / m.factorial(x)\n return z * np.exp(-self.rate)", "def _dsurface_domega(self):\n\n dsdo = 0.\n\n return dsdo" ]
[ "0.74058163", "0.7005451", "0.68938565", "0.6806513", "0.6706316", "0.6634284", "0.6574425", "0.65566623", "0.651678", "0.6436798", "0.63436514", "0.633899", "0.6326834", "0.6320278", "0.63081455", "0.6297625", "0.62733966", "0.62247926", "0.6177332", "0.61762756", "0.61559004", "0.6152232", "0.6147758", "0.6104676", "0.60844386", "0.60844386", "0.60844386", "0.607733", "0.60687613", "0.6063257", "0.6036793", "0.6026475", "0.6015842", "0.6000556", "0.5994583", "0.59605765", "0.5959564", "0.5929324", "0.59020954", "0.58846486", "0.58622205", "0.58520997", "0.5843909", "0.58402973", "0.58348286", "0.58288586", "0.58234596", "0.58148503", "0.58134997", "0.58068895", "0.58068895", "0.5790532", "0.57716006", "0.57622457", "0.57457036", "0.57367814", "0.57340026", "0.57123494", "0.5683477", "0.5673309", "0.56688255", "0.56672955", "0.5666337", "0.56621414", "0.56536776", "0.56331456", "0.56256795", "0.5613808", "0.56095403", "0.55956465", "0.5591036", "0.55651134", "0.555664", "0.55512244", "0.5546596", "0.55396366", "0.55354357", "0.5530724", "0.552284", "0.5520192", "0.55164576", "0.55150795", "0.55131125", "0.5511935", "0.55079347", "0.5502775", "0.55014247", "0.5481211", "0.5459559", "0.5454922", "0.5450437", "0.5442318", "0.5439425", "0.54349715", "0.54349715", "0.5429878", "0.54216737", "0.54193866", "0.54177827", "0.5413006" ]
0.81784177
0
Vapour pressure in Pascals.
def vapour_pressure(self): return self.relative_humidity * self.solvent.equilibrium_vapour_pressure(self.temperature)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def PPV(self):\n return _div(self.TP, self.TP + self.FP)", "def vaporPressure(temp: float) -> float:\n exponent = (17.27*temp)/(temp + 237.3)\n vp = 611*np.exp(exponent)\n\n return vp", "def VaporPressure(dwpt):\n\n return 611.2*exp(17.67*dwpt/(243.5+dwpt))", "def volt_to_pressure(volt):\n return volt/10", "def vapor_pressure(ds, var):\n eps0 = 0.622 # Ratio of molecular weight of water and dry air [-]\n ds['vp'] = 0.5 * ds[var['pressure']] * (-1 + np.sqrt(1 + 4 * ds[var['mix_ratio']] / eps0))\n ds.vp.attrs = {'units': '???????', 'long_name': 'Vapor pressure', 'standard_name': 'vapor_pressure'}\n return ds", "def get_pressure(self):\n value = self.synth.cbox.get_adcs()[self.id_]\n value = value / self.conf['PSICONV']\n log.debug(\"Current pressure on regulator %d = %f\",\n self.id_, value)\n return value", "def calcPfePres(voltage: float):\n # V → Torr\n exponent = 1.667 * voltage - 11.46\n pres = 10**exponent\n return pres", "def test_pressure_profile():\n q = 1e-3\n D = .1\n L = 10.0\n S = 1\n c = 12.4 * D ** 3\n C_eff = 12 * c / L\n C = c / L\n S_eff = 1 / (1 / C_eff + 1 / S)\n Q = q * D * pi * L\n P_max = Q * (1 / (8 * C) + 1 / S)\n P_av = Q / S_eff\n\n vac_sys = VacuumSystem()\n vac_sys.add_chamber(S=S, Q=0.)\n for _ in range(10):\n vac_sys.add_tube(L, D, q=q)\n vac_sys.add_chamber(S=S, Q=.0)\n solve_vac_system(vac_sys)\n tube = vac_sys.components[11]\n assert isclose(np.mean(tube.P),P_av,rel_tol=.1) and isclose(np.max(tube.P),P_max,rel_tol=.1)", "def solar_ppa():\n per_kwh = 0.196 # [$/kWh]\n\n return per_kwh", "def P(self):\n return self.generic_getter(get_pressure, \"p\", \"convert_pressure\")", "def getPureComponentVaporPressure(self,Temperature):\n\t\tA = self.Antoine_params[0]\n\t\tB = self.Antoine_params[1]\n\t\tC = self.Antoine_params[2]\n\t\t\n\t\t# Antoine's Equation\n\t\tPmmHg = 10**(A - B / (C + Temperature - 273.15))\n\t\treturn PmmHg * 133.322 # to get Pa", "def get_pressure(self):\n return self._sense_hat.get_pressure() * 0.0295300", "def get_chamber_pressure(self):\n raise NotImplementedError", "def cov2pv(self, P):\n return P", "def pressure(self):\n self.convert_window(\"Pressure\", \"pascals\", [\"atm\", \"bars\", \"centimeters mercury\", \"centimeters water\", \"feet of water\", \"hectopascals\", \"inches of mercury\", \"inches of water\", \"kilogram-force/sq.centimeter\", \"kilogram-force/sq.meter\", \"kilonewtons/sq.meter\", \"kilonewtons/sq.millimeter\", \"kilopascals\", \"kips/sq.inch\", \"meganewtons/sq.meter\", \"meganewtons/sq.millimeter\", \"meters of water\", \"millibars\", \"millimeters of mercury\", \"millimeters of water\", \"newtons/sq.centimeter\", \"newtons/sq.meter\", \"newtons/sq.millimeter\", \"pascals\", \"poundals/sq.foot\", \"pounds-force/sq.foot\", \"pounds-force/sq.inch\", \"tonnes-force/sq.cm\", \"tonnes-force/sq.meter\", \"tons(UK)-force/sq.foot\", \"tons(UK)-force/sq.inch\", \"tons(US)-force/sq.foot\", \"tons(US)-force/sq.inch\", \"torr\"])", "def compute_pressure(self, windkessel_volumes):\n # Extract the relevant volumes from the inputs.\n vart = windkessel_volumes['art']\n vven = windkessel_volumes['ven']\n\n # Extract relevant model parameters.\n cven = self.parameters['venous_compliance']\n cart = self.parameters['arterial_compliance']\n vven_rest = self.parameters['venous_resting_volume']\n vart_rest = self.parameters['arterial_resting_volume']\n\n # Compute the venous and arterial pressures.\n p = {'art': (vart - vart_rest)/cart, 'ven': (vven - vven_rest)/cven}\n return p", "def cov2pv(self, P):\n raise NotImplementedError", "def 
pressure(self):\n return {'art': self._part, 'ven': self._pven}", "def pressure(self):\n return {'art': self._part, 'ven': self._pven}", "def pressure(self):\n return {'art': self._part, 'ven': self._pven}", "def calc_VPD(t_celsius, rel_humidity):\n # according to Licor LI-6400 manual pg 14-10\n # and Buck AL (1981). New equations for computing vapor pressure and\n # enhancement factor. J Appl Meteor 20:1527-1532\n vp_sat = 0.61365 * math.exp((17.502 * t_celsius) / (240.97 + t_celsius))\n\n vp_air = vp_sat * rel_humidity\n return vp_sat - vp_air # or vp_sat * (1 - rel_humidity)", "def pressure(self):\r\n self._read_temperature()\r\n\r\n # Algorithm from the BME280 driver\r\n # https://github.com/BoschSensortec/BME280_driver/blob/master/bme280.c\r\n adc = self._read24(_BME280_REGISTER_PRESSUREDATA) / 16 # lowest 4 bits get dropped\r\n var1 = float(self._t_fine) / 2.0 - 64000.0\r\n var2 = var1 * var1 * self._pressure_calib[5] / 32768.0\r\n var2 = var2 + var1 * self._pressure_calib[4] * 2.0\r\n var2 = var2 / 4.0 + self._pressure_calib[3] * 65536.0\r\n var3 = self._pressure_calib[2] * var1 * var1 / 524288.0\r\n var1 = (var3 + self._pressure_calib[1] * var1) / 524288.0\r\n var1 = (1.0 + var1 / 32768.0) * self._pressure_calib[0]\r\n if var1 == 0:\r\n return 0\r\n if var1:\r\n pressure = 1048576.0 - adc\r\n pressure = ((pressure - var2 / 4096.0) * 6250.0) / var1\r\n var1 = self._pressure_calib[8] * pressure * pressure / 2147483648.0\r\n var2 = pressure * self._pressure_calib[7] / 32768.0\r\n pressure = pressure + (var1 + var2 + self._pressure_calib[6]) / 16.0\r\n\r\n pressure /= 100\r\n if pressure < _BME280_PRESSURE_MIN_HPA:\r\n return _BME280_PRESSURE_MIN_HPA\r\n if pressure > _BME280_PRESSURE_MAX_HPA:\r\n return _BME280_PRESSURE_MAX_HPA\r\n return pressure\r\n else:\r\n return _BME280_PRESSURE_MIN_HPA", "def test_vapor_pressure():\n assert_almost_equal(vapor_pressure(998. * units.mbar, 0.04963),\n 73.75179 * units.mbar, 5)", "def Vps(self):\n return [elem['Vp'] for elem in self.__compartments]", "def pressure(self, loading):\n return loading / (self.params[\"K\"] * (self.params[\"n_m\"] - loading))", "async def get_pressure(self) -> float: # type: ignore\n ...", "def compute_pressure(self, windkessel_volumes = None):\n # Extract the relevant volumes from the inputs.\n if windkessel_volumes is None:\n vart = self.volume['art']\n else:\n vart = windkessel_volumes['art']\n\n # Extract relevant model parameters.\n cart = self.parameters['arterial_compliance']\n\n # Compute the arterial pressure. 
Also return the constant venous pressure.\n p = {'art': vart/cart, 'ven': self.parameters['venous_pressure']}\n return p", "def pressure(self):\n return float(self._current_observation['pressure_mb'])", "def get_pressure(self): # This function implements the equations needed to convert the digital data into mbars\n self.digital_pressure_data()\n C_1, C_2, C_3, C_4, C_5, C_6=self.calibration_constants()\n temperature, dT=self.get_temperature()\n OFF = ((C_2 * (2**16)) + ((C_4 * dT)/2**7))\n SENS = (C_1 * (2**15)) + ((C_3 * dT)/(2**8))\n pressure=(((self.presadc*(SENS/(2**21)))-OFF)/(2**15))/100\n return pressure, temperature", "def ptpresionagua(self,prof_pt): #getter que halla la presion de poros en un punto\r\n p_agua=0.0\r\n if prof_pt<self.n_fret:\r\n p_agua=0.0\r\n pass\r\n else:\r\n p_agua=(prof_pt-self.n_fret)*self.gamma_h20\r\n return p_agua", "def pressure(self, values):\n self._part = float(values.get('art', self._part))\n self._pven = float(values.get('ven', self._pven))", "def pressure(self, values):\n self._part = float(values.get('art', self._part))\n self._pven = float(values.get('ven', self._pven))", "def pressure(self, values):\n self._part = float(values.get('art', self._part))\n self._pven = float(values.get('ven', self._pven))", "def pressure(altitude):\n t = temperature(altitude) # R\n if altitude <= 36152:\n p = 2116*(t/518.6)**5.256 # psf\n else:\n p = 473.1*exp(1.73-0.000048*altitude) # psf\n return p", "def test_wet_psychrometric_vapor_pressure():\n p = 1013.25 * units.mbar\n dry_bulb_temperature = 20. * units.degC\n wet_bulb_temperature = 18. * units.degC\n psychrometric_vapor_pressure = psychrometric_vapor_pressure_wet(p, dry_bulb_temperature,\n wet_bulb_temperature)\n assert_almost_equal(psychrometric_vapor_pressure, 19.3673 * units.mbar, 3)", "def pressure(self):\n names = ['anc_air_pressure']\n return self.sensor.get_with_fallback('pressure', names)", "def test_sat_vapor_pressure_scalar():\n es = saturation_vapor_pressure(0 * units.degC)\n assert_almost_equal(es, 6.112 * units.mbar, 3)", "def compute_pressure(self, windkessel_volumes = None):\n # Extract the relevant volumes from the inputs.\n if windkessel_volumes is None:\n vart = self.volume['art']\n vven = self.volume['ven']\n else:\n vart = windkessel_volumes['art']\n vven = windkessel_volumes['ven']\n\n # Extract relevant model parameters.\n cven = self.parameters['venous_compliance']\n cart = self.parameters['arterial_compliance']\n vven_rest = self.parameters['venous_resting_volume']\n vart_rest = self.parameters['arterial_resting_volume']\n\n # Compute the venous and arterial pressures.\n p = {'art': (vart - vart_rest)/cart, 'ven': (vven - vven_rest)/cven}\n return p", "def zeta(self, Ppump):\n return(self.alpha(Ppump) / 2. 
/ self.w0(Ppump))", "def test_pressure(self):\n sol = Mader(p_cj=3.0e11, d_cj=8.0e5, gamma=3.0, u_piston=0.0)\n r = np.array([0.7, 0.8])\n t = 6.25e-6\n solrt = sol(r, t)\n np.testing.assert_allclose(solrt.pressure[0], 223599111111.10834)", "def pressure_to_kpa(pressure):\n kpa = pressure / 10.0\n return kpa", "def pressure_ashpa(self):\n hpa = None # Variable declaration\n raw_comp1 = None # Variable declaration\n raw_comp2 = None # Variable declaration\n raw_comp3 = None # Variable declaration\n raw_pressure = None # Variable declaration\n raw_temperature = None # Variable declaration\n value_d_p1 = None # Variable declaration\n value_d_p2 = None # Variable declaration\n value_d_p3 = None # Variable declaration\n value_d_p4 = None # Variable declaration\n value_d_p5 = None # Variable declaration\n value_d_p6 = None # Variable declaration\n value_d_p7 = None # Variable declaration\n value_d_p8 = None # Variable declaration\n value_d_p9 = None # Variable declaration\n value_lsb = None # Variable declaration\n value_msb = None # Variable declaration\n value_xlsb = None # Variable declaration\n\n value_msb = self.get_pressuremsb()\n value_lsb = self.get_pressurelsb()\n value_xlsb = self.get_pressurexlsb()\n value_d_p1 = self.get_digp1()\n value_d_p2 = self.get_digp2()\n value_d_p3 = self.get_digp3()\n value_d_p4 = self.get_digp4()\n value_d_p5 = self.get_digp5()\n value_d_p6 = self.get_digp6()\n value_d_p7 = self.get_digp7()\n value_d_p8 = self.get_digp8()\n value_d_p9 = self.get_digp9()\n raw_temperature = self.temperature_ascelsius()\n raw_temperature = (raw_temperature*5120.0)\n raw_pressure = ((value_msb << 12)+(value_lsb << 4)+(value_xlsb >> 4))\n raw_comp1 = ((raw_temperature/2)-64000.0)\n raw_comp2 = ((raw_comp1*raw_comp1*value_d_p6)/32768.0)\n raw_comp2 = (raw_comp2+(raw_comp1*value_d_p5*2.0))\n raw_comp2 = ((raw_comp2/4.0)+(value_d_p4*65536.0))\n raw_comp3 = (value_d_p3*raw_comp1*raw_comp1)\n raw_comp1 = (((raw_comp3/524288.0)+(value_d_p2*raw_comp1))/524288.0)\n raw_comp1 = ((1.0+(raw_comp1/32768.0))*value_d_p1)\n hpa = (1048576.0-raw_pressure)\n hpa = ((hpa-(raw_comp2/4096.0))*(6250.0/raw_comp1))\n raw_comp1 = ((value_d_p9*hpa*hpa)/2147483648.0)\n raw_comp2 = ((hpa*value_d_p8)/32768.0)\n hpa = (hpa+((raw_comp1+raw_comp2+value_d_p7)/16.0))\n hpa = (hpa/100.0)\n return hpa", "def psu2ppt(psu):\n\n a = [0.008, -0.1692, 25.3851, 14.0941, -7.0261, 2.7081]\n return (a[1] + a[2] * psu ** 0.5 + a[3] * psu + a[4] * psu ** 1.5 + a[5] *\n psu ** 2 + a[6] * psu ** 2.5)", "def pressure(self):\n return _cantera.reactor_pressure(self.__reactor_id)", "def pressure(self, alt):\n alt_profile = self.altitude_profile(alt)\n T, number_density = alt_profile[1], alt_profile[8]\n\n # using eqn(42) of COESA76\n pressure = number_density * k * T\n return pressure", "def pp(self):\n \n return np.cross(self.v, self.w) / np.dot(self.w, self.w)", "def p(self, x):\n y = self.c500*x\n pp = y**self.gamma * (1. 
+ y**self.alpha)**((self.beta - self.gamma)/self.alpha)\n return self.P0 / pp", "def meas_pressure(instrument):\n return volt_to_pressure(meas_voltage(instrument))", "def P_total(pressures=[]):\n total = 0.0\n for pressure in pressures:\n total += pressure\n return float(total)", "def rho(self, Ppump):\n\n etaP, EsatL, TR = self.etaP, self.EsatL, self.TR\n return(self.Psteady(Ppump) * etaP / (EsatL * TR * self.w0(Ppump)**2))", "def P(lag):\n N = len(SP)\n ratios = SP[lag:N]/SP[0:N-lag]\n P = 100.*(ratios-1.)\n return P", "def read_pressure(self):\n pRaw = self._read_multiple_bytes_as_array(self.BME280_PRESS_MSB, 3)\n\n return float(self._compensate_pressure((pRaw[0] << 12) + (pRaw[1] << 4) + (pRaw[2] >> 4)))", "def wind_ppa():\n per_kwh = 0.0384 # [$/kWh]\n\n return per_kwh", "def pressure(self, loading):\n return loading * numpy.exp(\n -numpy.log(self.params['K']) + self.params['A'] * loading +\n self.params['B'] * loading**2 + self.params['C'] * loading**3\n )", "def native_pressure(self) -> float | None:\n return self._pressure", "def calculate_water_vapour_pressure(self, T=None, units='atm'): # temp in Kelvin\n A,B,C = self.get_ABC(T=T)\n \n if A is not None and B is not None and C is not None:\n # bar \n p_vap_bar = math.pow(10, (A-B/(C+T)))\n if units=='bar':\n return p_vap_bar\n \n # atm\n elif units=='atm': \n p_vap_atm = convertor.convert(\n p_vap_bar, \n currentUnits='bar', \n newUnits='atm')\n return p_vap_atm\n \n else:\n return None\n else:\n return None", "def get_pressure(self):\n\n\t\tvoltage_pressure = self.pressure_sensor.getVoltage()\n\t\tnew_value = (250 * voltage_pressure / 5) - 25\n\n\t\tself._pressure_samples.append(new_value)\n\n\t\tif not self.pressure_timer.hasPeriodPassed(self.pressure_timer_delay):\n\t\t\treturn self._last_pressure_value\n\n\t\t# Calculate new running average\n\t\tnew_avg = sum(self._pressure_samples) / len(self._pressure_samples)\n\n\t\tself._pressure_samples = [ ]\n\t\tself._last_pressure_value = new_avg\n\n\t\treturn new_avg", "def cloudsPressure(self):\n return self._cloud_pressure", "def convert_pressure(self, event):\n try:\n #Compare other unit to one unit(pascals)\n current_value, current_unit = float(\"0\" + str(self.v.get())), self.dropdown.get()\n unit_comp = {\"atm\": 101325.0, \"bars\": 100000.0, \"centimeters mercury\": 1333.22, \"centimeters water\": 98.0665, \"feet of water\": 2989.06692, \"hectopascals\": 100.0, \"inches of mercury\": 3386.388, \"inches of water\": 249.08891, \"kilogram-force/sq.centimeter\": 98066.5, \"kilogram-force/sq.meter\": 9.80665, \"kilonewtons/sq.meter\": 1000.0, \"kilonewtons/sq.millimeter\": 1000000000.0, \"kilopascals\": 1000.0, \"kips/sq.inch\": 6894760.0, \"meganewtons/sq.meter\": 1000000.0, \"meganewtons/sq.millimeter\": 1000000000000.0, \"meters of water\": 9806.65, \"millibars\": 100.0, \"millimeters of mercury\": 133.322, \"millimeters of water\": 9.80665, \"newtons/sq.centimeter\": 10000.0, \"newtons/sq.meter\": 1.0, \"newtons/sq.millimeter\": 1000000.0, \"pascals\": 1.0, \"poundals/sq.foot\": 1.44816, \"pounds-force/sq.foot\": 47.88, \"pounds-force/sq.inch\": 6894.757, \"tonnes-force/sq.cm\": 98066500.0, \"tonnes-force/sq.meter\": 9806.65, \"tons(UK)-force/sq.foot\": 107251.0, \"tons(UK)-force/sq.inch\": 15444280.0, \"tons(US)-force/sq.foot\": 95760.0, \"tons(US)-force/sq.inch\": 13789500.0, \"torr\": 133.322}\n value_comp, printer = current_value * unit_comp[current_unit], \"\"\n unit_list = sorted(unit_comp.keys())\n unit_list.remove(current_unit)\n for unit in unit_list:\n printer += \"To 
%s \" % unit + \" \" * (max([len(i) for i in unit_list]) - len(unit)) + str(value_comp / unit_comp[unit]) + [\"\", \"\\n\"][unit_list[-1] != unit]\n except ValueError: #In case user enter the other type of value, not Int or Float\n printer = \"Value is invalid.\"\n self.print_text(printer)", "def read_pressure(self):\n self._force_read(False)\n\n presADC = (self._read_register_1ubyte(self.BME680_PRESS_MSB) << 12) | (self._read_register_1ubyte(self.BME680_PRESS_LSB) << 4) | (self._read_register_1ubyte(self.BME680_PRESS_XLSB) >> 4)\n\n return float(self._compensate_pressure(presADC))", "def f_UPPS_pc(v, P_0, r_f, d, s, T, wealth, phi, n_s, n_o, K):\n W_T = f_W_T_pc(v, P_0, r_f, d, s, T, wealth, phi, n_s, n_o, K)\n value = pow(W_T, -gamma) * f_W_T_to_P_T_pc(v, P_0, r_f, d, s, T, wealth, phi, n_s, n_o, K) * f_P_T_to_P_0(v, r_f, d, s, T)\n return value", "def spreading_pressure(self, pressure):\n return self.params[\"n_m\"] * numpy.log(1.0 + self.params[\"K\"] * pressure)", "def psi_to_pa(value: float) -> float:\n quantity_in_psi = value * imperial.psi\n quantity_in_pa = quantity_in_psi.to(u.Pa)\n return quantity_in_pa.value", "def murnaghan(p, v):\n return p[0]+p[1]*v/p[2]*((p[3]/v)**p[2]/(p[2]-1)+1)-p[1]*p[3]/(p[2]-1)", "def princarg(phase_in):\n phase = np.mod(phase_in + np.pi,-2*np.pi)+np.pi;\n return phase", "def calP(self):\n N = len(self.listOfParticles)\n m = self.listOfParticles[0].m\n vsum = 0\n for particle in self.listOfParticles:\n vsum += particle.V.len()\n A = np.pi*self.R**2\n F = 0.5 * A * (2*self.R) * m * N * vsum**2\n return F", "def ppd(self):\n return math.sqrt(np.dot(self.v, self.v) / np.dot(self.w, self.w) )", "def liqpressure(temp):\n tau = temp/_TTP\n pres = 1.\n for (a,b) in _C_PMELT:\n pres += a * (1 - tau**b)\n pres *= _PTPE\n return pres", "def clapeyron_lv(self):\n # P(T) = P' exp[ (H_vap / R) (1 / T' - 1 / T) ] where T' is TP_temperature\n T_arr = np.linspace(self.TP_temperature.magnitude,\n self.CP_temperature.magnitude,\n 100) * ureg.K\n\n if np.isnan(self.H_vap_boil.magnitude):\n H_vap = self.H_vap\n else:\n H_vap = self.H_vap_boil\n\n cte = H_vap / gas_constant\n P_arr = self.TP_pressure * \\\n np.exp(cte * (1/self.TP_temperature - 1/T_arr))\n return T_arr, P_arr", "def kPa(self):\n return KiloPascal(self.base_value / 1000.0)", "def ptsigmave(self,prof_pt): #getter halla esfuerzo efectivo vertical en un punto\r\n sigmave=self.ptsigmav(prof_pt)-self.ptpresionagua(prof_pt)\r\n sigmave=round(sigmave,3)\r\n return sigmave", "def getPureVaporPressures(self,T):\n\t\tanswer = list()\n\t\tfor c in self.components:\n\t\t\tanswer.append( c.getPureComponentVaporPressure(T) )\n\t\treturn numpy.array(answer)", "def APV_equation(self, uct):\n q_v_ = uct.total_reward\n nsa = uct.visit_time\n sigma_nsb = uct.my_parent.visit_time - 1\n psa = uct.psa\n if nsa == 0:\n return float('inf')\n equation = q_v_ / nsa + 2 * psa * math.sqrt(sigma_nsb) / nsa\n return equation", "def pv(rate, n_years):\n return 1 / fv(rate, n_years)", "def tsVs(self):\n self.__percentuale = self.ui.percentualeTs.value()", "def v_p(self, psi_l, ci):\n\t\treturn min((ci*self.VPMAX0)/(ci + self.KP), self.VPR)", "def get_value(p, t, q, c, v):\n\n gas = chemistry.ConstituentProperties(c)\n Psat_gas = gas.Psat(t)\n\n if c.upper() == 'H2S':\n if p < 43. and p * q * v > Psat_gas: # Pressure greater than saturation pressure\n return str(1.0)\n elif p < 43. 
and p * q * v < Psat_gas:\n return str(v)\n else:\n return str(0.8)\n else:\n return str(1.0)", "def getVaporPressures(self,T):\n\t\treturn self.getPureVaporPressures(T) * self.getMoleFractions()", "def caput(PV, value):\n epics.caput(PV, value)", "def loading(self, pressure):\n kp = self.params[\"K\"] * pressure\n return self.params[\"n_m\"] * kp / (1.0 + kp)", "def getP(self, Vinv):\n XVX = np.dot(np.dot(self.X.T, Vinv), self.X)\n P = Vinv - np.dot(np.dot(Vinv, self.X) * (1./XVX), np.dot(self.X.T, Vinv))\n return P", "def psi(self):\n return PoundSquareInch(self.base_value / 6894.76)", "def get_ppm(self):\n return self.PARA * math.pow((self.get_resistance()/ self.RZERO), -self.PARB)", "def get_ppm(self):\n return self.PARA * math.pow((self.get_resistance()/ self.RZERO), -self.PARB)", "def PV(rate, nper, pmt, fv):\n if type(pmt) == int:\n pmt = np.array([pmt])\n else:\n pmt = np.array(pmt)\n if nper <= 0:\n print(\"nper needs to be greater than zero.\")\n elif nper != len(pmt) and sum(pmt) != 0:\n print(\"pmt vector length needs to match nper or be zero.\")\n else:\n pv_fv = fv / (1 + rate) ** nper\n fv_pmt = [(pmt[i - 1] / (1 + rate) ** i) for i in np.arange(1, len(pmt) + 1, 1)]\n return(sum(fv_pmt) + pv_fv)", "def pais(self):\n return self._pais", "def get_pressure_setpoint(self) -> float:\n\n return self.send(self.cmd.GET_VACUUM_SET)", "def propabilityLVQ(self):\n self.labels = self.labelingLVQ()\n for i in range(self.labels.shape[0]):\n for j in range(self.labels.shape[1]):\n for k in range(self.labels.shape[2]):\n total = sum(self.labels[i, j, k] for i in range(self.labels.shape[0]))\n if total == 0. :\n continue\n else:\n self.propa[i, j, k] = self.labels[i, j, k] / total\n self.propa[i, j, k] = round(self.propa[i, j, k], 2)\n return self.propa", "def p() -> float:\n return 0.9", "def mean2pv(self, x):\n return x", "def vappressure(temp):\n tau = temp/_TTP\n earg = 0.\n for (a,b) in _C_PSUBL:\n earg += a * tau**(b-1)\n pres = _PTPE * numpy.exp(earg)\n return pres", "def saar99_rossby_empirical(P, BmV):\n tau_c = saar99_tau_c_E(BmV)\n return P/(4.*np.pi*tau_c)", "def _get_pressure(self):\n assert self.serial_connection.isOpen()\n\n self.serial_connection.write('PR1' + self.CR + self.LF)\n acknowledgement = self.serial_connection.readline()\n self._check_acknowledgement(acknowledgement)\n\n self.serial_connection.write(self.ENQ)\n err_msg_and_pressure = self.serial_connection.readline().rstrip(self.LF).rstrip(self.CR)\n\n err_msg = err_msg_and_pressure[0]\n pressure = float(err_msg_and_pressure[3:])\n\n if err_msg != '0':\n message = 'Pressure query resulted in an error: ' + self.MEASUREMENT_STATUS[err_msg]\n raise IOError(message)\n\n self.serial_connection.write(self.CR + self.LF)\n return pressure", "def test_isentropic_pressure_p_increase():\n lev = [85000, 90000., 95000., 100000.] * units.Pa\n tmp = np.ones((4, 5, 5))\n tmp[0, :] = 288.\n tmp[1, :] = 290.\n tmp[2, :] = 292.\n tmp[3, :] = 296.\n tmpk = tmp * units.kelvin\n isentlev = [296.] * units.kelvin\n isentprs = isentropic_interpolation(isentlev, lev, tmpk)\n trueprs = 1000. * units.hPa\n assert_almost_equal(isentprs[0], trueprs, 3)", "def birch_murnaghan(p, v):\n return p[0]+9.0/16*p[3]*p[1]*( ( (p[3]/v)**(2.0/3)-1 )**3*p[2]+\n ( (p[3]/v)**(2.0/3)-1 )**2*\n ( 6-4*(p[3]/v)**(2.0/3) ) )", "def alpha_pp(self, x):\n y = (2.*x)**3.\n return 0.10 - ( (self.alpha_p + 0.10) * y / (1. 
+ y) )", "def getEnthalpyOfVaporization(self,Temperature):\n\t\tB = self.Antoine_params[1]\n\t\tC = self.Antoine_params[2]\n\n\t\t# Eqn 7 from Epstein et al 2009\n\t\tHvap = 2.303*8.3145*Temperature*Temperature*B/((C + Temperature - 273.15)*(C + Temperature - 273.15))\n\t\treturn Hvap # units are J/molK", "def bar_to_pa(value: float) -> float:\n quantity_in_bar = value * misc.bar\n quantity_in_pa = quantity_in_bar.to(u.Pa)\n return quantity_in_pa.value", "def phase_Venus_1(alpha):\n phase = 10.**(-0.4*(- 1.044e-03*alpha + 3.687e-04*alpha**2. - 2.814e-06*alpha**3. + 8.938e-09*alpha**4.))\n return phase", "def mean2pv(self, x):\n raise NotImplementedError" ]
[ "0.7080288", "0.69284487", "0.68418944", "0.6743915", "0.66257185", "0.6582194", "0.6576109", "0.6573769", "0.6532861", "0.65210193", "0.65180355", "0.64524937", "0.64445084", "0.63541234", "0.6329281", "0.6282231", "0.62752527", "0.62735087", "0.62735087", "0.62735087", "0.62734765", "0.6263237", "0.6207266", "0.6198103", "0.6190183", "0.6182635", "0.6165736", "0.616461", "0.6116347", "0.6102871", "0.6079796", "0.6079796", "0.6079796", "0.6072444", "0.6071139", "0.60626894", "0.6056735", "0.6037736", "0.60338306", "0.60083973", "0.59993964", "0.59788805", "0.594822", "0.59450763", "0.5942546", "0.5939439", "0.5937553", "0.59321016", "0.5912393", "0.5901427", "0.5879102", "0.5843779", "0.5843556", "0.5827213", "0.58249575", "0.5803813", "0.5796363", "0.5796235", "0.579173", "0.57815665", "0.5778905", "0.57523656", "0.57221305", "0.571589", "0.57151365", "0.57009614", "0.5700857", "0.56918776", "0.5684582", "0.567057", "0.56587374", "0.5658038", "0.5651398", "0.5648448", "0.5641734", "0.5630686", "0.56278944", "0.5605756", "0.55840474", "0.556988", "0.55660856", "0.55633754", "0.55544776", "0.55544776", "0.5550304", "0.5529722", "0.5524715", "0.55206436", "0.55198544", "0.55192184", "0.5513196", "0.5508594", "0.55076224", "0.55021447", "0.54867935", "0.5485599", "0.5483712", "0.54768336", "0.5473629", "0.5462859" ]
0.80786455
0
Calculate mean free path via hard sphere approximation.
def mean_free_path(self): return self.dynamic_viscosity / self.density * np.sqrt(np.pi * 1e-3*self.molar_mass / (2*gas_constant * self.temperature))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sphere_sre(solution):\n a = 0\n bias = 0.2\n x = solution.get_x()\n x1 = x[:10]\n x2 = x[10:]\n value1 = sum([(i-bias)*(i-bias) for i in x1])\n value2 = 1/len(x) * sum([(i-bias)*(i-bias) for i in x2])\n return value1 + value2", "def mean_free_path(self, n, T):\n lambda_0 = 1 / (sqrt(2) * pi * self.d_ref **2 \n * n * (self.t_ref / T) ** (self.omega - 0.5))\n return lambda_0", "def sphere(\n network,\n pore_diameter='pore.diameter'\n):\n return 4/3*_pi*(network[pore_diameter]/2)**3", "def Mean_square_beam_radius(self,Amp,x,y,Amp_flag=True):\r\n \r\n if Amp_flag:\r\n I = (Amp*np.conjugate(Amp)).real\r\n else:\r\n I = Amp\r\n dx = x[0,1]-x[0,0]\r\n x_c,y_c = self.Light_Spot_Centroid(Amp,x,y,Amp_flag)\r\n Nominator_x = self.double_trapz(I*(x-x_c)**2,dx=dx,dy=dx)\r\n Nominator_y = self.double_trapz(I*(y-y_c)**2,dx=dx,dy=dx)\r\n Denominator = self.double_trapz(I,dx=dx,dy=dx)\r\n Res = Nominator_x/Denominator+Nominator_y/Denominator\r\n \r\n return np.sqrt(Res)", "def __init__(self, a, f):\n\n self.a = float(a)\n \"\"\"The equatorial radius in meters (readonly)\"\"\"\n self.f = float(f)\n \"\"\"The flattening (readonly)\"\"\"\n self._f1 = 1 - self.f\n self._e2 = self.f * (2 - self.f)\n self._ep2 = self._e2 / Math.sq(self._f1) # e2 / (1 - e2)\n self._n = self.f / ( 2 - self.f)\n self._b = self.a * self._f1\n # authalic radius squared\n self._c2 = (Math.sq(self.a) + Math.sq(self._b) *\n (1 if self._e2 == 0 else\n (Math.atanh(math.sqrt(self._e2)) if self._e2 > 0 else\n math.atan(math.sqrt(-self._e2))) /\n math.sqrt(abs(self._e2))))/2\n # The sig12 threshold for \"really short\". Using the auxiliary sphere\n # solution with dnm computed at (bet1 + bet2) / 2, the relative error in\n # the azimuth consistency check is sig12^2 * abs(f) * min(1, 1-f/2) / 2.\n # (Error measured for 1/100 < b/a < 100 and abs(f) >= 1/1000. For a given\n # f and sig12, the max error occurs for lines near the pole. If the old\n # rule for computing dnm = (dn1 + dn2)/2 is used, then the error increases\n # by a factor of 2.) 
Setting this equal to epsilon gives sig12 = etol2.\n # Here 0.1 is a safety factor (error decreased by 100) and max(0.001,\n # abs(f)) stops etol2 getting too large in the nearly spherical case.\n self._etol2 = 0.1 * Geodesic.tol2_ / math.sqrt( max(0.001, abs(self.f)) *\n min(1.0, 1-self.f/2) / 2 )\n if not(Math.isfinite(self.a) and self.a > 0):\n raise ValueError(\"Equatorial radius is not positive\")\n if not(Math.isfinite(self._b) and self._b > 0):\n raise ValueError(\"Polar semi-axis is not positive\")\n self._A3x = list(range(Geodesic.nA3x_))\n self._C3x = list(range(Geodesic.nC3x_))\n self._C4x = list(range(Geodesic.nC4x_))\n self._A3coeff()\n self._C3coeff()\n self._C4coeff()", "def sph(grlat, elong, ht):\n\n # Initialize Variables\n global cth, sth, clg, slg, dif, radn, gl # common/obs/\n gn = 9.798277692\n ae = 6378140.0\n f = 0.00335281\n rm = 0.00344978\n dr = 0.01745329252\n\n clong = np.cos(elong * dr)\n slong = np.sin(elong * dr)\n # latitude difference\n dvert = f * (1.0 + 0.5 * f) * np.sin(2.0 * grlat * dr) - 0.5 * f * f * np.sin(\n 4.0 * grlat * dr\n )\n gcclat = (3.1415926535898 / 2.0) - (grlat * dr - dvert)\n cthet = np.cos(gcclat)\n sthet = np.sin(gcclat)\n # geocentric radius\n radn = 1 - f * (cthet ** 2) * (1 + 1.5 * f * (sthet ** 2))\n # formulae for g are from jeffreys, 4.022 and 4.023\n g = gn * (\n 1\n + f\n - 1.5 * rm\n + f * (f - (27 / 14) * rm)\n + (2.5 * rm - f - f * (f - (39 / 14) * rm)) * (cthet ** 2)\n - (f / 2) * (7 * f - 15.0 * rm) * ((cthet * sthet) ** 2)\n )\n # free air correction\n g = g - g * (2.0 * ht * (1.0 + f + rm - 2.0 * f * (cthet ** 2)) / ae)\n\n # Conversion Here for Globals\n cth = cthet\n sth = sthet\n clg = clong\n slg = slong\n dif = dvert\n gl = g", "def asphericity(Rnm_eg):\n num = (Rnm_eg[0] - Rnm_eg[2])**2 + (Rnm_eg[1] - Rnm_eg[2])**2 + (Rnm_eg[0] - Rnm_eg[1])**2\n dem = 2*(Rnm_eg[0] + Rnm_eg[1] + Rnm_eg[2])**2\n Asphere = num/dem\n return Asphere", "def asphericity(Rnm_eg):\n num = (Rnm_eg[0] - Rnm_eg[2])**2 + (Rnm_eg[1] - Rnm_eg[2])**2 + (Rnm_eg[0] - Rnm_eg[1])**2\n dem = 2*(Rnm_eg[0] + Rnm_eg[1] + Rnm_eg[2])**2\n Asphere = num/dem\n return Asphere", "def sphere(indiv):\n return sum([ x ** 2 for x in indiv])", "def sphere(self, x):\r\n # return np.random.rand(1)[0]**0 * sum(x**2) + 1 * np.random.rand(1)[0]\r\n return sum((x+0)**2)", "def mHollowSphere(a=3, b=6, N=250):\n a = float(a)\n b = float(b)\n N = int(N)\n rmin = 0\n rmax = 2*b\n dr = (rmax-rmin)/float(N)\n r = np.zeros((N))\n g = np.zeros((N))\n for i in range(N):\n r[i] = rmin+i*dr\n g[i] = 0\n if r[i] >= a and r[i] < b:\n g[i] = (r[i]-a)/(b-a)/np.power(r[i], 2)\n elif r[i] >= b:\n g[i] = 1/np.power(r[i], 2)\n return r, g", "def odf(self, sphere):\n self.gqi_vector = self.model.cache_get('gqi_vector', key=sphere)\n if self.gqi_vector is None:\n if self.model.method == 'gqi2':\n H = squared_radial_component\n # print self.gqi_vector.shape\n self.gqi_vector = np.real(H(np.dot(\n self.model.b_vector, sphere.vertices.T) *\n self.model.Lambda))\n if self.model.method == 'standard':\n self.gqi_vector = np.real(np.sinc(np.dot(\n self.model.b_vector, sphere.vertices.T) *\n self.model.Lambda / np.pi))\n self.model.cache_set('gqi_vector', sphere, self.gqi_vector)\n\n return np.dot(self.data, self.gqi_vector)", "def hardSphereRadius(self):\n\n return self.__hardSphereRadius", "def geometric_tortuosity(maze):\n pathsTotal = []\n path_star_list = findPoints(maze, \"S\")\n\n total_caminos = []\n unit_caminos = 0\n array_path = np.array(maze)\n line = (array_path.shape)[2]\n global path\n 
i = 0\n path_star_list = endPoints(maze[0], 'S')\n # print(path_star_list)\n listEndPoints = endPoints(maze[-1], \"E\")\n # print(path_star_list)\n # print(listEndPoints)\n toTal = len(listEndPoints)*len(path_star_list)\n\n for star in path_star_list:\n caminos = []\n for end in listEndPoints:\n print(\"camino:\"+str(i+1)+\"/\"+str(toTal))\n\n path = astar(maze, star, end)\n\n if path != None:\n pathsTotal.append(path)\n\n i += 1\n result = 0\n # caminos.append(path)\n # total_caminos.append(caminos)\n try:\n x = map(valuepath, path)\n result = sum(x)\n except:\n pass\n\n caminos.append(result)\n unit_caminos += 1\n\n total_caminos.append(min(caminos))\n\n valor = (np.mean(np.array(total_caminos)))\n geometric_tortusity = valor/(int(line)-1)\n return geometric_tortusity, pathsTotal\n # return \"f\",\"f\"", "def _calc_solar_from_clouds_and_angle(hr, ds_path):\n # Solar radiation [W/m^2] incident on top of atmosphere\n Q_o = 1368.0\n # Cloud model based on Dobson and Smith, table 5\n # SEA -- May 2010 : redid the cloud parametrization based on UBC\n # Solar data (/ocean/shared/SoG/met/solar/) fitting Q to cos_Z\n # (not Q/cos_Z as Kate did). Allen and Wolfe (2013). (0) no\n # clouds, (1) 1/10 cloud fraction (10) 100% clouds. Four sig\n # figs are what comes out of matlab but standard deviations are\n # 40W/m2 for low cloud fraction to 120 W/m2 for 6-9 cloud\n # fraction to 85 W/m2 for completely cloudy.\n cloud_consts = SimpleNamespace(\n A=numpy.array(\n [\n 0.6337,\n 0.6149,\n 0.5861,\n 0.5512,\n 0.5002,\n 0.4649,\n 0.4225,\n 0.3669,\n 0.2468,\n 0.1981,\n 0.0841,\n ]\n ),\n B=numpy.array(\n [\n 0.1959,\n 0.2119,\n 0.2400,\n 0.2859,\n 0.3192,\n 0.3356,\n 0.3339,\n 0.3490,\n 0.4427,\n 0.3116,\n 0.2283,\n ]\n ),\n )\n # Local standard time\n ## WARNING: .to(\"PST\") may be fragile and incorrect for summer-time dates\n lst = hr.to(\"PST\")\n # day_time is in seconds, LST\n day_time = (lst - lst.floor(\"day\")).seconds\n # hour of day as degrees from noon\n hour = (day_time / 3600 - 12) * 15\n # day is year-day\n day = (lst - lst.floor(\"year\")).days\n # solar declination [radians]\n declination = (\n 23.45 * numpy.pi / 180 * numpy.sin((284 + day) / 365.25 * 2 * numpy.pi)\n )\n # Latitude of approximate centre of model domain in radians\n lat = numpy.pi * 50 / 180\n # solar elevation\n elev_sin = numpy.sin(declination) * numpy.sin(lat)\n elev_cos = numpy.cos(declination) * numpy.cos(lat)\n cos_Z = elev_sin + elev_cos * numpy.cos(numpy.pi / 180 * hour)\n # cos of -hour_angle in radians\n hour_angle = numpy.tan(lat) * numpy.tan(declination)\n # assume we are south of the Arctic Circle\n day_length = numpy.arccos(-hour_angle) / 15 * 2 * 180 / numpy.pi\n sunrise = 12 - 0.5 * day_length # hours\n sunset = 12 + 0.5 * day_length # hours\n Qso = Q_o * (1 + 0.033 * numpy.cos(day / 365.25 * 2 * numpy.pi))\n with xarray.open_dataset(ds_path) as ds:\n cf_value = ds.percentcloud * 10\n fcf = numpy.floor(cf_value).astype(int) # integer below cf value\n fcf = xarray.where(fcf == 10, 9, fcf).data\n ccf = fcf + 1 # integer above cf value\n if (sunrise > day_time / 3600) or (day_time / 3600 > sunset):\n # nighttime\n return xarray.zeros_like(ds.percentcloud)\n return (\n Qso\n * (\n cloud_consts.A[fcf] * (ccf - cf_value)\n + cloud_consts.A[ccf] * (cf_value - fcf)\n + (\n cloud_consts.B[fcf] * (ccf - cf_value)\n + cloud_consts.B[ccf] * (cf_value - fcf)\n )\n * cos_Z\n )\n * cos_Z\n )", "def on_sphere():\n vec = np.random.standard_normal(3)\n return vec / np.linalg.norm(vec)", "def 
vincenty_direct_solution(begin_lat, begin_lon, begin_azimuth, distance, a, b, f):\n # Convert latitude, longitude, azimuth of the begining point to radians\n lat1 = math.radians(begin_lat)\n lon1 = math.radians(begin_lon)\n alfa1 = math.radians(begin_azimuth)\n\n sinAlfa1 = math.sin(alfa1)\n cosAlfa1 = math.cos(alfa1)\n\n # U1 - reduced latitude\n tanU1 = (1 - f) * math.tan(lat1)\n cosU1 = 1 / math.sqrt(1 + tanU1 * tanU1)\n sinU1 = tanU1 * cosU1\n\n # sigma1 - angular distance on the sphere from the equator to begining point\n sigma1 = math.atan2(tanU1, math.cos(alfa1))\n\n # sinAlfa - azimuth of the geodesic at the equator\n sinAlfa = cosU1 * sinAlfa1\n cosSqAlfa = 1 - sinAlfa * sinAlfa\n uSq = cosSqAlfa * (a * a - b * b) / (b * b)\n A = 1 + uSq / 16384 * (4096 + uSq * (-768 + uSq * (320 - 175 * uSq)))\n B = uSq / 1024 * (256 + uSq * (-128 + uSq * (74 - 47 * uSq)))\n\n sigma = distance / (b * A)\n sigmap = 1\n\n while (math.fabs(sigma - sigmap) > 1e-12):\n cos2sigmaM = math.cos(2 * sigma1 + sigma)\n sinSigma = math.sin(sigma)\n cosSigma = math.cos(sigma)\n dSigma = B * sinSigma * (cos2sigmaM + B / 4 * (\n cosSigma * (-1 + 2 * cos2sigmaM * cos2sigmaM) - B / 6 * cos2sigmaM * (\n -3 + 4 * sinSigma * sinSigma) * (-3 + 4 * cos2sigmaM * cos2sigmaM)))\n sigmap = sigma\n sigma = distance / (b * A) + dSigma\n\n var_aux = sinU1 * sinSigma - cosU1 * cosSigma * cosAlfa1 # Auxiliary variable\n\n # Latitude of the end point in radians\n lat2 = math.atan2(sinU1 * cosSigma + cosU1 * sinSigma * cosAlfa1,\n (1 - f) * math.sqrt(sinAlfa * sinAlfa + var_aux * var_aux))\n\n lamb = math.atan2(sinSigma * sinAlfa1, cosU1 * cosSigma - sinU1 * sinSigma * cosAlfa1)\n C = f / 16 * cosSqAlfa * (4 + f * (4 - 3 * cosSqAlfa))\n L = lamb - (1 - C) * f * sinAlfa * (\n sigma + C * sinSigma * (cos2sigmaM + C * cosSigma * (-1 + 2 * cos2sigmaM * cos2sigmaM)))\n # Longitude of the second point in radians\n lon2 = (lon1 + L + 3 * math.pi) % (2 * math.pi) - math.pi\n\n # Convert to decimal degrees\n lat2_dd = math.degrees(lat2)\n lon2_dd = math.degrees(lon2)\n\n return lat2_dd, lon2_dd", "def calc_gasSD_inside_half_mass(galObj, gas_m, gas_pos, halfMassR='gas'):\n if halfMassR == 'gas':\n half_mass_radius = galObj.radii['gas_half_mass'].in_units('kpc')\n elif halfMassR == 'stars':\n half_mass_radius = galObj.radii['stellar_half_mass'].in_units('kpc')\n\n extent = np.sqrt(gas_pos[:, 0]**2 + gas_pos[:, 1]**2 + gas_pos[:, 2]**2)\n mask = extent <= half_mass_radius\n gasSD = np.sum(gas_m[mask])/np.pi/(half_mass_radius*1.e3)**2\n print(\"gas SD from particles within half-mass: \")\n print(gasSD)\n print(\"gas SD from global gas mass: \")\n print(galObj.masses['gas'] / np.pi / (half_mass_radius*1.e3)**2)\n\n print(galObj.masses['gas'])\n print(np.sum(gas_m[mask]))\n # hmmm.....\n # import pdb; pdb.set_trace()\n return gasSD", "def odf(self, sphere):\n raise NotImplementedError(\"To be implemented in sub classes\")", "def calc_gravitational_energy(self):\n\n star = self.star\n\n M, K, N = star.mesh_size\n ph = star.phi_coords\n mu = star.mu_coords\n r = star.r_coords\n\n def S1(j, k):\n sum = 0\n\n for i in range(0, M - 2, 2):\n sum += (1 / 6) * (ph[i + 2] - ph[i]) * (star.rho[i, j, k] * star.Phi[i, j, k] + 4 *\n star.rho[i + 1, j, k] * star.Phi[i + 1, j, k] +\n star.rho[i + 2, j, k] * star.Phi[i + 2, j, k])\n return 2 * sum\n\n def S2(k):\n sum = 0\n\n for j in range(0, K - 2, 2):\n sum += (1 / 6) * (mu[j + 2] - mu[j]) * \\\n (S1(j, k) + 4 * S1(j + 1, k) + S1(j + 2, k))\n\n return 2 * sum\n\n W = 0\n\n for k in range(0, N - 2, 2):\n W 
-= 0.5 * (1 / 6) * (r[k + 2] - r[k]) * (r[k]**2 * S2(k) +\n 4 * r[k + 1]**2 * S2(k + 1) +\n r[k + 2]**2 * S2(k + 2))\n\n return W", "def calc_gravitational_energy(self):\n\n star = self.star\n\n K, N = star.mesh_size\n mu = star.mu_coords\n r = star.r_coords\n\n def S1(j):\n return np.sum((mu[2::2] - mu[:-2:2]) * (star.rho[:-2:2, j] * star.Phi[:-2:2, j] +\n 4 * star.rho[1:-1:2, j] * star.Phi[1:-1:2, j] +\n star.rho[2::2, j] * star.Phi[2::2, j])) / 6\n\n W = 0\n\n for j in range(0, N - 2, 2):\n W += (r[j + 2] - r[j]) * (r[j]**2 * S1(j) +\n 4 * r[j + 1]**2 * S1(j + 1) +\n r[j + 2]**2 * S1(j + 2))\n\n return -1 / 3 * np.pi * W", "def mean_radius(self):\n return (self.semimajor_axis + self.semimedium_axis + self.semiminor_axis) / 3", "def vincenty_direct_solution(begin_lat, begin_lon, begin_azimuth, distance, a, b, f):\n # Convert latitude, longitude, azimuth of the begining point to radians\n lat1 = math.radians(begin_lat)\n lon1 = math.radians(begin_lon)\n alfa1 = math.radians(begin_azimuth)\n\n sinAlfa1 = math.sin(alfa1)\n cosAlfa1 = math.cos(alfa1)\n \n # U1 - reduced latitude\n tanU1 = (1 - f) * math.tan(lat1)\n cosU1 = 1 / math.sqrt(1 + tanU1 * tanU1)\n sinU1 = tanU1 * cosU1\n \n # sigma1 - angular distance on the sphere from the equator to begining point\n sigma1 = math.atan2(tanU1, math.cos(alfa1))\n \n # sinAlfa - azimuth of the geodesic at the equator\n sinAlfa = cosU1 * sinAlfa1\n cosSqAlfa = 1 - sinAlfa * sinAlfa\n uSq = cosSqAlfa * (a * a - b * b) / (b * b)\n A = 1 + uSq/16384 * (4096 + uSq * (-768 + uSq * (320 - 175 * uSq)))\n B = uSq/1024 * (256 + uSq * (-128 + uSq * (74 - 47 * uSq)))\n \n sigma = distance / (b * A)\n sigmap = 1\n \n while (math.fabs(sigma - sigmap) > 1e-12):\n cos2sigmaM = math.cos(2 * sigma1 + sigma)\n sinSigma = math.sin(sigma)\n cosSigma = math.cos(sigma)\n dSigma = B*sinSigma*(cos2sigmaM+B/4*(cosSigma*(-1+2*cos2sigmaM*cos2sigmaM)-B/6*cos2sigmaM*(-3+4*sinSigma*sinSigma)*(-3+4*cos2sigmaM*cos2sigmaM))) \n sigmap = sigma\n sigma = distance / (b * A) + dSigma\n \n var_aux = sinU1 * sinSigma - cosU1 * cosSigma * cosAlfa1\n \n # Latitude of the end point in radians\n lat2 = math.atan2(sinU1 * cosSigma + cosU1 * sinSigma*cosAlfa1, (1 - f)*math.sqrt(sinAlfa * sinAlfa + var_aux*var_aux))\n \n lamb = math.atan2 (sinSigma * sinAlfa1, cosU1 * cosSigma - sinU1 * sinSigma * cosAlfa1)\n C = f / 16 * cosSqAlfa * (4 + f * (4 - 3 * cosSqAlfa))\n L = lamb - (1 - C) * f * sinAlfa *(sigma + C * sinSigma * (cos2sigmaM + C * cosSigma * (-1 + 2 * cos2sigmaM * cos2sigmaM)))\n # Longitude of the second point in radians\n lon2 = (lon1 + L +3*math.pi)%(2*math.pi) - math.pi\n \n # Convert to decimal degrees\n lat2_dd = math.degrees(lat2) \n lon2_dd = math.degrees(lon2)\n \n return lat2_dd, lon2_dd", "def mean_surge_force(w, depth, radius, RAOs, N=10, deep_water=False):\n\n g = 9.81\n k = w**2 / g # XXX deep water\n ka = k * radius\n kd = k * depth\n\n # This is the part of the force which depends on the incoming waves\n hankel_term = lambda n, ka: 1 - ((hankel1d(n, ka) * hankel2d(n-1, ka)) /\n (hankel2d(n, ka) * hankel1d(n-1, ka)))\n terms = np.nansum([hankel_term(n, ka) for n in range(1, N)], axis=0)\n if deep_water:\n incoming_part = 1 * terms\n else:\n incoming_part = (1 + (2*kd/sinh(2*kd))) * terms\n\n # This is the part which depends on the first-order motion\n hankel_term2 = (hankel2d(0, ka) / hankel1d(0, ka) +\n hankel2d(2, ka) / hankel1d(2, ka))\n if deep_water:\n motion_part = 2j * ((RAOs[:, 0] - (RAOs[:, 4] / k)) *\n hankel_term2 / hankel2d(1, ka))\n else:\n motion_part = 2j * 
((RAOs[:, 4] * (1 - cosh(kd)) / k +\n RAOs[:, 0] * sinh(kd)) /\n (cosh(kd) * hankel2d(1, ka)) * hankel_term2)\n\n return 0.5 / ka * np.real(incoming_part + motion_part)", "def sphere_area(radius : number) -> number:\n area = 4*pi*radius*radius\n return area", "def Sphere_ExactSerendipityLagrangeQuad():\n\n mesh = Sphere_CubeToSerendipityLagrangeQuad(1)\n \n ################\n # Modifications for exact sphere\n ################\n # x=+1 side\n def posXvals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.ones(xi1.shape);yb=np.array(-xi1);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n x = xb*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n y = yb*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n z = zb*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n return np.vstack([x,y,z])\n def posXnormals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.ones(xi1.shape);yb=np.array(-xi1);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = -1.0 * xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-yb+2.0*yb*zz/3.0)\n dxdxi2 = 1.0 * xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-zb+2.0*yy*zb/3.0)\n dydxi1 = -1.0 * np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n dydxi2 = 1.0 * yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-zb+2.0*xx*zb/3.0)\n dzdxi1 = -1.0 * zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-yb+2.0*xx*yb/3.0)\n dzdxi2 = 1.0 * np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n magnitude = np.sqrt(np.sum(J**2,axis=1))\n return J.T/magnitude\n def posXJ(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.ones(xi1.shape);yb=np.array(-xi1);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = -1.0 * xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-yb+2.0*yb*zz/3.0)\n dxdxi2 = 1.0 * xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-zb+2.0*yy*zb/3.0)\n dydxi1 = -1.0 * np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n dydxi2 = 1.0 * yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-zb+2.0*xx*zb/3.0)\n dzdxi1 = -1.0 * zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-yb+2.0*xx*yb/3.0)\n dzdxi2 = 1.0 * np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n return np.sqrt(np.sum(J**2,axis=1))\n mesh.eList[0].vals = posXvals\n mesh.eList[0].normals = posXnormals\n mesh.eList[0].J = posXJ\n \n def posYvals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1);yb=np.ones(xi1.shape);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n x = xb*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n y = yb*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n z = zb*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n return np.vstack([x,y,z])\n def posYnormals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1);yb=np.ones(xi1.shape);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = 1.0*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n dxdxi2 = 1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-zb+2.0*yy*zb/3.0)\n dydxi1 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-xb+2.0*xb*zz/3.0)\n dydxi2 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-zb+2.0*xx*zb/3.0)\n dzdxi1 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + 
xx*yy/3.0)**-0.5 * (-xb+2.0*xb*yy/3.0)\n dzdxi2 = 1.0*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n magnitude = np.sqrt(np.sum(J**2,axis=1))\n return J.T/magnitude\n def posYJ(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1);yb=np.ones(xi1.shape);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = 1.0*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n dxdxi2 = 1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-zb+2.0*yy*zb/3.0)\n dydxi1 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-xb+2.0*xb*zz/3.0)\n dydxi2 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-zb+2.0*xx*zb/3.0)\n dzdxi1 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-xb+2.0*xb*yy/3.0)\n dzdxi2 = 1.0*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n return np.sqrt(np.sum(J**2,axis=1))\n mesh.eList[1].vals = posYvals\n mesh.eList[1].normals = posYnormals\n mesh.eList[1].J = posYJ\n \n # x=-1 side\n def negXvals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=-np.ones(xi1.shape);yb=np.array(xi1);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n x = xb*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n y = yb*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n z = zb*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n return np.vstack([x,y,z])\n def negXnormals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=-np.ones(xi1.shape);yb=np.array(xi1);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = 1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-yb+2.0*yb*zz/3.0)\n dxdxi2 = 1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-zb+2.0*yy*zb/3.0)\n dydxi1 = 1.0*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n dydxi2 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-zb+2.0*xx*zb/3.0)\n dzdxi1 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-yb+2.0*xx*yb/3.0)\n dzdxi2 = 1.0*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n magnitude = np.sqrt(np.sum(J**2,axis=1))\n return J.T/magnitude\n def negXJ(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=-np.ones(xi1.shape);yb=np.array(xi1);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = 1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-yb+2.0*yb*zz/3.0)\n dxdxi2 = 1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-zb+2.0*yy*zb/3.0)\n dydxi1 = 1.0*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n dydxi2 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-zb+2.0*xx*zb/3.0)\n dzdxi1 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-yb+2.0*xx*yb/3.0)\n dzdxi2 = 1.0*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n return np.sqrt(np.sum(J**2,axis=1))\n mesh.eList[2].vals = negXvals\n mesh.eList[2].normals = negXnormals\n mesh.eList[2].J = negXJ\n\n # y=-1 side\n def negYvals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(-xi1);yb=-np.ones(xi1.shape);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n x = xb*np.sqrt(1.0 - yy/2.0 - zz/2.0 + 
yy*zz/3.0)\n y = yb*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n z = zb*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n return np.vstack([x,y,z])\n def negYnormals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(-xi1);yb=-np.ones(xi1.shape);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2; \n dxdxi1 = -1.0*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n dxdxi2 = 1.0*0.5*xb*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5*(-zb+2.0*yy*zb/3.0)\n dydxi1 = -1.0*0.5*yb*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5*(-xb+2.0*xb*zz/3.0)\n dydxi2 = 1.0*0.5*yb*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5*(-zb+2.0*xx*zb/3.0)\n dzdxi1 = -1.0*0.5*zb*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5*(-xb+2.0*xb*yy/3.0)\n dzdxi2 = 1.0*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0) \n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n magnitude = np.sqrt(np.sum(J**2,axis=1))\n return J.T/magnitude\n def negYJ(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(-xi1);yb=-np.ones(xi1.shape);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2; \n dxdxi1 = -1.0*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n dxdxi2 = 1.0*0.5*xb*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5*(-zb+2.0*yy*zb/3.0)\n dydxi1 = -1.0*0.5*yb*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5*(-xb+2.0*xb*zz/3.0)\n dydxi2 = 1.0*0.5*yb*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5*(-zb+2.0*xx*zb/3.0)\n dzdxi1 = -1.0*0.5*zb*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5*(-xb+2.0*xb*yy/3.0)\n dzdxi2 = 1.0*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0) \n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n return np.sqrt(np.sum(J**2,axis=1))\n mesh.eList[3].vals = negYvals\n mesh.eList[3].normals = negYnormals\n mesh.eList[3].J = negYJ\n \n # z=+1 side\n def posZvals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1)\n yb=np.array(-xi2)\n zb=np.ones(xi1.shape)\n xx=xb**2;yy=yb**2;zz=zb**2;\n x = xb*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n y = yb*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n z = zb*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n return np.vstack([x,y,z])\n def posZnormals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1);yb=np.array(-xi2);zb=np.ones(xi1.shape);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = 1.0*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n dxdxi2 = -1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-yb+2.0*yb*zz/3.0)\n dydxi1 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-xb+2.0*xb*zz/3.0)\n dydxi2 = -1.0*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n dzdxi1 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-xb+2.0*xb*yy/3.0)\n dzdxi2 = -1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-yb+2.0*xx*yb/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n magnitude = np.sqrt(np.sum(J**2,axis=1))\n return J.T/magnitude\n def posZJ(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1);yb=np.array(-xi2);zb=np.ones(xi1.shape);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = 1.0*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n dxdxi2 = -1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-yb+2.0*yb*zz/3.0)\n dydxi1 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-xb+2.0*xb*zz/3.0)\n dydxi2 = 
-1.0*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n dzdxi1 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-xb+2.0*xb*yy/3.0)\n dzdxi2 = -1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-yb+2.0*xx*yb/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n return np.sqrt(np.sum(J**2,axis=1))\n mesh.eList[4].vals = posZvals\n mesh.eList[4].normals = posZnormals\n mesh.eList[4].J = posZJ\n \n # z=-1 side\n def negZvals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1);yb=np.array(xi2);zb=-np.ones(xi1.shape);\n xx=xb**2;yy=yb**2;zz=zb**2;\n x = xb*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n y = yb*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n z = zb*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n return np.vstack([x,y,z])\n def negZnormals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1);yb=np.array(xi2);zb=-np.ones(xi1.shape);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = 1.0*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n dxdxi2 = 1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-yb+2.0*yb*zz/3.0)\n dydxi1 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-xb+2.0*xb*zz/3.0)\n dydxi2 = 1.0*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n dzdxi1 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-xb+2.0*xb*yy/3.0)\n dzdxi2 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-yb+2.0*xx*yb/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n magnitude = np.sqrt(np.sum(J**2,axis=1))\n return J.T/magnitude\n def negZJ(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1);yb=np.array(xi2);zb=-np.ones(xi1.shape);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = 1.0*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n dxdxi2 = 1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-yb+2.0*yb*zz/3.0)\n dydxi1 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-xb+2.0*xb*zz/3.0)\n dydxi2 = 1.0*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n dzdxi1 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-xb+2.0*xb*yy/3.0)\n dzdxi2 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-yb+2.0*xx*yb/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n return np.sqrt(np.sum(J**2,axis=1))\n mesh.eList[5].vals = negZvals\n mesh.eList[5].normals = negZnormals\n mesh.eList[5].J = negZJ\n \n for e in mesh.eList:\n e.ExactElement = True\n \n return mesh", "def mean_surge_force2(w, depth, radius, RAOs, N=10, deep_water=False):\n\n surge_rao = RAOs[:, 0] - depth*RAOs[:, 4]\n pitch_rao = RAOs[:, 4]\n\n g = 9.81\n k = w**2 / g # XXX deep water\n ka = k * radius\n kd = k * depth\n\n # This is the part of the force which depends on the incoming waves\n hankel_term = lambda n, ka: 1 - ((hankel1d(n, ka) * hankel2d(n-1, ka)) /\n (hankel2d(n, ka) * hankel1d(n-1, ka)))\n terms = np.nansum([hankel_term(n, ka) for n in range(1, N)], axis=0)\n if deep_water:\n incoming_part = 1 * terms\n else:\n incoming_part = (1 + (2*kd/sinh(2*kd))) * terms\n\n # This is the part which depends on the first-order motion\n hankel_term2 = (hankel2d(0, ka) / hankel1d(0, ka) +\n hankel2d(2, ka) / hankel1d(2, ka))\n if deep_water:\n motion_part = 2j * ((surge_rao + pitch_rao * depth * (1 - (1/kd))) *\n hankel_term2 / hankel2d(1, ka))\n else:\n motion_part = 2j * 
((pitch_rao * (1 - cosh(kd) + kd*sinh(kd)) +\n surge_rao * k * sinh(kd)) /\n (k * cosh(kd) * hankel2d(1, ka))\n * hankel_term2)\n\n return 0.5 / ka * np.real(incoming_part + motion_part)", "def sphere_norm_by_layer(M):\r\n M_inner = M\r\n shape = np.shape(M_inner)\r\n if len(shape) == 3:\r\n for i in range(shape[0]):\r\n norm = np.sqrt(np.sum(M_inner[i, :, :] ** 2))\r\n M_inner[i, :, :] = M_inner[i, :, :] / norm\r\n return M_inner\r\n if len(shape) == 2:\r\n norm = np.sqrt(np.sum(M_inner ** 2))\r\n M_inner = M_inner / norm\r\n return M_inner", "def calc_SFRSD_inside_half_mass(galObj, gas_SFR, gas_m, gas_pos, halfMassR='gas'):\n\n if halfMassR == 'gas':\n half_mass_radius = galObj.radii['gas'].in_units('kpc')\n elif halfMassR == 'stars':\n half_mass_radius = galObj.radii['stellar_half_mass'].in_units('kpc')\n\n extent = np.sqrt(gas_pos[:, 0]**2 + gas_pos[:, 1]**2 + gas_pos[:, 2]**2)\n print(extent.shape)\n mask = extent <= half_mass_radius\n print(\"SFR inner: {:.2f}\".format(np.sum(gas_SFR[mask])))\n print(\"SFR global: {:.2f}\".format(galObj.sfr))\n #\n SFRSD = np.sum(gas_SFR[mask]) / np.pi/half_mass_radius**2\n print(\"SFRSD: {:.2f}\".format(SFRSD))\n print(galObj.sfr / np.pi / half_mass_radius**2)\n return SFRSD", "def partsphere(self, x):\r\n self.counter += 1\r\n # return np.random.rand(1)[0]**0 * sum(x**2) + 1 * np.random.rand(1)[0]\r\n dim = len(x)\r\n x = array([x[i % dim] for i in range(2*dim)])\r\n N = 8\r\n i = self.counter % dim\r\n #f = sum(x[i:i + N]**2)\r\n f = sum(x[np.random.randint(dim, size=N)]**2)\r\n return f", "def mt(P_1,V0_1,meanF_1,rho): \n psi = np.arctan2(V0_1[2],-V0_1[0])\n \n # Find swept ares\n idx_zmax = np.argmax(P_1[:,-1,2])\n idx_ymax = np.argmax(P_1[:,-1,1])\n idx_zmin = np.argmin(P_1[:,-1,2])\n \n Ad = np.linalg.norm(P_1[idx_zmax,-1,2]-P_1[idx_zmin,-1,2])*P_1[idx_ymax,-1,1]\n print P_1[idx_zmax,-1,2]\n V0 = np.linalg.norm(V0_1)\n \n Vi_1new = np.zeros_like(V0_1,dtype=float)\n\n while True:\n Vi_1 = Vi_1new\n \n Vi_1new[0] = meanF_1[0] / (2 * rho * Ad * np.sqrt( (V0*np.cos(psi)+Vi_1[0])**2 + (-V0*np.sin(psi)+Vi_1[2])**2 )) \n Vi_1new[2] = meanF_1[2] / (2 * rho * Ad * np.sqrt( (V0*np.cos(psi)+Vi_1[0])**2 + (-V0*np.sin(psi)+Vi_1[2])**2 )) \n \n if np.linalg.norm(Vi_1-Vi_1new) < 0.001:\n break\n\n return -Vi_1", "def external_radius_function(self, gama):\n radius_external = self.radius_stator\n xre = radius_external * np.cos(gama)\n yre = radius_external * np.sin(gama)\n\n return radius_external, xre, yre", "def fix_sphere_h (center_x, center_y, center_z, radius, centers, radii, len_points, list_of_a):\n g_x = []\n g_y = []\n g_z = []\n points = [hydrogen_coord_gen(center_x, center_y, center_z, radius) for i in range(0, len_points)] \n x = [points[i][0] for i in range(0, len(points))] \n y = [points[i][1] for i in range(0, len(points))]\n z = [points[i][2] for i in range(0, len(points))]\n for i in range(0, len(points)):\n check = 0\n check_b = 0\n j = 0\n while (j <= (len(centers) - 1) and (check == 0)): \n if (calculate_3D_distance_2_centers(x[i], y[i], z[i], centers[j][0], centers[j][1], centers[j][2]) < radii[j]):\n check += 1\n j += 1\n h = 0\n while ((check_b == 0) and (h <= len(list_of_a) -1)):\n if (calculate_3D_distance_2_centers(x[i], y[i], z[i], list_of_a[h].x, list_of_a[h].y, list_of_a[h].z) <= 1.50): \n check_b += 1\n h += 1\n if ((check == 0) and (check_b == 0)):\n g_x.append(x[i])\n g_y.append(y[i])\n g_z.append(z[i])\n return g_x, g_y, g_z", "def test_function(self):\n # almost spherical case\n x = 1.\n y = 1.\n e1, e2 = 5e-5, 0.\n sigma = 
1.\n amp = 2.\n\n f_ = self.gaussian_kappa_ellipse.function(x, y, amp, sigma, e1, e2)\n\n r2 = x*x + y*y\n f_sphere = amp/(2.*np.pi*sigma**2) * sigma**2 * (np.euler_gamma -\n expi(-r2/2./sigma**2) + np.log(r2/2./sigma**2))\n\n npt.assert_almost_equal(f_, f_sphere, decimal=4)\n\n # spherical case\n e1, e2 = 0., 0.\n f_ = self.gaussian_kappa_ellipse.function(x, y, amp, sigma, e1, e2)\n\n npt.assert_almost_equal(f_, f_sphere, decimal=4)", "def mean_radius(self):\n return self._mean_radius", "def sphere_generator():\n\n sphericalRadius = np.sqrt(N / (4 * np.pi * pointDensity))\n sphericalThreshold = sphericalRadius * np.arccos(1 - 2 * thresholdFrac)\n\n data_sphere = []\n # np.random.seed(2020)\n for r in range(num_graphs):\n coords = sample_spherical(N, sphericalRadius, 3)\n # computes the adjacency matrix\n Adj_Matrix = np.zeros((N, N))\n for i in range(N):\n for j in range(N):\n a = coords[:, i]\n b = coords[:, j]\n dot_prod = np.dot(a, b)/sphericalRadius**2\n dot_prod = min(dot_prod, 1) # <-- sometimes np.dot returns 1.00000000002, messing up np.arccos()\n\n \"\"\" note that when np.arrcos gets 1, it returns a nan \"\"\"\n theta = np.arccos(dot_prod) # gets the angle between a and b (in radians)\n\n # ij_dist = np.linalg.norm(a-b) # calculate euclidean distance\n ij_dist = sphericalRadius * theta # arclength distance\n if ij_dist < sphericalThreshold:\n Adj_Matrix[i, j] = 1 # nodes that are connected are assigned a 1 in the matrix\n\n data_sphere.append(Adj_Matrix)\n\n return data_sphere", "def f(k):\n return k * k * k * k * pk(k, suppression) * spherical_jn(0, k * r)", "def local_density_mean(self):\n\n # the simulation units are msun / kpc ^3\n local = np.mean(self.dens)\n\n return local", "def distance_to_galactic_center(self):\n l, b = self.galactic_coords\n h_star_gcp = self.distance * np.sin(b)\n d_star_sun = self.distance * np.cos(b)\n d_star_gc = np.sqrt(d_star_sun**2 + d_sun_GC**2 - 2*d_star_sun*d_sun_GC*np.cos(l))\n return d_star_gc", "def fix_sphere_m (center_x, center_y, center_z, radius, centers, radii, len_points):\n \n g_x = []\n g_y = []\n g_z = []\n points = [hydrogen_coord_gen(center_x, center_y, center_z, radius) for i in range(0, len_points)] \n x = [points[i][0] for i in range(0, len(points))] \n y = [points[i][1] for i in range(0, len(points))]\n z = [points[i][2] for i in range(0, len(points))]\n\n for i in range(0, len(points)):\n check = 0\n j = 0\n while (j <= (len(centers) - 1) and (check == 0)): \n if (calculate_3D_distance_2_centers(x[i], y[i], z[i], centers[j][0], centers[j][1], centers[j][2]) < radii[j]):\n check += 1\n j += 1\n if (check == 0):\n g_x.append(x[i])\n g_y.append(y[i])\n g_z.append(z[i])\n\n return g_x, g_y, g_z", "def f(k):\n return k * k * pk(k, suppression) * spherical_jn(0, k * r)", "def f(k):\n return k * k * k * k * pk(k, suppression) * spherical_jn(2, k * r)", "def sectorsphere(self, x):\r\n return sum(x**2) + (1e6-1) * sum(x[x<0]**2)", "def f(k):\n return k * k * k * pk(k, suppression) * spherical_jn(1, k * r)", "def getSphereRadius(self):\n return 1.5", "def galaxy():\n rot_ang = 1\n pol_ang = 1\n\n\n time_array = [datetime.datetime(2017, 5, 25, 2, 0),\n datetime.datetime(2017, 5, 26, 7, 0),\n #~ datetime.datetime(2017, 5, 28, 1, 0),\n #~ datetime.datetime(2017, 5, 30, 8, 0),\n datetime.datetime(2017, 6, 4, 2, 0)]\n\n lfdic = {1:{'name':'LI', 'lat':[26,33,19.676], 'long':[97,26,31.174], 't_offset':6.496132851851852},\n 2:{'name':'LII', 'lat':[34,4,43.497], 'long':[107,37,5.819], 't_offset':7.174552203703703},\n 3:{'name':'LIII', 
'lat':[38,25,59.0], 'long':[79,50,23.0], 't_offset':5.322648148148148},\n 4:{'name':'LIV', 'lat':[34,12,3.0], 'long':[118,10,18.0], 't_offset':7.87811111111111}}\n lfs = lfdic[4]\n long_radians = (lfs['long'][0] + lfs['long'][1]/60.0 + lfs['long'][2]/3600.0)*np.pi/180.0\n\n LoFASM = station(lfs['name'],lfs['lat'],lfs['long'],FOV_color='b',\n time='',frequency=20.0,one_ring='inner',\n rot_angle=rot_ang,pol_angle=pol_ang)\n innerNS_FOV = 0.61975795698554226 #LoFASM.lofasm.Omega()\n inner_conversion_NS = np.divide((np.power(np.divide(3.0*1.0e8,45.0e6),2)),(innerNS_FOV))\n\n print('Stage 1/2 Done.')\n\n powe = np.multiply(LoFASM.calculate_gpowervslstarray(time_array),inner_conversion_NS)\n power = 10*np.log10(np.array(powe))\n print('Stage 2/2 Done.')\n\n return power", "def exp_map(b, p):\n \"\"\"\n EXP_MAP The exponential map for n-spheres\n b is the base point (vector in R^n), norm(b)=1\n p is a point on the tangent plane to the hypersphere at b (also a vector in R^n)\n\n method can be 0 or 1:\n 0: hypersphere (e.g. quaternions)\n 1: dual quaternion\n \"\"\"\n if np.allclose(b, p):\n x = b\n else:\n theta = np.linalg.norm(b - p)\n dminusbx = np.sqrt(2 - 2. * np.cos(np.pi - theta))\n l = 2. * np.sin(theta / 2)\n alpha = np.arccos((4 + dminusbx ** 2 - l ** 2) / (4 * dminusbx))\n dpb = 2. * np.tan(alpha)\n v = b + ((p - b) / np.linalg.norm(p - b)) * dpb\n x = ((v + b) / np.linalg.norm(v + b)) * dminusbx - b\n\n return x", "def sphere_l_intensity(img):\n pixels = []\n for j in range(0, img.shape[0]):\n for i in range(1, 40):\n pixels.append(img[j, i])\n\n return np.mean(pixels)", "def sphere(geometry,\n psd_name,psd_shape,psd_loc,psd_scale,\n pore_seed='pore.seed',\n psd_offset=0,\n **kwargs):\n import scipy.stats as spst\n prob_fn = getattr(spst,psd_name)\n P = prob_fn(psd_shape,loc=psd_loc,scale=psd_scale)\n value = P.ppf(geometry[pore_seed])+psd_offset\n return value", "def getHardSphereRadius(self):\n\n if self.__hardSphereRadius is not None:\n return self.__hardSphereRadius\n return self.__scatteringRadius", "def sphere_r_intensity(img):\n pixels = []\n for j in range(0, img.shape[0]):\n for i in range(1, 40):\n pixels.append(img[j, img.shape[1] - i])\n\n return np.mean(pixels)", "def sh( values ):\n # ECMWF normalizes the spherical harmonic coeffs differently than NCEP.\n # (m=0,n=0 is global mean, instead of sqrt(2)/2 times global mean)\n fld = 2.*values/np.sqrt(2.)\n \n #------SPLITTING IT UP IN AN IMAGARY AND REAL PART--------\n fldr = fld[ 0::2 ] #annenhver verdi fra 0\n fldi = fld[ 1::2 ] #annenhver verdi fra 1\n fldn = np.zeros( fldr.shape, 'F' ) #blir halvparten så stor som orginale fld\n fldn.real = fldr #legges da til i fldn vectoren\n fldn.imag = fldi\n #----------------------------------------------------------\n \n nlons = 360 #Have a feeling it probably is number of values like grid val\n nlats = 1280 #web sais it shourld be 180.. 
wellwell, seems to work\n s = spharm.Spharmt( nlons, nlats ) \n \n data = s.spectogrd( fldn ) #Hvis nlats = 180, så feiler denne delen pga hvordan formelen fungerer..\n \n lons = ( 360./nlons ) * np.arange( nlons )\n lats = 90.-( 180./( nlats - 1 ) ) * np.arange( nlats )\n lons, lats = np.meshgrid( lons, lats )\n \n #stack grids side-by-side (in longitiudinal direction), so\n # any range of longitudes (between -360 and 360) may be plotted on a world map.\n lons = np.concatenate(( lons - 360, lons ), 1 )\n lats = np.concatenate(( lats, lats ), 1 )\n data = np.concatenate(( data, data ), 1 )\n \n return lats, lons, data", "def average_normal_projections(fr,mvec,pivot,maxflux,do_inflate=False):\n\tglobal surf,surfs,mesh\n\t#---! getting: calcs/codes/mesh.py:24: RuntimeWarning: invalid value encountered in divide ... in vecnorm\n\t#---inflate the instantaneous surface\n\tthis_surf_inflated = surfs[fr]#inflate_lateral(surfs[fr],inflate_factor)\n\t#---find the points on the instantaneous surface which are nearest the points on the regular grid on the average\n\t#---convert instantaneous points to XYZ with the reference box vectors mvec\n\tinstant_all = boxstuff(height_recenter(literalize(this_surf_inflated,mvec),pivot=pivot,maxflux=maxflux),mvec)\n\t#---after literalizing the inflated points, we take only the points which are relevant to the base structure\n\t#---! is the order correct?\n\tif do_inflate:\n\t\tsource = surf_average_base\n\t\tinds = np.concatenate(np.transpose(np.meshgrid(*[np.arange(-inflate_factor,i+inflate_factor+1) \n\t\t\tfor i in source.shape])))\n\t\tbase = np.where(np.all((np.all(inds>0,axis=1),np.all(np.array(source.shape)>=inds,axis=1)),axis=0))[0]\n\t\tinstant = instant_all[base]\n\telse: instant = instant_all\n\t#---note that we make a tree from the instantaneous points then probe over the average surface\n\t#---! more efficient to do this in reverse, however it might not cover all of the average/reference points?\n\t#---prepare a KDTree. we use a fudge factor of 1000 epsilon to avoid angry errors about being outside the box\n\ttree = scipy.spatial.ckdtree.cKDTree(instant,boxsize=np.concatenate((mvec,mvec))+1000.*eps)\n\t#---find the nearest reference points for each instantaneous point\n\tclose,nns = tree.query(surf,k=1)\n\t#---given a mapping between instantaneous point and target position (on XY), project the instantaneous point\n\t#---...onto the tangent plane given by the reference point. note that this is obviously a minor approximation in \n\t#---...which we assume that the point is hovering \"above\" the reference point close enough that the projection onto\n\t#---...that tangent plane is correct. a more literal form of this might actually try to find the exact distance to \n\t#---...the triangle adjacent to the nearest reference vertex, but this would require adding extra neighbor\n\t#---...information and I think it takes the surface a bit too literally.\n\t#---! note that we could use the real points instead of regular grid points for the instantaneous point?\n\tdeviations = np.array([\n\t\tget_normal_fluctuation(\n\t\t\tnormal=mesh['vertnorms'][index],\n\t\t\ttarget=surf[index],\n\t\t\thover=instant[nns][index],\n\t\t\tvec=mvec) \n\t\tfor ii,index in enumerate(nns)])\n\t#---corners fail for whatever reason. 
could not get the do_inflate method working\n\tdeviations[np.isnan(deviations)] = 0.0\n\treturn deviations", "def spherefcn(x: np.ndarray) -> np.ndarray:\n if x.ndim == 1:\n x = x.reshape(-1, len(x))\n f = np.sum(x**2, axis=1)\n return f.reshape(-1, 1)", "def insphere(network,\n geometry,\n **kwargs):\n import warnings\n try:\n import pulp as pu\n Np = geometry.num_pores()\n value = _sp.zeros(Np)\n pore_map = geometry.map_pores(geometry.pores(),geometry._net)\n for geom_pore,net_pore in pore_map:\n net_throats = geometry._net.find_neighbor_throats(net_pore)\n geom_throats = geometry._net.map_throats(net_throats,geometry)[:,1]\n verts = geometry['throat.offset_vertices'][geom_throats]\n if len(verts) > 1:\n try:\n pts = np.vstack((i for i in verts if len(i)>0))\n except ValueError:\n pts = []\n if len(pts) > 4:\n \"Work out central point to use as initial guess\"\n c0 = np.mean(pts,axis=0)\n \"Compute convex hull to find points lying on the hull in order\"\n hull = ConvexHull(pts, qhull_options='QJ Pp')\n \"For each simplex making up the hull collect the end points\"\n A = pts[hull.simplices[:,0]]\n B = pts[hull.simplices[:,1]]\n C = pts[hull.simplices[:,2]]\n #I = np.array([[0,1],[-1,0]])\n \"Normal of the simplices\"\n #N = np.dot((B-A),I)\n N = np.cross((B-A),(C-A),axis=1)\n #L = np.sqrt(np.sum(np.square(N),axis=1))\n \"Normalize the normal vector\"\n L = np.linalg.norm(N,axis=1)\n F = np.vstack((L,L,L)).T\n N /= F\n \"If normals point out of hull change sign to point in\"\n pointing_out = (np.sum((A-c0)*N,axis=1)>0)\n N[pointing_out]*= -1\n \"Define Linear Program Variables\"\n \"The centre of the incircle adjustment\"\n cx = pu.LpVariable(\"cx\",None,None,pu.LpContinuous)\n cy = pu.LpVariable(\"cy\",None,None,pu.LpContinuous)\n cz = pu.LpVariable(\"cz\",None,None,pu.LpContinuous)\n \"Radius of the incircle\"\n R = pu.LpVariable(\"R\",0,None,pu.LpContinuous)\n \"Slack variables for shortest distance between centre and simplices\" \n S = pu.LpVariable.dict(\"SlackVariable\",range(len(A)),0,None,pu.LpContinuous)\n \"Set up LP problem\"\n prob = pu.LpProblem(\"FindInRadius\",pu.LpMaximize)\n \"Objective Function\"\n prob += R\n for i in range(len(A)):\n \" Ni.(C-Ai)-Si = 0\"\n prob += N[i][0]*(c0[0]+cx) + N[i][1]*(c0[1]+cy) + N[i][2]*(c0[2]+cz)- N[i][0]*A[i][0] - N[i][1]*A[i][1] - N[i][2]*A[i][2]- S[i] == 0\n \"Si >= R\"\n prob += S[i] >= R\n \"Solve the LP\"\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n prob.solve()\n \"As the radius is the objective function we can get it from the objective or as R.value()\"\n rad = prob.objective.value()\n #cen = c0 + np.array([cx.value(),cy.value(),cz.value()])\n value[geom_pore]=rad*2\n \n \n return value\n except ImportError:\n print(\"Cannot use insphere method without installing pulp package\")", "def _compute_mean(index, M, R, rake):\r\n mean = (a1[index] + _compute_linear_magnitude_term(index, M) + _compute_quadratic_magnitude_term(index, M) +\r\n _compute_logarithmic_distance_term(index, M, R) + _compute_faulting_style_term(index, rake))\r\n\r\n return mean", "def _compute_mean(index, M, R, rake):\r\n mean = (a1[index] + _compute_linear_magnitude_term(index, M) + _compute_quadratic_magnitude_term(index, M) +\r\n _compute_logarithmic_distance_term(index, M, R) + _compute_faulting_style_term(index, rake))\r\n\r\n return mean", "def compute_mean_square_speed(self):\n speeds = self.compute_speeds() # speed of all particles\n return np.mean(speeds**2) # mean square speed", "def calculate_root_statistics(self):\n total_length 
= 0\n total_radius = 0\n\n for i in range(len(self.pixel_list)):\n\n total_radius += self.pixel_list[i].radius + 0.5\n\n if i > 0:\n # Use the distance formula\n delta_x = self.pixel_list[i].x - self.pixel_list[i - 1].x\n delta_y = self.pixel_list[i].y - self.pixel_list[i - 1].y\n segment_length = math.sqrt(delta_x ** 2 + delta_y ** 2)\n total_length += segment_length\n\n self.total_length = total_length\n self.average_radius = total_radius / len(self.pixel_list)", "def update_only_total_statistics(self):\n\n self.average_radius = 0\n self.total_root_length = 0\n\n total_radius = 0\n\n for root in self.root_dict.values():\n\n self.total_root_length += root.total_length\n\n total_radius += root.total_length * root.average_radius\n\n self.average_radius = total_radius / self.total_root_length", "def averageInsideVertices(mesh):\r\n cmds.select(mesh)\r\n cmds.polySelectConstraint(m=3, t=0x0001, w=2)\r\n cmds.polySelectConstraint(dis=True)\r\n cmds.polyAverageVertex(i = 10, ch = 0)", "def real_sph_harm(m, n, theta, phi):\n m = atleast_1d(m)\n # find where m is =,< or > 0 and broadcasts to the size of the output\n m_eq0,junk,junk,junk = broadcast_arrays(m == 0, n, theta, phi)\n m_gt0,junk,junk,junk = broadcast_arrays(m > 0, n, theta, phi)\n m_lt0,junk,junk,junk = broadcast_arrays(m < 0, n, theta, phi)\n\n sh = sph_harm(m, n, theta, phi)\n real_sh = empty(sh.shape, 'double')\n real_sh[m_eq0] = sh[m_eq0].real\n real_sh[m_gt0] = sh[m_gt0].real * sqrt(2)\n real_sh[m_lt0] = sh[m_lt0].imag * sqrt(2)\n return real_sh", "def filled_sphere(shape, radius, center=None):\n\tr2 = radius*radius\n\tif center is None:\n\t\t### set to center of array\n\t\tcenter = (shape[0]-1)/2.0,(shape[1]-1)/2.0,(shape[2]-1)/2.0\n\tdef func(i0, i1, i2):\n\t\tii0 = i0 - center[0]\n\t\tii1 = i1 - center[1]\n\t\tii2 = i2 - center[2]\n\t\trr2 = ii0**2 + ii1**2 + ii2**2\n\t\tc = numpy.where(rr2<r2, 0.0, 1.0)\n\t\treturn c\n\treturn numpy.fromfunction(func, shape)", "def ComputeExternalForces(m, h, g, thetas, simp=True):\n assert len(m) == len(h) == len(thetas), \\\n f\"Mass {len(m)}, height {len(h)}, thetas {len(thetas)} size mismatch\"\n \n # compute the potential energy \n V = 0\n n = len(m)\n for i in range(n):\n V += m[i] * h[i] * g\n \n # compute the effect of the gravitational forces\n N = zeros(n, 1)\n for i in range(n):\n if thetas[i] == 0.0:\n N[i] = 0.0\n else:\n N[i] = diff(V, thetas[i])\n \n if simp:\n return simplify(N)\n return N", "def run_lpme(self) -> np.array:\n q = self.sphere.n\n signs = []\n for i in range(q):\n a = np.ones(q)\n a = a / np.sqrt(q)\n a_prime = np.copy(a)\n a_prime[i] = -a_prime[i]\n\n z_a = a * self.sphere.radius + self.sphere.origin\n z_a_prime = a_prime * self.sphere.radius + self.sphere.origin\n\n if self.oracle.compare(z_a, z_a_prime):\n signs.append(1.0)\n else:\n signs.append(-1.0)\n\n orthants = initialize_orthants(signs)\n\n # number of cycles\n nc = 4\n theta_list = [(orth.start + orth.stop) / 2 for orth in orthants]\n for _ in range(0, nc):\n for j in range(0, q - 1):\n theta_a = orthants[j].start\n theta_b = orthants[j].stop\n while abs(theta_b - theta_a) > self.e:\n theta_c = (theta_a * 3 + theta_b) / 4\n theta_d = (theta_a + theta_b) / 2\n theta_e = (theta_a + theta_b * 3) / 4\n\n theta_list[j] = theta_a\n vec_a = compute_vector(self.sphere, theta_list)\n\n theta_list[j] = theta_b\n vec_b = compute_vector(self.sphere, theta_list)\n\n theta_list[j] = theta_c\n vec_c = compute_vector(self.sphere, theta_list)\n\n theta_list[j] = theta_d\n vec_d = compute_vector(self.sphere, 
theta_list)\n\n theta_list[j] = theta_e\n vec_e = compute_vector(self.sphere, theta_list)\n\n # compare ac\n cac = self.oracle.compare(vec_a, vec_c)\n ccd = self.oracle.compare(vec_c, vec_d)\n cde = self.oracle.compare(vec_d, vec_e)\n ceb = self.oracle.compare(vec_e, vec_b)\n self.num_queries += 4\n\n if self.check_i:\n context = {\n \"theta_list\": theta_list,\n \"j\": j,\n \"theta_a\": theta_a,\n \"theta_b\": theta_b,\n \"theta_c\": theta_c,\n \"theta_d\": theta_d,\n \"theta_e\": theta_e,\n }\n self.check_inconsistency(cac, ccd, cde, ceb, context)\n\n if cac:\n theta_b = theta_d\n elif ccd:\n theta_b = theta_d\n elif cde:\n theta_a = theta_c\n theta_b = theta_e\n elif ceb:\n theta_a = theta_d\n else:\n theta_a = theta_d\n\n # update theta list\n theta_list[j] = (theta_a + theta_b) / 2\n\n # save theta list\n self.theta_list = theta_list\n return normalize(compute_vector(self.sphere, theta_list) - self.sphere.origin)", "def get_steering_vector():\n\n # Distance between elements in wavelengths\n players_distances = [i * ant_dist for i in range(n_antennas)]\n\n # Angle function (steering vector)\n ang_fun = np.empty(shape=(len(all_angles), n_antennas), dtype=\"complex128\")\n for th in all_angles:\n ang_fun[th] = [cmath.exp(-1j * 2 * cmath.pi * dist * cmath.cos(th * cmath.pi / 180)) for dist in\n players_distances]\n\n # Perform averaging of the steering vector to compare results with the averaged pseudospectrum\n ang_fun_avg = np.empty(shape=(int(len(all_angles) / ang_avg_fact), n_antennas), dtype=\"complex128\")\n avged_ang_range = [x + ang_avg_fact / 2 for x in range(0, len(all_angles), ang_avg_fact)]\n\n # Avg angles over some averaging factor\n for th in range(0, len(all_angles), ang_avg_fact):\n avg_th = int(th / ang_avg_fact)\n avgd_angle = sum([x for x in ang_fun[th:th + ang_avg_fact]]) / ang_avg_fact\n ang_fun_avg[avg_th] = avgd_angle\n\n return ang_fun, ang_fun_avg, avged_ang_range", "def spherearea(dia):\n r = dia*1e-4 # convert to cm\n return(4*np.pi*r**2)", "def distance_23(self, alphas, motor_positions):\n\n self.roof_vertex_z[1] = motor_positions[1] * math.cos(alphas[1])\n s2 = motor_positions[1] * math.sin(alphas[1])\n self.roof_vertex_x[1] = self.base_point[1][0] - (s2 * 0.5) # sin 30° = 1/2\n self.roof_vertex_y[1] = self.base_point[1][1] - (s2 * 0.8660254037844386) # cos 30° = sqrt(3) / 2\n\n self.roof_vertex_z[2] = motor_positions[2] * math.cos(alphas[2])\n s3 = motor_positions[2] * math.sin(alphas[2])\n self.roof_vertex_x[2] = self.base_point[2][0] - (s3 * 0.5) # sin 30° = 1/2\n self.roof_vertex_y[2] = self.base_point[2][1] + (s3 * 0.8660254037844386) # cos 30° = sqrt(3) / 2\n\n return math.sqrt(\n ((self.roof_vertex_x[2] - self.roof_vertex_x[1]) ** 2) +\n ((self.roof_vertex_y[1] - self.roof_vertex_y[2]) ** 2) +\n ((self.roof_vertex_z[2] - self.roof_vertex_z[1]) ** 2))", "def _calculate_distances(self):\n all_dists = []\n for ref in range(len(self.atoms)):\n if self.atoms[ref].symbol in self.exclude:\n continue\n indices = list(range(ref+1, len(self.atoms)))\n indices = self._filter_excluded(indices)\n if len(indices) == 0:\n continue\n dists = self.atoms.get_distances(ref, indices, mic=True)\n all_dists += list(dists)\n \n # Normalize by the mean distance\n return np.array(all_dists)/np.mean(all_dists)", "def compute_energy(self):\n\n # radiation energy\n Qsqrd = self.omega_coords[:,:,1]*self.omega_coords[:,:,1]\n Psqrd = self.omega_coords[:,:,0]*self.omega_coords[:,:,0]\n\n e_rad = (Psqrd/self.mode_mass + (self.mode_mass*self.omega**2)*Qsqrd)*.5\n\n # space charge 
energy\n Dsqrd = self.dc_coords[:,:,0]*self.dc_coords[:,:,0]\n\n e_drft = Dsqrd/(2.*self.mode_mass)\n\n energy = e_rad+e_drft\n\n return energy", "def getRoots(self):\n return [float(-self.getCoefficients()[0])/self.getCoefficients()[1]]", "def calculate_soma_surface(data: Data) -> float:\n\n soma = data.morphology.get_soma()\n return 4.0 * math.pi * soma['radius'] * soma['radius']", "def sphvol(r):\n return (4./3.)*np.pi*(r**3.)", "def calculate_H(s_lat,s_lon,e_lat,e_lon):\n R = 6371.0\n snlat = radians(s_lat)\n snlon = radians(s_lon)\n elat = radians(e_lat)\n elon = radians(e_lon)\n actual_dist = 6371.01 * acos(sin(snlat) * sin(elat) + cos(snlat) * cos(elat) * cos(snlon - elon))\n actual_dist = actual_dist * 1000\n return actual_dist", "def compute_centrifugal(self):\r\n # update the coordinates\r\n self.get_coords()\r\n\r\n # compute the centrifugal force\r\n self.centrifugal.assign(project(\r\n -1*self.rho*cross(self.omega, cross(self.omega, self.r)), self.V))", "def inradius(self) -> npt.NDArray[np.float_]:\n return dist(self.center, cast(Segment, self.edges[0]).midpoint)", "def generate_sphere_full():\n \n num_voxels = 31\n c = (15.0, 15.0, 15.0)\n\n data_x = []\n data_y = []\n data_z = []\n data_intensity = []\n\n volume = numpy.zeros((num_voxels, num_voxels, num_voxels))\n\n for x in range(num_voxels):\n for y in range(num_voxels):\n for z in range(num_voxels):\n\n if numpy.sqrt((x-c[0])**2 + (y-c[1])**2 + (z-c[2])**2) - 7.5 < 1.5:\n data_x.append(x)\n data_y.append(y)\n data_z.append(z)\n data_intensity.append(200.0)\n\n volume[x,y,z] = 200.0\n\n\n return data_x, data_y, data_z, data_intensity, volume", "def __podluzna(self, sph_func, R):\n q = self.omega / self.c\n return (spherical_gradient(self.l, q*R.r, sph_func)\n * _sph_harm(self.m, self.l, R.theta, R.phi)\n + sph_func(self.l, q*R.r)\n * sph_harm_gradient(self.m, self.l, q*R.r, R.theta, R.phi)\n ) / q", "def sphereArea(radius):\n area = 4 * math.pi * radius ** 2\n return area", "def sph_radial_basis(lon, lat, data, longitude, latitude, smooth=0.,\n epsilon=None, method='inverse', QR=False, norm='euclidean'):\n\n #-- remove singleton dimensions\n lon = np.squeeze(lon)\n lat = np.squeeze(lat)\n data = np.squeeze(data)\n longitude = np.squeeze(longitude)\n latitude = np.squeeze(latitude)\n #-- size of new matrix\n if (np.ndim(longitude) > 1):\n nlon,nlat = np.shape(longitude)\n sz = np.int(nlon*nlat)\n else:\n sz = len(longitude)\n\n #-- Check to make sure sizes of input arguments are correct and consistent\n if (len(data) != len(lon)) | (len(data) != len(lat)):\n raise Exception('Length of Longitude, Latitude, and Data must be equal')\n if (np.shape(longitude) != np.shape(latitude)):\n raise Exception('Size of output Longitude and Latitude must be equal')\n\n #-- create python dictionary of radial basis function formulas\n radial_basis_functions = {}\n radial_basis_functions['multiquadric'] = multiquadric\n radial_basis_functions['inverse_multiquadric'] = inverse_multiquadric\n radial_basis_functions['inverse'] = inverse_multiquadric\n radial_basis_functions['inverse_quadratic'] = inverse_quadratic\n radial_basis_functions['gaussian'] = gaussian\n radial_basis_functions['linear'] = linear\n radial_basis_functions['cubic'] = cubic\n radial_basis_functions['quintic'] = quintic\n radial_basis_functions['thin_plate'] = thin_plate\n #-- create python dictionary of radial basis function expansions\n radial_expansions = {}\n radial_expansions['multiquadric'] = multiquadratic_expansion\n radial_expansions['inverse_multiquadric'] = 
inverse_multiquadric_expansion\n radial_expansions['inverse'] = inverse_multiquadric_expansion\n radial_expansions['inverse_quadratic'] = inverse_quadratic_expansion\n radial_expansions['gaussian'] = gaussian_expansion\n #-- check if formula name is listed\n if method in radial_basis_functions.keys():\n RBF = radial_basis_functions[method]\n else:\n raise ValueError(\"Method {0} not implemented\".format(method))\n #-- check if formula name is valid for QR factorization method\n if QR and (method in radial_expansions.keys()):\n expansion = radial_expansions[method]\n elif QR and (method not in radial_expansions.keys()):\n raise ValueError(\"{0} expansion not available with QR\".format(method))\n #-- create python dictionary of distance functions (if not using QR)\n norm_functions = {}\n norm_functions['euclidean'] = distance_matrix\n norm_functions['GCD'] = angle_matrix\n if norm in norm_functions:\n norm_matrix = norm_functions[norm]\n else:\n raise ValueError(\"Distance Function {0} not implemented\".format(norm))\n\n #-- convert input lat and lon into cartesian X,Y,Z over unit sphere\n phi = np.pi*lon/180.0\n th = np.pi*(90.0 - lat)/180.0\n xs = np.sin(th)*np.cos(phi)\n ys = np.sin(th)*np.sin(phi)\n zs = np.cos(th)\n #-- convert output longitude and latitude into cartesian X,Y,Z\n PHI = np.pi*longitude.flatten()/180.0\n THETA = np.pi*(90.0 - latitude.flatten())/180.0\n XI = np.sin(THETA)*np.cos(PHI)\n YI = np.sin(THETA)*np.sin(PHI)\n ZI = np.cos(THETA)\n\n #-- Creation of data distance matrix (Euclidean or Great-Circle Distance)\n #-- Data to Data\n Rd = norm_matrix(np.array([xs, ys, zs]),np.array([xs, ys, zs]))\n N,M = np.shape(Rd)\n #-- if epsilon is not specified\n if epsilon is None:\n #-- calculate norm with mean distance\n uix,uiy = np.nonzero(np.tri(N,M=M,k=-1))\n epsilon = np.mean(Rd[uix,uiy])\n\n #-- QR factorization algorithm of Fornberg (2007)\n if QR:\n #-- calculate radial basis functions using spherical harmonics\n R,w = RBF_QR(th,phi,epsilon,data,expansion)\n n_harm = np.sqrt(np.shape(R)[0]).astype(np.int)\n #-- counter variable for filling spherical harmonic matrix\n index = 0\n #-- evaluation matrix E\n E = np.zeros((sz,np.int(n_harm**2)))\n for l in range(0,n_harm):\n #-- Each loop adds a block of columns of degree l to E\n E[:,index:2*l+index+1] = spherical_harmonic_matrix(l,THETA,PHI)\n index += 2*l + 1\n #-- calculate output interpolated array (or matrix)\n output = np.dot(E,np.dot(R,w))\n else:\n #-- Calculation of the PHI Matrix with smoothing\n PHI = np.zeros((N+1,M+1))\n PHI[:N,:M] = RBF(epsilon, Rd) + np.eye(N,M=M)*smooth\n #-- Augmentation of the PHI Matrix with a Constant Vector\n PHI[:N,M] = np.ones((N))\n PHI[N,:M] = np.ones((M))\n\n #-- Computation of the Weights\n DMAT = np.concatenate(([data,[0]]),axis=0)\n w = np.linalg.lstsq(PHI,DMAT[:,np.newaxis],rcond=-1)[0]\n\n #-- Computation of distance Matrix (Euclidean or Great-Circle Distance)\n #-- Data to Mesh Points\n Re = norm_matrix(np.array([XI,YI,ZI]),np.array([xs,ys,zs]))\n #-- calculate radial basis function for data-to-mesh matrix\n E = RBF(epsilon,Re)\n\n #-- Augmentation of the Evaluation Matrix with a Constant Vector\n P = np.ones((sz,1))\n E = np.concatenate(([E, P]),axis=1)\n #-- calculate output interpolated array (or matrix)\n output = np.dot(E,w)\n\n #-- reshape output to original dimensions and return\n if (np.ndim(longitude) == 1):\n return np.squeeze(output)\n else:\n return output.reshape(nlon,nlat)", "def objective_function(lam, segment_means, segment_lengths):\n return 
(segment_lengths*np.power(np.sin(segment_means*np.pi/lam), 2)).sum( )", "def compute_mesh_area_smart(mesh):\n mesh_surface_area = mesh.area\n return mesh_surface_area", "def sokal_sneath_coeff(self):\n a, c, _, b = self.to_ccw()\n return _div(a, a + 2 * (b + c))", "def intersection_ring(self, q_total):\n \n # WARNING: This ignores the effect of the incident angle\n \n \n\n # This is a point that intersects the Ewald sphere\n # (if incident_angle = 0)\n theta = np.arcsin(q_total/(2*self.get_k()))\n qx, qy, qz = 0, -q_total*np.sin(theta), q_total*np.cos(theta)\n \n #qx, qy, qz = 0, 0, q_total\n \n qxs = []\n qys = []\n qzs = []\n \n for rot_angle in np.linspace(0, 2*np.pi, num=200):\n qx_rot = qx*np.cos(rot_angle) + qz*np.sin(rot_angle)\n qy_rot = qy\n qz_rot = -qx*np.sin(rot_angle) + qz*np.cos(rot_angle)\n qxy_rot = np.sqrt(np.square(qx_rot)+np.square(qy_rot))\n if qx_rot<0:\n qxy_rot *= -1\n \n qxs.append( qx_rot )\n qys.append( qy_rot )\n qzs.append( qz_rot )\n \n return qxs, qys, qzs", "def potential_energies(self):\n # Create all pairs of planets\n pairs = itertools.combinations(self.bodies, 2)\n # Return the sum of all potential energies.\n return sum([-G * pair[0].mass * pair[1].mass /\n norm(pair[0].position - pair[1].position) for pair in pairs])", "def calcASA(atoms, probe=1.4, n_sphere_point=960):\r\n atoms.setRadii(getAtomRadii(atoms))\r\n\r\n sphere_points = generate_sphere_points(n_sphere_point)\r\n const = 4.0 * math.pi / len(sphere_points)\r\n\r\n test_point = [0.0, 0.0, 0.0]\r\n areas = []\r\n\r\n coords_all = atoms.getCoords()\r\n\r\n for i, atom_i in enumerate(atoms):\r\n\r\n neighbor_indices = find_neighbor_indices(atoms, probe, i)\r\n n_neighbor = len(neighbor_indices)\r\n j_closest_neighbor = 0\r\n radius = probe + atom_i.getRadius()\r\n\r\n n_accessible_point = 0\r\n for point in sphere_points:\r\n is_accessible = True\r\n\t test_point = np.dot(point,radius) + coords_all[i]\r\n cycled_indices = range(j_closest_neighbor, n_neighbor)\r\n cycled_indices.extend(range(j_closest_neighbor))\r\n\r\n for j in cycled_indices:\r\n atom_j = atoms[neighbor_indices[j]]\r\n r = atom_j.getRadius() + probe\r\n #diff_sq = np.linalg.norm(coords_all[neighbor_indices[j]] - test_point)\r\n diff_sq = pos_distance_sq(coords_all[neighbor_indices[j]], test_point)\r\n\t\tif diff_sq < r*r:\r\n j_closest_neighbor = j\r\n is_accessible = False\r\n break\r\n\t if is_accessible:\r\n n_accessible_point += 1\r\n\r\n area = const*n_accessible_point*radius*radius\r\n areas.append(area)\r\n\t#print str(atom_i.getResnum()) + \" \" + atom_i.getResname() + \" \" + str(area)\r\n return areas", "def twobody_acc(sat):\n pos = sat.getpos_sph()\n g_acc = [-G*M_EARTH/pos[0]**2, 0, 0]\n return g_acc", "def estimate_centroid(self):\r\n\t\tstrain = self.strain_distribution_compr(self.max_pure_compresive_strain,\\\r\n\t\t\tself.max_pure_compresive_strain)\r\n\t\tself.geometric_centrod = (self.depth/2) \r\n\t\tself.plastic_centroid = (self.depth/2)+\\\r\n\t\t\t(self.sectional_moment(strain, self.depth/2)/\\\r\n\t\t\tself.sectional_force(strain))", "def trust_region_solver(M, g, d_max, max_iter=2000, stepsize=1.0e-3):\n x = g / np.linalg.norm(g) * d_max\n for _ in range(max_iter):\n # gradient ascent\n x = x + stepsize * (M @ x + g)\n # projection to sphere\n x = x / np.linalg.norm(x) * d_max\n ## debug\n #loss = 0.5 * x.T @ M @ x + g.T @ x\n #print(f'Loss: {loss}')\n return x", "def energy(self):\n sum_energy = 0.0\n for i in range(0,self.natoms-1):\n for j in range(i+1,self.natoms):\n rij = (self.atoms[i].xyz - 
self.atoms[j].xyz)\n rij = rij - self.pbc_correction(rij)\n mag_rij = la.norm(rij)\n sum_energy = sum_energy + self.pair_energy(self.epsilon, self.sigma, mag_rij) \n return sum_energy", "def Ngal(self, m):\n return self.Ncen(m) + self.Nsat(m)", "def density_by_ideal_gas_law(\n p: tf.Tensor,\n r: tf.Tensor,\n t: tf.Tensor,\n ) -> tf.Tensor:\n return p / r / t", "def total_potential_energy(R,M,G):\r\n U = 0\r\n N = R.shape[0] \r\n \r\n for n in range(N):\r\n for nn in range(n+1,N):\r\n U = U - G*M[n]*M[nn] / util.enod(R[n,:],R[nn,:])\r\n \r\n return U", "def ldfe(n=3):\n\n # We will use the following coordinate system.\n #\n # | z, top\n # |\n # |\n # |\n # o------- x, right\n # /\n # /\n # /\n # / y, front\n\n # Cube inside the octant that touches the sphere at\n a = 1 / sqrt(3)\n\n # We have three important faces of the cube.\n # Start with the front face and refine it in N segments.\n x = linspace(0, a, n + 1)\n z = linspace(0, a, n + 1)\n\n # Then delta Omega_ij = [x_i,x_i+1] x [z_j,z_j+1]\n # Now go through every cell.\n points = zeros((1 * 1 * 4 * n * n, 3)) # 1/3 of the octants\n weights = zeros(1 * 1 * 4 * n * n)\n square = zeros(1 * 1 * 4 * n * n)\n counter = 0\n rhos0 = 0.1 * ones(4)\n for i in range(n):\n for j in range(n):\n x0, x1, z0, z1 = x[i], x[i + 1], z[j], z[j + 1]\n\n omegas = computeomegas(x0, x1, z0, z1)\n areas = computeareas(omegas, x0, x1, z0, z1)\n print(\"\\n\\nOptimiztation for:\")\n print(\"Domain:\")\n print([x0, x1, z0, z1])\n\n rhos = optimizeposition_leastsquares(areas, omegas, x0, x1, z0, z1,\n rhos0)\n rhos0 = rhos # take the optimal parameter of this cell as the starting value for the optimizer in the next cell\n dummy = rand()\n for k in range(4):\n points[counter, :] = project(omegas[k](rhos[k]))\n weights[counter] = areas[k]\n square[counter] = dummy\n counter += 1\n scatterplot(points, weights, square)\n return points, weights", "def N(latitude):\n return a/math.sqrt(1-e2*pow(math.sin(latitude),2.0))", "def calc_pna(h500_anom, time_name='time', lat_name='lat', lon_name='lon'):\n \n lat_p1 = 20\n lon_p1 = -160+360\n lat_p2 = 45\n lon_p2 = -165+360\n lat_p3 = 55\n lon_p3 = -115+360\n lat_p4 = 30\n lon_p4 = -85+360\n\n h500_anom_p1 = h500_anom.interp({lat_name : lat_p1, lon_name : lon_p1})\n h500_anom_p2 = h500_anom.interp({lat_name : lat_p2, lon_name : lon_p2})\n h500_anom_p3 = h500_anom.interp({lat_name : lat_p3, lon_name : lon_p3})\n h500_anom_p4 = h500_anom.interp({lat_name : lat_p4, lon_name : lon_p4})\n\n h500_anom_p1_group = h500_anom_p1.groupby(time_name+'.month')\n h500_anom_p2_group = h500_anom_p2.groupby(time_name+'.month')\n h500_anom_p3_group = h500_anom_p3.groupby(time_name+'.month')\n h500_anom_p4_group = h500_anom_p4.groupby(time_name+'.month')\n \n return 0.25 * ((h500_anom_p1_group / h500_anom_p1_group.std(time_name)).drop('month') - \\\n (h500_anom_p2_group / h500_anom_p2_group.std(time_name)).drop('month') + \\\n (h500_anom_p3_group / h500_anom_p3_group.std(time_name)).drop('month') - \\\n (h500_anom_p4_group / h500_anom_p4_group.std(time_name)).drop('month'))", "def Psi(l,m,theta,phi):\n if numpy.isscalar(theta): \n theta=numpy.array([[theta]])\n phi=numpy.array([[phi]])\n Psilm_th=numpy.zeros(theta.shape,dtype=complex)\n Psilm_ph=numpy.zeros(theta.shape,dtype=complex)\n x=numpy.cos(theta)\n thetaNonZerosIdx=numpy.where(theta!=0.0)\n if len(thetaNonZerosIdx[0]) != 0:\n Ylm=scipy.special.sph_harm(m,l,phi[thetaNonZerosIdx],theta[thetaNonZerosIdx])\n #Compute derivative of sphrHarm function w.r.t. 
theta:\n if l>=numpy.abs(m):\n Plmpo=legendreLM(l,m+1,x[thetaNonZerosIdx])\n YlmPmpo=math.sqrt((2*l+1)/(4*math.pi)*math.factorial(l-m)/float(math.factorial(l+m)))*Plmpo*numpy.exp(1j*m*phi[thetaNonZerosIdx])\n #YlmPmpo=sqrt((l-m)*(l+m+1))*spharm(l,m+1,theta,phi)*exp(-i*phi) %Should be equivalent to above formula.\n dtYlm=+YlmPmpo+m*x[thetaNonZerosIdx]*Ylm/numpy.sin(theta[thetaNonZerosIdx])\n # thetZerInd=[find(theta==0); find(theta==pi)]\n # dtYlm(thetZerInd)=0; %This is a fudge to remove NaNs\n else:\n dtYlm=numpy.zeros(theta[thetaNonZerosIdx].shape,dtype=complex)\n\n #dtYlm=spharmDtheta(l,m,theta,phi)\n\n Psilm_ph[thetaNonZerosIdx]=+1j*m/numpy.sin(theta[thetaNonZerosIdx])*Ylm\n Psilm_th[thetaNonZerosIdx]=+dtYlm\n #Ref: http://mathworld.wolfram.com/VectorSphericalHarmonic.html\n\n thetaZerosIdx=numpy.where(theta==0.0)\n if len(thetaZerosIdx[0]) != 0:\n if numpy.abs(m)==1:\n Yl1B=math.sqrt((2*l+1)/(4*math.pi)*math.factorial(l-m)/math.factorial(l+m))*PBl1(l,m)*numpy.exp(1j*m*phi[thetaZerosIdx])\n Plmpo=legendreLM(l,m+1,x[thetaZerosIdx])\n YlmPmpo=math.sqrt((2*l+1)/(4*math.pi)*math.factorial(l-m)/math.factorial(l+m))*Plmpo*numpy.exp(1j*m*phi[thetaZerosIdx])\n dtYlm=+YlmPmpo+m*Yl1B\n Psilm_ph[thetaZerosIdx]=+1j*m*Yl1B\n Psilm_th[thetaZerosIdx]=+dtYlm\n else:\n Plmpo=legendreLM(l,m+1,x[thetaZerosIdx])\n YlmPmpo=math.sqrt((2*l+1)/(4*math.pi)*math.factorial(l-m)/math.factorial(l+m))*Plmpo*numpy.exp(1j*m*phi[thetaZerosIdx])\n dtYlm=+YlmPmpo+0\n Psilm_ph[thetaZerosIdx]=0\n Psilm_th[thetaZerosIdx]=+dtYlm\n return Psilm_th,Psilm_ph", "def hemisphere(self):\n return self._hemisphere", "def _apply_array_spatial12_halffilling(self, h1e: 'Nparray',\n h2e: 'Nparray') -> 'Nparray':\n if fqe.settings.use_accelerated_code:\n return self._apply_array_spatial12_lm(h1e, h2e)\n else:\n h1e = copy.deepcopy(h1e)\n h2e = numpy.moveaxis(copy.deepcopy(h2e), 1, 2) * (-1.0)\n norb = self.norb()\n for k in range(norb):\n h1e[:, :] -= h2e[:, k, k, :]\n\n if numpy.iscomplex(h1e).any() or numpy.iscomplex(h2e).any():\n dvec = self.calculate_dvec_spatial()\n out = numpy.einsum(\"ij,ijkl->kl\", h1e, dvec)\n dvec = numpy.einsum(\"ijkl,klmn->ijmn\", h2e, dvec)\n out += self._calculate_coeff_spatial_with_dvec(dvec)\n else:\n nij = norb * (norb + 1) // 2\n h1ec = numpy.zeros((nij), dtype=self._dtype)\n h2ec = numpy.zeros((nij, nij), dtype=self._dtype)\n for i in range(norb):\n for j in range(i + 1):\n ijn = j + i * (i + 1) // 2\n h1ec[ijn] = h1e[i, j]\n for k in range(norb):\n for l in range(k + 1):\n kln = l + k * (k + 1) // 2\n h2ec[ijn, kln] = h2e[i, j, k, l]\n dvec = self._calculate_dvec_spatial_compressed()\n out = numpy.einsum(\"i,ikl->kl\", h1ec, dvec)\n dvec = numpy.einsum(\"ik,kmn->imn\", h2ec, dvec)\n for i in range(self.norb()):\n for j in range(self.norb()):\n ijn = min(i, j) + max(i, j) * (max(i, j) + 1) // 2\n work = self._core.alpha_map(j, i)\n for source, target, parity in work:\n out[source, :] += dvec[ijn, target, :] * parity\n work = self._core.beta_map(j, i)\n for source, target, parity in work:\n out[:, source] += dvec[ijn, :, target] * parity\n\n return out", "def get_w_star(self):\n from .. import physics as phys\n return phys.w_star(self)" ]
[ "0.6078268", "0.6036835", "0.5775312", "0.57285535", "0.57063246", "0.56906533", "0.5667456", "0.5667456", "0.5620325", "0.5604258", "0.552257", "0.5496041", "0.5470228", "0.54219", "0.54151326", "0.54068613", "0.5400745", "0.5382959", "0.53717446", "0.5362374", "0.5358026", "0.5351502", "0.5329636", "0.5293419", "0.5292692", "0.52848357", "0.52811825", "0.52795357", "0.5262936", "0.5254609", "0.5249728", "0.52228564", "0.52124697", "0.5202268", "0.52018523", "0.52000386", "0.5184273", "0.51748097", "0.515998", "0.51520616", "0.51406026", "0.513389", "0.5130745", "0.5126844", "0.5115942", "0.5114313", "0.5112025", "0.5109913", "0.5100244", "0.50920266", "0.50842905", "0.50563127", "0.50557476", "0.50541335", "0.50475514", "0.50435865", "0.50435865", "0.50364673", "0.5035474", "0.5030805", "0.5022749", "0.5015224", "0.5010728", "0.50076556", "0.50033087", "0.49989977", "0.4985385", "0.49837673", "0.4982089", "0.49789837", "0.49767023", "0.49737436", "0.49524525", "0.49511978", "0.49452528", "0.4936891", "0.493184", "0.4930562", "0.49251536", "0.49157545", "0.49126616", "0.49103242", "0.49056977", "0.4905284", "0.4904709", "0.4903102", "0.49008492", "0.48958585", "0.48954573", "0.48946068", "0.4890797", "0.48886833", "0.48867926", "0.4885644", "0.4881047", "0.48698562", "0.48604152", "0.48597047", "0.48590618", "0.48551944" ]
0.73044544
0
Set up conditions for Earth's atmosphere.
def Atmosphere(temperature, relative_humidity=0, pressure=standard_atmospheric_pressure, velocity=np.zeros(3)):
    vapour_pressure_water = relative_humidity * Water.equilibrium_vapour_pressure(temperature)
    mole_fraction_water = vapour_pressure_water / pressure
    molar_mass = (1-mole_fraction_water) * molar_mass_dry_air + mole_fraction_water * Water.molar_mass
    return Environment(Water, molar_mass, pressure, temperature, relative_humidity, specific_heat_capacity_air(temperature), thermal_conductivity_air(temperature), dynamic_viscosity_air(temperature), velocity)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_atmospheric_state(self, atmosphere, t_surface):\n atm_fields_compact = atmosphere.to_atm_fields_compact()\n\n # Scale dry air VMRs with water content\n vmr_h2o = atm_fields_compact.get(\"abs_species-H2O\")\n total_vmr = vmr_h2o[0]\n for species in atm_fields_compact.grids[0]:\n if species.startswith(\"abs_species-\") and \"H2O\" not in species:\n atm_fields_compact.scale(species, 1 - vmr_h2o)\n total_vmr += atm_fields_compact.get(species)[0]\n\n # Compute the N2 VMR as a residual of the full atmosphere composition.\n n2 = ty.arts.types.GriddedField3(\n grids=atm_fields_compact.grids[1:],\n data=1 - total_vmr,\n )\n\n self.ws.atm_fields_compact = atm_fields_compact\n self.ws.atm_fields_compactAddSpecies(\n atm_fields_compact=self.ws.atm_fields_compact,\n name=\"abs_species-N2\",\n value=n2,\n )\n self.ws.AtmFieldsAndParticleBulkPropFieldFromCompact()\n self.ws.vmr_field = self.ws.vmr_field.value.clip(min=0)\n\n # Surface & TOA\n # Add pressure layers to the surface and top-of-the-atmosphere to\n # ensure consistent atmosphere boundaries between ARTS and RRTMG.\n self.ws.t_surface = np.array([[t_surface]])\n self.ws.z_surface = np.array([[0.0]])\n self.ws.z_field.value[0, 0, 0] = 0.0\n\n # Perform configuration and atmosphere checks\n self.ws.atmfields_checkedCalc(bad_partition_functions_ok=1)\n self.ws.propmat_clearsky_agenda_checkedCalc()\n self.ws.atmgeom_checkedCalc()\n self.ws.cloudbox_checkedCalc()", "def make_atmosphere(self):\n\t\traise NotImplementedError()", "def __init__(self):\n\n self.Cp_air0 = config_earth.earth_properties['Cp_air0']\n self.Rsp_air = config_earth.earth_properties['Rsp_air']\n\n self.d = config_earth.balloon_properties['d']\n self.vol = math.pi*4/3*pow((self.d/2),3) #volume m^3\n self.surfArea = math.pi*self.d*self.d #m^2\n self.cs_area = math.pi*self.d*self.d/4.0 #m^2\n\n #self.emissEnv = config_earth.balloon_properties['emissEnv']\n self.areaDensityEnv = config_earth.balloon_properties['areaDensityEnv']\n self.mp = config_earth.balloon_properties['mp']\n self.mdot = 0\n self.massEnv = config_earth.balloon_properties['mEnv']\n self.Upsilon = config_earth.balloon_properties['Upsilon']\n\n self.vent = config_earth.simulation['vent']\n self.coord = config_earth.simulation['start_coord']\n self.t = config_earth.simulation['start_time']\n self.lat = math.radians(self.coord['lat'])\n self.Ls = self.t.timetuple().tm_yday\n self.min_alt = config_earth.simulation['min_alt']\n\n self.vm_coeff = .1 #virtual mass coefficient\n self.k = self.massEnv*config_earth.balloon_properties['cp'] #thermal mass coefficient\n\n self.dt = config_earth.dt", "def __init__(self, alt=0, temp_offset=0):\n\t\tWorkingAtmosphere.__init__(self, alt)\n\t\t#self.temperature_offset = tOffset\n\t\tself.Temperature_offset = temp_offset\n\t\tself.make_environment()", "def prepare_atmosphere(overwrite=True):\n path = astropy.utils.data._find_pkg_data_path('../data/atmosphere.fits')\n if not overwrite and os.path.exists(path):\n print('Atmosphere file exists and overwrite is False.')\n return\n # Specify how SkyCalc will be called.\n params = dict(\n airmass=1.,\n season=0, # annual average\n time=0, # nightly average\n vacair='vac', # vaccuum wavelengths\n wmin=300., # nm\n wmax=1100., # nm\n wdelta=0.01, # nm\n observatory='2640', # paranal\n incl_starlight='N',\n incl_moon='N',\n incl_zodiacal='N',\n incl_airglow='Y'\n )\n print('Calling the ESO SkyCalc...')\n t = skysim.eso.get_skycalc(params)\n # Build a new table with only the columns we need.\n tnew = astropy.table.Table()\n 
tnew['wavelength'] = astropy.table.Column(\n 1e3 * t['lam'].data, description='Vacuum wavelength in nm',\n unit='nm')\n tnew['trans_ma'] = astropy.table.Column(\n t['trans_ma'].data,\n description='Zenith transmission frac for molecular absorption')\n tnew['trans_o3'] = astropy.table.Column(\n t['trans_o3'].data,\n description='Zenith transmission fraction for ozone absorption')\n # Undo absorption and scattering extinction of airglow.\n ael = t['flux_ael']\n arc = t['flux_arc']\n rs = t['trans_rs']\n ms = t['trans_ms']\n ma = t['trans_ma']\n nonzero = ma > 0\n arc[nonzero] /= ma[nonzero]\n ael[nonzero] /= ma[nonzero]\n fR, fM = skysim.airglow.airglow_scattering(0)\n scattering = rs ** fR * ms ** fM\n arc /= scattering\n ael /= scattering\n # Convert from flux density per um to per nm and save.\n tnew['airglow_cont'] = astropy.table.Column(\n 1e-3 * arc, description='Unextincted airglow continuum',\n unit='ph / (arcsec2 m2 s nm)')\n tnew['airglow_line'] = astropy.table.Column(\n 1e-3 * ael, description='Unextincted airglow narrow lines',\n unit='ph / (arcsec2 m2 s nm)')\n # Save the new table.\n tnew.write(path, overwrite=overwrite)\n print(f'Wrote {len(tnew)} rows to {path}')", "def make_environment(self):\n\t\tbase_layer = 0\n\t\tself.Gravity = 9.81\n\n\t\t#Private data for to define model\n\t\t__model_max_altitude = 87000\n\t\t__atmosphere_layers = {0:0, 11000:1, 20000:2, 32000:3, 47000:4, 51000:5, 71000:6}\n\t\t__layer_base_data = {\n\t\t\t0:{'temp':288.15, 'lapse':-0.0065, 'press':101325},\n\t\t\t1:{'temp':216.65, 'lapse':0, 'press':22632.1},\n\t\t\t2:{'temp':216.65, 'lapse':0.001, 'press':5474.89},\n\t\t\t3:{'temp':228.65, 'lapse':0.0028, 'press':868.019},\n\t\t\t4:{'temp':270.65, 'lapse':0, 'press':110.906},\n\t\t\t5:{'temp':270.65, 'lapse':-0.0028, 'press':66.9389},\n\t\t\t6:{'temp':214.65, 'lapse':-0.002, 'press':3.95642},\n\t\t\t}\n\t\t__gas_constant = 8.31432#e3\n\t\t__air_molar_mass = 0.0289644\n\t\t__specific_heat_ratio = 1.4\n\t\t__visc_lambda = 1.51204129e-6\n\t\t__visc_sutherland_const = 120.0\n\n\t\tif self.Altitude > __model_max_altitude:\n\t\t\traise helpers.extra_exceptions.ModelExtrapolationException(\n\t\t\t'Exceeded model maximum altitude')\n\n\t\tlayerKeys = __atmosphere_layers.keys()\n\t\tlayerKeys = list(layerKeys)\n\t\tlayerKeys.sort()\n\t\tfor layer in layerKeys:\n\t\t\tif self.Altitude >= layer:\n\t\t\t\tbase_layer = __atmosphere_layers[layer]\n\t\t\t\tbase_alt = layer\n\t\tbase_temp = __layer_base_data[base_layer]['temp']\n\t\tbase_lapse = __layer_base_data[base_layer]['lapse']\n\t\tbase_press = __layer_base_data[base_layer]['press']\n\n\t\tself.Temperature = base_temp + base_lapse * (self.Altitude - base_alt)\n\t\t+ self.Temperature_offset\n\n\t\tif base_lapse == 0:\n\t\t\tself.Pressure = base_press * \\\n\t\t\t\tnp.exp( (-self.Gravity*__air_molar_mass*(self.Altitude-base_alt)) \\\n\t\t\t\t/(__gas_constant*base_temp))\n\t\telse:\n\t\t\tself.Pressure = base_press * \\\n\t\t\t\t(base_temp/self.Temperature) ** \\\n\t\t\t\t(self.Gravity*__air_molar_mass/__gas_constant/base_lapse)\n\n\t\tself.Density = __air_molar_mass*self.Pressure / \\\n\t\t\t__gas_constant/self.Temperature\n\t\tself.Speed_of_sound = np.sqrt(__specific_heat_ratio*__gas_constant* \\\n\t\t\tself.Temperature/__air_molar_mass)\n\t\tself.Dynamic_viscosity = __visc_lambda*self.Temperature**(3.0/2.0)/ \\\n\t\t\t(self.Temperature+__visc_sutherland_const)", "def checkEnvironment(ontology_environment):\n ontology_time_of_day = ontology_environment.has_time_of_day[0] #get the TimeOfDay individual in the 
ontology \n #Check TimeOfDay property assertions in the ontology and create the PYOSCX TimeOfDay accordingly.\n if len(ontology_time_of_day.has_animation) != 0:\n animation = ontology_time_of_day.has_animation[0]\n if len(ontology_time_of_day.has_year) != 0:\n year = ontology_time_of_day.has_year[0]\n if len(ontology_time_of_day.has_month) != 0:\n month = ontology_time_of_day.has_month[0] \n if len(ontology_time_of_day.has_day) != 0:\n day = ontology_time_of_day.has_day[0] \n if len(ontology_time_of_day.has_hour) != 0:\n hour = ontology_time_of_day.has_hour[0]\n if len(ontology_time_of_day.has_minute) != 0:\n minute = ontology_time_of_day.has_minute[0]\n if len(ontology_time_of_day.has_second) != 0:\n second = ontology_time_of_day.has_second[0]\n xosc_time_of_day = xosc.TimeOfDay(animation,year,month,day,hour,minute,second)\n #Check Weather property assertions in the ontology and create the PYOSCX Weather accordingly.\n ontology_weather = ontology_environment.has_weather[0] #get the Weather individual in the ontology\n if len(ontology_weather.has_cloud_state) != 0:\n xosc_cloud_state = checkCloudState(ontology_weather.has_cloud_state[0])\n if len(ontology_weather.has_fog) !=0:\n xosc_fog = checkFog(ontology_weather.has_fog[0])\n if len(ontology_weather.has_sun) !=0:\n xosc_sun = checkSun(ontology_weather.has_sun[0])\n if len(ontology_weather.has_precipitation) !=0:\n xosc_precipitation = checkPrecipitation(ontology_weather.has_precipitation[0])\n xosc_weather = xosc.Weather(xosc_cloud_state,sun = xosc_sun, fog = xosc_fog, precipitation = xosc_precipitation)\n #Check RoadCondtion property assertions in the ontology and create the PYOSCX RoadCondition accordingly.\n ontology_road_condition = ontology_environment.has_road_condition[0] #get the RoadCondition individual in the ontology\n if len(ontology_road_condition.has_friction_scale_factor) !=0:\n friction_scale_factor = ontology_road_condition.has_friction_scale_factor[0]\n xosc_road_condition = xosc.RoadCondition(friction_scale_factor)\n environment_name = getNameFromIRI(ontology_environment.iri)\n return xosc.Environment(environment_name,xosc_time_of_day,xosc_weather,xosc_road_condition)", "def initial_conditions(self):\n e = self.get_property_all_planets('e')\n pi = self.get_property_all_planets('pi')*np.pi/180\n i = self.get_property_all_planets('i')*np.pi/180\n omega = self.get_property_all_planets('Omega')*np.pi/180\n\n h = np.array(e*np.sin(pi), dtype='complex128')\n k = np.array(e*np.cos(pi), dtype='complex128')\n p = np.array(i*np.sin(omega), dtype='complex128') # WHY DIVIDE BY 2 TO MATCH LASKAR 1986\n q = np.array(i*np.cos(omega), dtype='complex128')\n\n return h, k, p, q", "def update_conditions(self) -> None:\n self.log.debug(\"Updating conditions.\")\n\n self.models[\"sky\"].update(self.models[\"observatory_state\"].time)\n\n if self.is_night is None:\n self.log.debug(\"Driver not initialized yet. Computing night parameters.\")\n # Driver was not initialized yet. 
Need to compute night\n # boundaries\n\n (self.current_sunset, self.current_sunrise) = self.models[\n \"sky\"\n ].get_night_boundaries(self.parameters.night_boundary)\n\n self.is_night = (\n self.current_sunset\n <= self.models[\"observatory_state\"].time\n < self.current_sunrise\n )\n\n self.log.debug(\n f\"Sunset/Sunrise: {self.current_sunset}/{self.current_sunrise}, \"\n f\"sun @ {self.parameters.night_boundary} degrees.\"\n )\n\n is_night = self.is_night\n\n self.is_night = (\n self.current_sunset\n <= self.models[\"observatory_state\"].time\n < self.current_sunrise\n )\n\n # Only compute night boundaries when we transition from nighttime to\n # daytime. Possibilities are:\n # 1 - self.is_night=True and is_night = True: During the night (no need\n # to compute anything).\n # 2 - self.is_night=False and is_night = True: Transitioned from\n # night/day (need to recompute night boundaries).\n # 3 - self.is_night=True and is_night = False: Transitioned from\n # day/night (no need to compute anything).\n # 4 - self.is_night=False and is_night = False: During the day, no need\n # to compute anything.\n if not self.is_night and is_night:\n self.log.debug(\n \"Night over. Computing next night boundaries. \"\n f\"Assuming sun elevation of {self.parameters.night_boundary}.\"\n )\n self.night += 1\n (self.current_sunset, self.current_sunrise) = self.models[\n \"sky\"\n ].get_night_boundaries(self.parameters.night_boundary)\n\n self.log.debug(\n f\"[{self.night}]: Sunset/Sunrise: {self.current_sunset}/{self.current_sunrise} \"\n )", "def initial_conditions(self):\n pass", "def setup_class(self):\n self.dset = read_ww3(os.path.join(FILES_DIR, \"ww3file.nc\"))\n # First two sites are exact matches, third site is in between\n self.lons = [92.00, 92.10, 92.05]\n self.lats = [19.80, 19.95, 19.88]\n self.lons_exact = self.lons[:2]\n self.lats_exact = self.lats[:2]\n self.lons_inexact = self.lons[-1:]\n self.lats_inexact = self.lats[-1:]", "def _setup_conditions(self):\n conds = self._hyperparams['conditions']\n for field in ('x0', 'x0var', 'pos_body_idx', 'pos_body_offset',\n 'noisy_body_idx', 'noisy_body_var', 'taskname'):\n self._hyperparams[field] = setup(self._hyperparams[field], conds)", "def _update_environment(self, environment: Environment) -> None:\n e_vals, f_vals = environment.values, self.farm_status.values\n\n self._generic_update(e_vals, f_vals, 'water_temp', 'water_heater', 'water_cooler') # only works if hydroponic\n self._generic_update(e_vals, f_vals, 'pH', 'ph_up', 'ph_down') # Handles either soil or water ph\n self._generic_update(e_vals, f_vals, 'air_temp', 'air_heater', 'air_cooler')\n self._generic_update(e_vals, f_vals, 'co2', 'co2_up', 'co2_down')\n self._generic_update(e_vals, f_vals, 'humidity', 'humidifier', 'dehumidifier')\n self._generic_update(e_vals, f_vals, 'soil_moisture', 'water_soil', None) # only works if soil based\n\n self._always_set(environment.values['circulation_fan'], 'circulation_fan')\n self._always_set(environment.values['lux'], 'lights')", "def get_environment_actions(self, init_act):\n # Set initial values for environment variables\n time_of_day = \"2020-10-23T06:00:00\"\n time_animation = \"false\"\n cloud_state = \"free\"\n fog_range = \"100000\"\n sun_intensity = \"0.85\"\n sun_azimuth = \"0\"\n sun_elevation = \"1.31\"\n percip_type = \"dry\"\n percip_intensity = \"0\"\n\n try:\n env_layer = QgsProject.instance().mapLayersByName(\"Environment\")[0]\n for feature in env_layer.getFeatures():\n time_of_day = feature[\"Datetime\"]\n time_animation = 
str(feature[\"Datetime Animation\"]).lower()\n cloud_state = feature[\"Cloud State\"]\n fog_range = str(feature[\"Fog Visual Range\"])\n sun_intensity = str(feature[\"Sun Intensity\"])\n sun_azimuth = str(feature[\"Sun Azimuth\"])\n sun_elevation = str(feature[\"Sun Elevation\"])\n percip_type = feature[\"Precipitation Type\"]\n percip_intensity = str(feature[\"Precipitation Intensity\"])\n except IndexError:\n error_message = \"No environment variables detected, using defaults\"\n iface.messageBar().pushMessage(\"Info\", error_message, level=Qgis.Info)\n QgsMessageLog.logMessage(error_message, level=Qgis.Info)\n self._warning_message.append(f\"Info: {error_message}\")\n\n time_of_day = \"2020-10-23T06:00:00\"\n time_animation = \"false\"\n cloud_state = \"free\"\n fog_range = \"100000\"\n sun_intensity = \"0.85\"\n sun_azimuth = \"0\"\n sun_elevation = \"1.31\"\n percip_type = \"dry\"\n percip_intensity = \"0\"\n\n global_act = etree.SubElement(init_act, \"GlobalAction\")\n env_act = etree.SubElement(global_act, \"EnvironmentAction\")\n environ = etree.SubElement(env_act, \"Environment\")\n environ.set(\"name\", \"Environment1\")\n\n env_time = etree.SubElement(environ, \"TimeOfDay\")\n env_time.set(\"animation\", time_animation)\n env_time.set(\"dateTime\", time_of_day)\n\n weather = etree.SubElement(environ, \"Weather\")\n weather.set(\"cloudState\", cloud_state)\n weather_sun = etree.SubElement(weather, \"Sun\")\n weather_sun.set(\"intensity\", sun_intensity)\n weather_sun.set(\"azimuth\", sun_azimuth)\n weather_sun.set(\"elevation\", sun_elevation)\n weather_fog = etree.SubElement(weather, \"Fog\")\n weather_fog.set(\"visualRange\", fog_range)\n weather_percip = etree.SubElement(weather, \"Precipitation\")\n weather_percip.set(\"precipitationType\", percip_type)\n weather_percip.set(\"intensity\", percip_intensity)\n\n env_road = etree.SubElement(environ, \"RoadCondition\")\n env_road.set(\"frictionScaleFactor\", \"1.0\")", "def set_oceanic_modes(self, basis, auto=True):\n if self._atmospheric_basis is None: # Presently, the ocean can not yet be set independently of an atmosphere.\n print('Atmosphere modes not set up. 
Add an atmosphere before adding an ocean!')\n print('Oceanic setup aborted.')\n return\n\n if auto:\n if self.gotemperature_params is None or isinstance(self.gotemperature_params, GroundTemperatureParams):\n self.gotemperature_params = OceanicTemperatureParams(self.scale_params)\n if self.oceanic_params is None:\n self.oceanic_params = OceanicParams(self.scale_params)\n\n self.ground_params = None\n self._ground_basis = None\n\n self.oceanic_basis = basis\n\n self._oceanic_latex_var_string = list()\n self._oceanic_var_string = list()\n self._ground_latex_var_string = list()\n self._ground_var_string = list()\n for i in range(1, self.nmod[1] + 1):\n self._oceanic_latex_var_string.append(r'psi_{\\rm o,' + str(i) + \"}\")\n self._oceanic_var_string.append(r'psi_o_' + str(i))\n if self.dynamic_T:\n self._oceanic_latex_var_string.append(r', T_{{\\rm o},0}')\n self._oceanic_var_string.append(r'T_o_0')\n for i in range(1, self.nmod[1] + 1):\n self._oceanic_latex_var_string.append(r'delta T_{{\\rm o},' + str(i) + \"}\")\n self._oceanic_var_string.append(r'delta_T_o_' + str(i))", "def __init__(self, zone, value):\n from datamodel import Pressure\n CommonInitialCondition.__init__(self,zone, value,[Pressure])", "def initial_conditions():\n return InitialConditionDomain()", "def set_weather_condition(condition):\n global weather_condition\n\n weather_condition = condition\n\n # Send the new weather condition to all stations.\n if send_status_request([(STAT_CONDITION, weather_condition)]):\n print_log(\"Condition changed to {}\".format(weather_condition))", "def _configure(self):\n InitialCondition._configure(self)", "def apply_weather_values(args, weather):\n if args.azimuth is not None:\n weather.sun_azimuth_angle = args.azimuth\n if args.altitude is not None:\n weather.sun_altitude_angle = args.altitude\n if args.clouds is not None:\n weather.cloudiness = args.clouds\n if args.rain is not None:\n weather.precipitation = args.rain\n if args.puddles is not None:\n weather.precipitation_deposits = args.puddles\n if args.wind is not None:\n weather.wind_intensity = args.wind\n if args.fog is not None:\n weather.fog_density = args.fog\n if args.fogdist is not None:\n weather.fog_distance = args.fogdist\n if args.fogfalloff is not None:\n weather.fog_falloff = args.fogfalloff\n if args.wetness is not None:\n weather.wetness = args.wetness", "def __defaults__(self): \n self.tag = 'Constant-property atmosphere'\n self.composition = Data()\n self.composition.gas = 1.0", "def set_oceans(world_map, sea_level):\n print(\"- Processing oceans\")\n for (x, y), z in np.ndenumerate(world_map):\n if world_map['elevation'][x, y] <= sea_level:\n world_map['water depth'][x, y] = 0.0\n else:\n world_map['water depth'][x, y] = 1.0", "def __init__(self, eqn_set=0, atmosphere=0, ra_steps=(1, 1e3, 40, True),\n kx_steps=(0.01, 1, 40, True), ky_steps=None, threeD=False, atmo_kwargs={}, eqn_args=[],\n eqn_kwargs={}, bc_kwargs={}):\n self._eqn_set = eqn_set\n self._atmosphere = atmosphere\n self._ra_steps = ra_steps\n self._kx_steps = kx_steps\n self._ky_steps = ky_steps\n self.threeD = threeD\n\n self._atmo_kwargs = atmo_kwargs\n self._eqn_args = eqn_args\n self._eqn_kwargs = eqn_kwargs\n self._bc_kwargs = bc_kwargs\n self.cf = CriticalFinder(self.solve_problem, CW)", "def getWaterConditions(self):\n return self._getConditions(restrict=['CS-Eau'])", "def _setup_hydro_radial_initial_conditions(logger, filename,\n initial_condition):\n # Open the file, get needed dimensions\n gridfile = NetCDFFile(filename, 'r+')\n nVertLevels = 
len(gridfile.dimensions['nVertLevels'])\n # Get variables\n xCell = gridfile.variables['xCell']\n yCell = gridfile.variables['yCell']\n xEdge = gridfile.variables['xEdge']\n yEdge = gridfile.variables['yEdge']\n xVertex = gridfile.variables['xVertex']\n yVertex = gridfile.variables['yVertex']\n thickness = gridfile.variables['thickness']\n bedTopography = gridfile.variables['bedTopography']\n layerThicknessFractions = gridfile.variables['layerThicknessFractions']\n\n # Find center of domain\n x0 = xCell[:].min() + 0.5 * (xCell[:].max() - xCell[:].min())\n y0 = yCell[:].min() + 0.5 * (yCell[:].max() - yCell[:].min())\n # Calculate distance of each cell center from dome center\n r = ((xCell[:] - x0)**2 + (yCell[:] - y0)**2)**0.5\n\n # Center the dome in the center of the cell that is closest to the center\n # of the domain.\n # NOTE: for some meshes, maybe we don't want to do this - could add\n # command-line argument controlling this later.\n putOriginOnACell = True\n if putOriginOnACell:\n centerCellIndex = np.abs(r[:]).argmin()\n xShift = -1.0 * xCell[centerCellIndex]\n yShift = -1.0 * yCell[centerCellIndex]\n xCell[:] = xCell[:] + xShift\n yCell[:] = yCell[:] + yShift\n xEdge[:] = xEdge[:] + xShift\n yEdge[:] = yEdge[:] + yShift\n xVertex[:] = xVertex[:] + xShift\n yVertex[:] = yVertex[:] + yShift\n # Now update origin location and distance array\n x0 = 0.0\n y0 = 0.0\n r = ((xCell[:] - x0)**2 + (yCell[:] - y0)**2)**0.5\n\n # center thickness (m)\n h0 = 500.0\n # sliding velocity at margin (m/s)\n v0 = 100.0 / (3600.0 * 24.0 * 365.0)\n # ideal ice cap radius (m)\n R0 = 25.0e3\n # onset of sliding (m)\n R1 = 5.0e3\n # actual margin location (m)\n L = 0.9 * R0\n\n thickness[0, r < R0] = h0 * (1.0 - (r[r < R0] / R0)**2)\n thickness[0, r > L] = 0.0\n\n # flat bed\n bedTopography[:] = 0.0\n\n # Setup layerThicknessFractions\n layerThicknessFractions[:] = 1.0 / nVertLevels\n\n # melt\n gridfile.variables['basalMeltInput'][:] = 0.0\n # 20 cm/yr as SI mass rate\n gridfile.variables['basalMeltInput'][:] = \\\n 0.2 / (365.0 * 24.0 * 3600.0) * 1000.0\n # Use this line to only add a source term to the center cell - useful for\n # debugging divergence\n\n # value from ramp\n # gridfile.variables['basalMeltInput'][0,r==0.0] = 4.0e-10 * 1000.0 *100\n\n # velocity\n gridfile.variables['uReconstructX'][:] = 0.0\n velo = v0 * (r - R1)**5 / (L - R1)**5\n velo[r < R1] = 0.0\n gridfile.variables['uReconstructX'][0, :, -1] = velo\n gridfile.variables['uReconstructX'][0, thickness[0, :] == 0.0, :] = 0.0\n\n if initial_condition == 'zero':\n logger.info(\"Using 'zero' option for initial condition.\")\n # set some small initial value to keep adaptive time stepper from\n # taking a huge time step initially\n gridfile.variables['waterThickness'][0, :] = 0.01\n gridfile.variables['waterPressure'][0, :] = 0.0\n elif initial_condition == 'exact':\n logger.info(\"Using 'exact' option for initial condition.\")\n # IC on thickness\n # import exact solution\n fnameSoln = 'near_exact_solution_r_P_W.txt'\n soln = np.loadtxt(fnameSoln, delimiter=',')\n rsoln = soln[:, 0]\n Psoln = soln[:, 1]\n Wsoln = soln[:, 2]\n\n Wmpas = np.interp(r, rsoln, Wsoln) # apply exact solution\n Wmpas[np.isnan(Wmpas)] = 0.0\n gridfile.variables['waterThickness'][0, :] = Wmpas\n\n # IC on water pressure\n # apply exact solution\n Pmpas = np.interp(r, rsoln, Psoln)\n Pmpas[np.isnan(Pmpas)] = 0.0\n gridfile.variables['waterPressure'][0, :] = Pmpas\n else:\n raise ValueError(\"Unknown initial condition type specified \"\n 
\"{}.\".format(initial_condition))\n\n gridfile.close()\n\n logger.info('Successfully added hydro_radial initial conditions to: '\n '{}'.format(filename))", "def initConfig(self):\n\n # set observer position to last one first, to greenwich if not known\n lat = self.config.get('topoLat', 51.47)\n lon = self.config.get('topoLon', 0)\n elev = self.config.get('topoElev', 46)\n topo = skyfield.api.Topos(longitude_degrees=lon,\n latitude_degrees=lat,\n elevation_m=elev)\n\n config = self.config.get('mainW', {})\n if config.get('loglevelDeepDebug', True):\n level = 'DEBUG'\n elif config.get('loglevelDebug', True):\n level = 'INFO'\n else:\n level = 'WARN'\n setCustomLoggingLevel(level)\n\n return topo", "def setUp(self):\n premask = np.array([[0.0, 3.0, 2.0], [0.5, 0.0, 1.5], [0.2, 0.0, 0]])\n self.mask = np.ma.masked_where(premask > 1.0, premask)\n\n self.x_coord = DimCoord([1, 2, 3], long_name=\"longitude\")\n self.y_coord = DimCoord([1, 2, 3], long_name=\"latitude\")\n self.coords = [self.x_coord, self.y_coord]\n self.upper = 100.0\n self.lower = 0.0\n self.units = \"m\"", "def __init__(self, x, y, z): \n\t\tself.x = x # x coordinate (EW distance from observatory center)\n\t\tself.y = y # y coordinate (NS distance from observatory center)\n\t\tself.z = z # z coordinate (altitude rel. to observatory center)", "def setup(self):\n insts = []\n for i in range(5):\n insts.append(pysat.Instrument('pysat', 'testing',\n clean_level='clean'))\n self.testC = pysat.Constellation(instruments=insts)\n self.testI = pysat.Instrument('pysat', 'testing', clean_level='clean')\n self.bounds = (dt.datetime(2008, 1, 1), dt.datetime(2008, 1, 3))\n\n # Apply bounds to all Instruments in Constellation, and solo Instrument.\n self.testC.bounds = self.bounds\n self.testI.bounds = self.bounds\n\n # Define variables for 1D testing\n self.one_d_vars = ['dummy1', 'dummy2', 'dummy3']\n self.unequal_one_d_vars = []\n\n return", "def set_ground_modes(self, basis=None, auto=True):\n if self._atmospheric_basis is None: # Presently, the ground can not yet be set independently of an atmosphere.\n print('Atmosphere modes not set up. 
Add an atmosphere before adding the ground!')\n print('Ground setup aborted.')\n return\n\n if auto:\n if self.gotemperature_params is None or isinstance(self.gotemperature_params, OceanicTemperatureParams):\n self.gotemperature_params = GroundTemperatureParams(self.scale_params)\n if self.ground_params is None:\n self.ground_params = GroundParams(self.scale_params)\n\n self.oceanic_params = None\n self._oceanic_basis = None\n\n if basis is not None:\n self.ground_basis = basis\n else:\n self.ground_basis = self._atmospheric_basis\n\n self._oceanic_var_string = list()\n self._oceanic_latex_var_string = list()\n self._ground_latex_var_string = list()\n self._ground_var_string = list()\n if self.dynamic_T:\n self._oceanic_latex_var_string.append(r', T_{{\\rm g},0}')\n self._oceanic_var_string.append(r'T_g_0')\n for i in range(1, self.nmod[1] + 1):\n self._ground_latex_var_string.append(r'delta T_{\\rm g,' + str(i) + \"}\")\n self._ground_var_string.append(r'delta_T_g_' + str(i))", "def rainfall_event(self):\n\n # assign local variables\n datatype = 'strds'\n increment = str(self.rain_interval)+' minutes'\n raster = 'raster'\n iterations = int(self.rain_duration)/int(self.rain_interval)\n rain_excess = 'rain_excess'\n net_difference = 'net_difference'\n\n # create raster space time datasets\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.elevation_timeseries,\n title=self.elevation_title,\n description=self.elevation_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.depth_timeseries,\n title=self.depth_title,\n description=self.depth_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.erdep_timeseries,\n title=self.erdep_title,\n description=self.erdep_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.flux_timeseries,\n title=self.flux_title,\n description=self.flux_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.difference_timeseries,\n title=self.difference_title,\n description=self.difference_description,\n overwrite=True)\n\n # register the initial digital elevation model\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=self.elevation,\n start=self.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # create evolution object\n evol = Evolution(elevation=self.elevation,\n precipitation=self.precipitation,\n start=self.start,\n rain_intensity=self.rain_intensity,\n rain_interval=self.rain_interval,\n rain_duration=self.rain_duration,\n walkers=self.walkers,\n runoff=self.runoff,\n mannings=self.mannings,\n detachment=self.detachment,\n transport=self.transport,\n shearstress=self.shearstress,\n density=self.density,\n mass=self.mass,\n grav_diffusion=self.grav_diffusion,\n erdepmin=self.erdepmin,\n erdepmax=self.erdepmax,\n k_factor=self.k_factor,\n c_factor=self.c_factor,\n m=self.m,\n n=self.n,\n threads=self.threads,\n fill_depressions=self.fill_depressions)\n\n # determine mode and run model\n if self.mode == 'simwe_mode':\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.erosion_deposition()\n # remove relative timestamps from r.sim.water and r.sim.sediment\n gscript.run_command(\n 'r.timestamp',\n map=depth,\n 
date='none')\n gscript.run_command(\n 'r.timestamp',\n map=erosion_deposition,\n date='none')\n\n elif self.mode == \"usped_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.usped()\n\n elif self.mode == \"rusle_mode\":\n (evolved_elevation, time, depth, sediment_flux,\n difference) = evol.rusle()\n\n else:\n raise RuntimeError(\n '{mode} mode does not exist').format(mode=self.mode)\n\n # register the evolved maps\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=evolved_elevation,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.depth_timeseries,\n maps=depth,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.erdep_timeseries,\n maps=erosion_deposition,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n except (NameError, CalledModuleError):\n pass\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.flux_timeseries,\n maps=sediment_flux,\n start=evol.start,\n increment=increment,\n flags='i', overwrite=True)\n except (NameError, CalledModuleError):\n pass\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.difference_timeseries,\n maps=difference,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # run the landscape evolution model\n # as a series of rainfall intervals in a rainfall event\n i = 1\n while i < iterations:\n\n # update the elevation\n evol.elevation = evolved_elevation\n print evol.elevation\n\n # update time\n evol.start = time\n print evol.start\n\n # derive excess water (mm/hr) from rainfall rate (mm/hr)\n # plus the depth (m) per rainfall interval (min)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_excess}\"\n \"={rain_intensity}\"\n \"+{depth}\"\n \"/1000.\"\n \"/{rain_interval}\"\n \"*60.\".format(\n rain_excess=rain_excess,\n rain_intensity=self.rain_intensity,\n depth=depth,\n rain_interval=self.rain_interval),\n overwrite=True)\n\n # update excess rainfall\n rain_intensity = 'rain_intensity'\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_intensity} = {rain_excess}\".format(\n rain_intensity='rain_intensity',\n rain_excess=rain_excess),\n overwrite=True)\n evol.rain_intensity = rain_intensity\n\n # determine mode and run model\n if self.mode == \"simwe_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.erosion_deposition()\n # remove relative timestamps\n # from r.sim.water and r.sim.sediment\n gscript.run_command(\n 'r.timestamp',\n map=depth,\n date='none')\n gscript.run_command(\n 'r.timestamp',\n map=erosion_deposition,\n date='none')\n\n elif self.mode == \"usped_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.usped()\n\n elif self.mode == \"rusle_mode\":\n (evolved_elevation, time, depth, sediment_flux,\n difference) = evol.rusle()\n\n else:\n raise RuntimeError(\n '{mode} mode does not exist').format(mode=self.mode)\n\n # register the evolved maps\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=evolved_elevation,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.depth_timeseries,\n maps=depth,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n try:\n 
gscript.run_command(\n 't.register',\n type=raster,\n input=self.erdep_timeseries,\n maps=erosion_deposition,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n except (NameError, CalledModuleError):\n pass\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.flux_timeseries,\n maps=sediment_flux,\n start=evol.start,\n increment=increment,\n flags='i', overwrite=True)\n except (NameError, CalledModuleError):\n pass\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.difference_timeseries,\n maps=difference,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # remove temporary maps\n gscript.run_command(\n 'g.remove',\n type='raster',\n name=['rain_excess'],\n flags='f')\n\n i = i+1\n\n # compute net elevation change\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{net_difference}\"\n \"={evolved_elevation}-{elevation}\".format(\n net_difference=net_difference,\n elevation=self.elevation,\n evolved_elevation=evol.elevation),\n overwrite=True)\n gscript.write_command(\n 'r.colors',\n map=net_difference,\n rules='-',\n stdin=difference_colors)", "def __init__(self, body, observer, date):\n self.site = observer.site\n self.body = body\n self.date = observer.date_to_local(date)\n self.date_utc = observer.date_to_utc(self.date)\n\n self.humidity = observer.humidity\n self.wavelength = observer.wavelength\n\n # Can/should this calculation be postponed?\n self.site.date = ephem.Date(self.date_utc)\n self.body.compute(self.site)\n\n self.lt = self.date\n self.ra = self.body.ra\n self.dec = self.body.dec\n self.alt = float(self.body.alt)\n self.az = float(self.body.az)\n # TODO: deprecate\n self.alt_deg = math.degrees(self.alt)\n self.az_deg = math.degrees(self.az)\n self.will_be_visible = not self.body.neverup\n\n # properties\n self._ut = None\n self._gmst = None\n self._lmst = None\n self._ha = None\n self._pang = None\n self._am = None\n self._moon_alt = None\n self._moon_pct = None\n self._moon_sep = None\n self._atmos_disp = None\n\n # Conversion factor for wavelengths (Angstrom -> micrometer)\n self.angstrom_to_mm = 1. 
/ 10000.", "def setup(self):\n self.testInst = pysat.Instrument('pysat', 'testing',\n clean_level='clean')\n self.bounds1 = (dt.datetime(2008, 1, 1), dt.datetime(2008, 1, 3))\n self.bounds2 = (dt.datetime(2009, 1, 1), dt.datetime(2009, 1, 2))\n\n return", "def __init__(self,\n launchSiteLat,\n launchSiteLon,\n launchSiteElev,\n dateAndTime,\n soundingFile,\n timeFromSounding,\n distanceFromSounding,\n inflationTemperature=0.0,\n UTC_offset=0.,\n debugging=False,\n load_on_init=False):\n # Initialize sounding-specific variables\n self.distanceFromSounding = distanceFromSounding\n self.timeFromSounding = timeFromSounding\n self.maxAltitude = 50999\n self.soundingFile = soundingFile\n\n self._interpolationPrecision = 200\n\n # Run the environment class initialization first\n super(soundingEnvironment, self).__init__(\n inflationTemperature=inflationTemperature,\n launchSiteLat=launchSiteLat,\n launchSiteLon=launchSiteLon,\n launchSiteElev=launchSiteElev,\n dateAndTime=dateAndTime,\n UTC_offset=UTC_offset,\n debugging=debugging,\n load_on_init=load_on_init)", "def setup_observatory( obs ):\n\n obs_obj = ephem.Observer()\n\n # Pre-defined observatory with string identifier:\n if type( obs )==str:\n obs_db = observatories()\n try:\n obs_dict = obs_db[ obs ]\n obs_obj.lat = obs_dict['lat']\n obs_obj.long = obs_dict['long']\n obs_obj.elevation = obs_dict['altitude-metres']\n timezone = obs_dict['timezone']\n except:\n print '\\n\\nObservatory string does not match any in database!'\n print 'Currently available observatories are:'\n for i in obs_db.keys():\n print ' {0}'.format( i )\n obs_obj = None\n timezone = None\n\n # Custom-defined observatory as dictionary:\n else:\n obs_obj.lat = obs['lat']\n obs_obj.long = obs['long']\n try:\n obs_obj.elevation = obs['altitude-metres']\n except:\n print 'No elevation provided - assuming sea level'\n obs_obj.elevation = 0.\n try:\n timezone = obs['timezone']\n except:\n timezone = None\n\n return obs_obj, timezone", "def setup(self):\n insts = []\n for i in range(2):\n r_date = dt.datetime(2009, 1, i + 1)\n insts.append(pysat.Instrument('pysat', 'testing',\n clean_level='clean',\n root_date=r_date))\n self.testC = pysat.Constellation(instruments=insts)\n self.testI = pysat.Instrument('pysat', 'testing', clean_level='clean')\n self.bounds = (dt.datetime(2008, 1, 1), dt.datetime(2008, 1, 3))\n\n # Apply bounds to all Instruments in Constellation, and solo Instrument.\n self.testC.bounds = self.bounds\n self.testI.bounds = self.bounds\n\n # Define variables for 1D testing. A more limited set that only\n # depends upon 'mlt'. 
Other variables also include longitude, which\n # can differ between instruments when only binning by 'mlt'.\n self.one_d_vars = ['dummy1']\n self.unequal_one_d_vars = ['dummy2', 'dummy3']\n\n return", "def __init__(self, easting=0.0, northing=0.0, altitude=0.0,\n zone_number=None, zone_letter=None):\n self.easting = easting\n self.northing = northing\n self.altitude = altitude\n self.zone_number = zone_number\n self.zone_letter = zone_letter", "def set_up_all_ao(self):\n self.set_as_active()\n \n # sets up ambient occlusion lighting\n self.set_up_world_ao()\n self.comp_add_ao()", "def initializeDomainCondition(self):\n print('Initialize the condition.')\n\n self.fluidPDF = np.zeros([self.typesFluids, self.ny, self.nx, 9])\n self.fluidsDensity = np.zeros([self.typesFluids, self.ny, self.nx])\n self.physicalVX = np.zeros([self.ny, self.nx])\n self.physicalVY = np.zeros([self.ny, self.nx])\n self.forceX = np.zeros([self.typesFluids, self.ny, self.nx])\n self.forceY = np.zeros([self.typesFluids, self.ny, self.nx])\n if (self.PictureExistance == \"'no'\"):\n for i in sp.arange(self.ny):\n for j in sp.arange(self.nx):\n# for k in sp.arange(self.typesFluids):\n tmpCenterX = int(self.nx / 2); tmpCenterY = int(self.ny / 2)\n if (self.isDomain[i, j] == True):\n# if (sp.sqrt((i - tmpCenterY) * (i - tmpCenterY) + (j - \\\n# tmpCenterX) * (j - tmpCenterX)) <= 15.):\n# if (i < 15 and np.abs(j - tmpCenterX) < 15):\n# if ((i >0 and i < 28) and (j >=102 and j < 154)):\n if (i < self.ny - 10):\n# if (i < 128 and i > 70):\n self.fluidsDensity[0, i, j] = self.initialDensities[0]\n self.fluidPDF[0, i, j, :] = self.weightsCoeff * self.initialDensities[0]\n self.fluidsDensity[1, i, j] = self.backgroundDensities[1]\n self.fluidPDF[1, i, j, :] = self.weightsCoeff * self.backgroundDensities[1]\n else:\n self.fluidsDensity[1, i, j] = self.initialDensities[1]\n self.fluidPDF[1, i, j, :] = self.weightsCoeff * self.initialDensities[1]\n self.fluidsDensity[0, i, j] = self.backgroundDensities[0]\n self.fluidPDF[0, i, j, :] = self.weightsCoeff * self.backgroundDensities[0] \n \n if (self.isCycles == \"'no'\" and self.PictureExistance == \"'yes'\"):\n for i in sp.arange(self.ny):\n for j in sp.arange(self.nx):\n if (i < self.ny - 20):\n # if ( np.abs(i - 60) < 20):\n for k in sp.arange(self.typesFluids):\n if (k == 0 and self.isDomain[i, j] == 1):\n self.fluidPDF[k, i, j, :] = self.initialDensities[k] * self.weightsCoeff\n self.fluidsDensity[k, i, j] = self.initialDensities[k]\n if (k == 1 and self.isDomain[i, j] == 1):\n self.fluidPDF[k, i, j, :] = self.backgroundDensities[k] * self.weightsCoeff\n self.fluidsDensity[k, i, j] = self.backgroundDensities[k]\n else:\n for k in sp.arange(self.typesFluids):\n if (k == 0 and self.isDomain[i, j] == 1):\n self.fluidPDF[k, i, j, :] = self.backgroundDensities[k] * self.weightsCoeff\n self.fluidsDensity[k, i, j] = self.backgroundDensities[k]\n if (k == 1 and self.isDomain[i, j] == 1):\n self.fluidPDF[k, i, j, :] = self.initialDensities[k] * self.weightsCoeff\n self.fluidsDensity[k, i, j] = self.initialDensities[k]\n elif (self.isCycles == \"'yes'\" and self.PictureExistance == \"'yes'\"):\n username = getpass.getuser()\n pathIniFile = '/home/' + username + '/LBMInitial/'\n if (os.path.exists(pathIniFile) == True): \n #for the old fluid distribution\n #the domain of the network\n iniFile = tb.open_file(pathIniFile + 'SimulationResults.h5', 'r')\n for i in sp.arange(self.typesFluids-1):\n self.fluidsDensity[i, :-30, :] = eval('iniFile.root.FluidMacro.FluidDensityType%gin%d[:-30, :]' % 
(i, self.lastStep))\n self.fluidsDensity[i, -30:, :] = self.backgroundDensities[i]\n for j in sp.arange(self.ny):\n for k in sp.arange(self.nx):\n self.fluidPDF[i, j, k, :] = self.weightsCoeff * \\\n self.fluidsDensity[i, j, k]\n iniFile.close()\n# for the new fluid in the domain\n for i in sp.arange(self.ny):\n for j in sp.arange(self.nx):\n if (i < self.ny - 30 and self.isDomain[i, j] == 1):\n self.fluidsDensity[-1, i, j] = self.backgroundDensities[-1]\n self.fluidPDF[-1, i, j, :] = self.backgroundDensities[-1] * \\\n self.weightsCoeff\n# continue\n elif (i >= self.ny - 30 and self.isDomain[i, j] == 1):\n self.fluidsDensity[-1, i, j] = self.initialDensities[-1]\n self.fluidPDF[-1, i, j, :] = self.initialDensities[-1] * \\\n self.weightsCoeff\n else:\n print(\"There is no file for initializing the domain.\")\n sys.exit()", "def setDefaults(self) -> None:\n self.night_boundary = -12.0\n self.new_moon_phase_threshold = 20.0", "def rebuild_models(self):\n\n zmin = self._domain.z.lbound\n zmax = self._domain.z.rbound\n\n xmin = self._domain.x.lbound\n xmax = self._domain.x.rbound\n\n grid = self.mesh.mesh_coords()\n\n # the small number is added to prevent undesireable numerical effects\n air_depth = (1e-8 + 2.0/15.0) * (zmax - zmin) + zmin\n rock_bottom = 13.0/15.0 * (zmax - zmin) + zmin\n\n coast_left = 3.0/25.0 * (xmax - xmin) + xmin\n coast_right = 13.0/25.0 * (xmax - xmin) + xmin\n\n max_depth = zmax\n\n # Set up air layer\n if self._domain.dim == 2:\n n = (0., 1.)\n p = (coast_right, air_depth)\n else: # domain.dim == 3\n n = (0.0, 0.0, 1.0)\n p = (coast_right, coast_right, air_depth)\n\n air_plane = ImplicitPlane(p,n)\n air = air_plane\n\n # Set up rock layer\n if self._domain.dim == 2:\n n = (coast_right - coast_left, -(1.0 - air_depth))\n p = (coast_right, max_depth)\n n2 = (0., -1.)\n p2 = (0., rock_bottom)\n else: # domain.dim == 3\n n = (coast_right - coast_left, 0.0, -(1.0 - air_depth))\n p = (coast_right, 0.0, max_depth)\n n2 = (0., 0., -1.)\n p2 = (0., 0., rock_bottom)\n\n rock_plane = ImplicitPlane(p,n)\n rock_plane2 = ImplicitPlane(p2,n2)\n\n rock = ImplicitDifference(ImplicitUnion(rock_plane, rock_plane2), air_plane)\n\n C0 = air.interior(grid, True) * self.air_velocity + \\\n rock.interior(grid, True) * self.rock_velocity\n\n C0[np.where(C0 == 0.0)] = self.water_velocity\n\n submarine = self.submarine\n\n if submarine is not None:\n sub = submarine.implicit_surface\n\n C = air.interior(grid, True) * self.air_velocity + \\\n rock.interior(grid, True) * self.rock_velocity + \\\n sub.interior(grid, True) * submarine.velocity\n\n C[np.where(C == 0.0)] = self.water_velocity\n\n else:\n C = C0.copy()\n\n C.shape = self._mesh.shape()\n C0.shape = self._mesh.shape()\n\n self._true_model = C\n self._initial_model = C0", "def setupTown(self):\n\t\t# create a test square to determine participant distance\n\t\tself.vr.resetEnvironment()\n\t\t\n\t\tself.vr.addSkyBox(self.config.blackImage)\n\t\tself.vr.addFloorBox(0.0, -1.0, 0.0, self.config.unitScale, self.config.unitScale, self.config.unitScale,\n\t\t\t\t\t\tself.config.blackImage, None, self.config.blackImage, None)\n\t\tself.vr.setGravity(0.0, -0.1, 0.0)\n\t\tself.vr.addPlaneGeom(0.0, 1.0, 0.0, 0.0, mu = 0.0)\n\t\tself.vr.addBuildingBox(0.0, 0.95, -0.5, self.config.whiteImage, 0.1, 0.1)", "def do_setup(self, ants): \n log_filter = LogFilter()\n getLogger().addFilter(log_filter)\n\n self.hills = []\n self.directions = []\n\n self.seen = [] #areas that have been seen, use this to avoid repetition\n self.unseen = []\n self.stepped_on = 
[]\n\n self.intent = {}\n self.lc = {} #center of mass for a location\n self.i = {} #number of iterations for an ant\n\n for row in range(ants.rows):\n for col in range(ants.cols):\n self.unseen.append((row, col))\n self.intent[(row,col)] = Intent.GATHER\n\n self.lc[(row,col)] = (-1.0,-1.0) #set up center of mass\n self.i[(row,col)] = -1", "def update_weather(self, weather: Weather) -> None:\n new_weather = carla.WeatherParameters()\n if weather == Weather.SUNSHINE:\n new_weather = carla.WeatherParameters.ClearNoon\n elif weather == Weather.RAIN:\n new_weather = carla.WeatherParameters(precipitation=100.0, wetness=100.0)\n elif weather == Weather.FOG:\n new_weather = carla.WeatherParameters(fog_density=100.0, fog_distance=0.0,fog_falloff=0.5)\n self.carla_world.set_weather(new_weather)", "def initializeConditions(self, conditions):\n try:\n new = self.preprocessConditions(conditions)\n evaluate(new) # check if valid\n self.conditions = conditions\n self.check = new\n except:\n try:\n new = self.preprocessConditions(self.conditions)\n evaluate(new)\n self.check = new\n except:\n self.conditions = \"True\"\n self.check = \"lambda x, y, z=0: True\"", "def pick_random_initial_conditions(self, determine_dir = False, determine_mass = False, pos_epsilon=5E-3, vel_espilon=1E-7):\n\n # Pick Sun or Moon System\n if determine_mass:\n if np.random.random() > 0.5:\n self.type = 'Sun'\n self.mu = mu_Earth\n else:\n self.type = 'Moon'\n self.mu = mu_Moon\n else:\n self.type = 'Moon'\n self.mu = mu_Moon\n\n self.eq_points = self.find_lagrange_points(self.mu)\n\n for k, point, in self.eq_points.items():\n self.contour_levels.append(CR3BP.V(point[0], point[1], self.mu))\n\n self.contour_levels.sort()\n\n # pick lagrange point\n lagrange_point = np.random.randint(1,6)\n init_xy = self.eq_points[\"L{}\".format(lagrange_point)]\n self.initial_point = self.eq_points[\"L{}\".format(lagrange_point)]\n self.initial_point_str = \"L{}\".format(lagrange_point)\n\n # pick random offsets from lagrange point and a random velocity in x-y direction\n self.init_conds = np.concatenate([np.random.uniform(init_xy[0] - pos_epsilon, init_xy[0] + pos_epsilon, [1,1]),\n np.random.uniform(init_xy[1] - pos_epsilon, init_xy[1] + pos_epsilon,[1,1]),\n np.random.uniform(-1 * vel_espilon, vel_espilon, [1, 2])], axis=1).reshape(-1)\n\n if determine_dir:\n # Pick to either simulate forwards or backwards in time\n # Forwards in time will show unstable manifolds while backwards in time will show stable manifolds\n if np.random.random() > 0.5:\n # integrate fowards in time\n self.direction = True\n else:\n # integrate backwards in time\n self.direction = False\n else:\n self.direction = True", "def __init__(self, home=False, away=False, night=False, vacation=False):\n self.home, self.away, self.night = home, away, night\n self.vacation = vacation", "def set_up_world_ao(self):\n scene = self.set_as_active()\n new_world = bpy.context.blend_data.worlds.new('World of Wireframe')\n scene.world = new_world\n new_world.light_settings.use_ambient_occlusion = True\n new_world.light_settings.ao_factor = 0.3\n\n renderengine = scene.wirebomb.data_renderengine\n\n if renderengine == 'CYCLES':\n new_world.use_nodes = True\n new_world.node_tree.nodes[1].inputs[0].default_value = (1, 1, 1, 1)\n\n for node in new_world.node_tree.nodes:\n node.select = False\n \n elif renderengine == 'BLENDER_RENDER':\n new_world.horizon_color = (1, 1, 1)", "def conditions(self, conditions):\n\n self._conditions = conditions", "def setup(self):\n self.testInst = 
pysat.Instrument('pysat', 'testing',\n clean_level='clean')\n self.bounds1 = (dt.datetime(2008, 1, 1), dt.datetime(2008, 1, 3))\n self.bounds2 = (dt.datetime(2009, 1, 1), dt.datetime(2009, 1, 2))\n\n self.long_bins = [0., 360., 24]\n self.mlt_bins = [0., 24., 24]\n self.auto_bin = True\n\n return", "def set_conditions(temp=None, humid=None):\n if temp == None:\n print('=== pretty stupid error, not setting temperature on'\n ' set_conditions()')\n thermo.sim_set_conditions(temp, humid)\n my_dict = set_status(None)\n # This will render a normal status frame in HTML\n return my_dict", "def setup(self):\n\n self.testInst = pysat.Instrument('pysat', 'testing',\n clean_level='clean')\n self.testInst.bounds = (dt.datetime(2008, 1, 1),\n dt.datetime(2008, 1, 3))\n\n self.bounds1 = (dt.datetime(2008, 1, 1), dt.datetime(2008, 1, 3))\n self.bounds2 = (dt.datetime(2009, 1, 1), dt.datetime(2009, 1, 2))\n\n self.long_bins = np.linspace(0., 360., 25)\n self.mlt_bins = np.linspace(0., 24., 25)\n\n self.auto_bin = False\n\n return", "def enter(self, requestStatus):\n assert(self.notify.debug(\"enter(requestStatus=\"+str(requestStatus)+\")\"))\n hoodId = requestStatus[\"hoodId\"]\n zoneId = requestStatus[\"zoneId\"]\n\n # start the sky\n newsManager = base.cr.newsManager\n\n if newsManager:\n holidayIds = base.cr.newsManager.getDecorationHolidayId()\n if (ToontownGlobals.HALLOWEEN_COSTUMES in holidayIds) and self.loader.hood.spookySkyFile:\n\n lightsOff = Sequence(LerpColorScaleInterval(\n base.cr.playGame.hood.loader.geom,\n 0.1,\n Vec4(0.55, 0.55, 0.65, 1)),\n Func(self.loader.hood.startSpookySky),\n )\n\n lightsOff.start()\n else:\n # Turn the sky on\n self.loader.hood.startSky()\n lightsOn = LerpColorScaleInterval(\n base.cr.playGame.hood.loader.geom,\n 0.1,\n Vec4(1, 1, 1, 1))\n lightsOn.start()\n else:\n # Turn the sky on\n self.loader.hood.startSky()\n lightsOn = LerpColorScaleInterval(\n base.cr.playGame.hood.loader.geom,\n 0.1,\n Vec4(1, 1, 1, 1))\n lightsOn.start()\n\n self.loader.hood.sky.setFogOff()\n self.__setFaintFog()\n # Turn on the animated props for the estate\n for i in self.loader.nodeList:\n self.loader.enterAnimatedProps(i)\n self.loader.geom.reparentTo(render)\n\n # April toons\n if hasattr(base.cr, \"newsManager\") and base.cr.newsManager:\n holidayIds = base.cr.newsManager.getHolidayIdList()\n if ToontownGlobals.APRIL_FOOLS_COSTUMES in holidayIds:\n self.startAprilFoolsControls()\n\n # leaving or entering the estate via door (i.e. 
a house door)\n self.accept(\"doorDoneEvent\", self.handleDoorDoneEvent)\n self.accept(\"DistributedDoor_doorTrigger\", self.handleDoorTrigger)\n self.fsm.request(requestStatus[\"how\"], [requestStatus])", "def __init__(self, config):\n AmpioEntity.__init__(self, config)\n\n self._state = STATE_UNKNOWN\n self._armed = set()\n self._alarm = set()\n self._exittime = set()\n self._exittime10 = set()\n self._entrytime = set()\n\n self._home_zones = set()\n self._home_cmd_data: Optional[str] = None\n self._away_zones = set()\n self._away_cmd_data: Optional[str] = None\n self._all_cmd_data: Optional[str] = None\n self._supported_features = 0\n\n if CONF_AWAY_ZONES in self._config:\n self._away_zones = self._config[CONF_AWAY_ZONES]\n self._supported_features |= alarm.SUPPORT_ALARM_ARM_AWAY\n mask = 0\n for zone in self._away_zones:\n mask |= (0x01 << (zone - 1)) & 0xFFFFFFFF\n self._away_cmd_data = mask.to_bytes(4, byteorder=\"little\").hex()\n\n if CONF_HOME_ZONES in self._config:\n self._home_zones = self._config[CONF_HOME_ZONES]\n self._supported_features |= alarm.SUPPORT_ALARM_ARM_HOME\n mask = 0\n for zone in self._home_zones:\n mask |= (0x01 << (zone - 1)) & 0xFFFFFFFF\n self._home_cmd_data = mask.to_bytes(4, byteorder=\"little\").hex()\n\n all_zones = self._home_zones | self._away_zones\n mask = 0\n for zone in all_zones:\n mask |= (0x01 << (zone - 1)) & 0xFFFFFFFF\n self._all_cmd_data = mask.to_bytes(4, byteorder=\"little\").hex()", "def boundary_conditions(self):\n ce = 2 * self.dy * self.g * self.mu * self.m_u / self.kb\n self.e[0, :] = (4 * self.e[1, :] - self.e[2, :]) / (\n ce / self.T[0, :] + 3\n )\n self.rho[0, :] = (\n self.e[0, :]\n * (self.Y - 1)\n * self.mu\n * self.m_u\n / (self.kb * self.T[0, :])\n )\n self.u[0, :] = (4 * self.u[1, :] - self.u[2, :]) / 3\n self.w[0, :] = 0\n\n self.e[-1, :] = (4 * self.e[-2, :] - self.e[-3, :]) / (\n 3 - ce / self.T[-1, :]\n )\n self.rho[-1, :] = (\n self.e[-1, :]\n * (self.Y - 1)\n * self.mu\n * self.m_u\n / (self.kb * self.T[-1, :])\n )\n self.u[-1, :] = (4 * self.u[-2, :] - self.u[-3, :]) / 3\n self.w[-1, :] = 0", "def initialise_fluids(self):\n air = self.air_alias.val\n flue_gas = self.fuel_alias.val + '_fg'\n\n for c in self.outl:\n if not c.fluid.val_set[air]:\n c.fluid.val[air] = 0.8\n if not c.fluid.val_set[flue_gas]:\n c.fluid.val[flue_gas] = 0.2\n c.target.propagate_fluid_to_target(c, c.target)", "def __init__(\n self,\n weather,\n us_holidays,\n islamic_holidays,\n jewish_holidays,\n events,\n seahawks,\n huskies,\n sounders,\n ):\n self.weather = weather\n self.us_holidays = us_holidays\n self.islamic_holidays = islamic_holidays\n self.jewish_holidays = jewish_holidays\n self.events = events\n self.seahawks = seahawks\n self.huskies = huskies\n self.sounders = sounders\n self.X = None\n self.y = None", "def setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> None:\n _type: str = config[CONF_TYPE]\n name: str = config[CONF_NAME]\n\n if hass.config.latitude < 0:\n hemisphere = SOUTHERN\n elif hass.config.latitude > 0:\n hemisphere = NORTHERN\n else:\n hemisphere = EQUATOR\n\n _LOGGER.debug(_type)\n add_entities([Season(hemisphere, _type, name)], True)", "def test_merged_atmospheres(metallicity=0, gravity=4):\n cast = get_castelli_atmosphere(temperature=8000, \n metallicity=metallicity, gravity=gravity)\n\n ngen = get_nextgen_atmosphere(temperature=8000,\n metallicity=metallicity, gravity=gravity)\n\n # Now Plot the spectra\n 
py.figure(1)\n py.clf()\n py.loglog(cast.wave, cast.flux, 'r-', label='Castelli')\n py.plot(ngen.wave, ngen.flux, 'b-', label='NextGen')\n py.xlabel('Wavelength')\n py.ylabel('Flux')\n py.legend()\n py.xlim(3000, 50000)\n py.ylim(1e3, 1e8)\n\n\n ngen = get_nextgen_atmosphere(temperature=4000,\n metallicity=metallicity, gravity=gravity)\n\n phoe = get_phoenix_atmosphere(temperature=4000,\n metallicity=metallicity, gravity=gravity)\n # Now Plot the spectra\n py.figure(2)\n py.clf()\n py.loglog(phoe.wave, phoe.flux, 'r-', label='Phoenix')\n py.plot(ngen.wave, ngen.flux, 'b-', label='NextGen')\n py.xlabel('Wavelength')\n py.ylabel('Flux')\n py.legend()\n py.xlim(3000, 50000)\n py.ylim(1, 1e8)", "def setup(self):\n\n warnings.simplefilter(\"always\", DeprecationWarning)\n\n orbit_info = {'index': 'slt', 'kind': 'lt'}\n self.tinst = pysat.Instrument('pysat', 'testing', orbit_info=orbit_info)\n self.tinst.bounds = (dt.datetime(2008, 1, 1), dt.datetime(2008, 1, 2))\n\n self.warn_msgs = []\n self.war = \"\"\n return", "def conditions():\n pass", "def __init__(self, para, ini_cond):\n\n # grid\n self.z = np.linspace(0, para['grid']['zmax'], para['grid']['Nlayers']) # grid [m] above ground\n self.dz = self.z[1] - self.z[0] # gridsize [m]\n self.ones = np.ones(len(self.z)) # dummy\n self.zref = para['zref'] # height of forcing data [m]\n \n # moss properties\n self.hc = para['hc'] # canopy height (m)\n self.lad = para['lad'] # shoot-area density (m2m-3)\n self.LAI = sum(self.lad*self.dz)\n \n self.canopy_nodes = np.where(self.lad > 0)[0]\n \n # hydraulic\n self.porosity = para['hydraulic']['porosity']\n self.pF = para['hydraulic']['pF']\n self.Ksat = para['hydraulic']['Ksat']\n self.freezing_curve = para['hydraulic']['freezing_curve']\n \n # radiation\n self.albedo = para['radiation'] # 'PAR', 'NIR'\n self.emissivity = para['radiation']['emissivity']\n self.clump = para['radiation']['clumping']\n self.leaf_angle = para['radiation']['leaf_angle']\n \n #self.radiation = para['radiation']\n \n # compute non-dimensional flow velocity Un = U/ust and momentum diffusivity\n Utop = ini_cond['Utop'] # U/ust at zref\n Ubot = 0.0 # no-slip\n self.Sc = para['Schmidt_nr']\n _, self.Un, self.Kmn, _ = closure_model_U_moss(self.z, self.lad, self.hc, Utop, Ubot) \n \n self.U = None\n self.Ks = None\n self.length_scale = para['length_scale']\n \n self.Switch_WMA = False\n \n # initial states\n self.T = ini_cond['T']\n self.Wtot = ini_cond['Wtot']\n self.Wliq, self.Wice, _ = frozen_water(self.T, self.Wot, fp=self.freezing_curve, To=0.0)\n self.h = water_retention(self.pF, theta=self.Wliq)", "def set_constraints(self):\n\n self.config.logger.info(\"Applying base layer land use constraints and prepping future projection constraints...\")\n\n # set start time\n t0 = time.time()\n\n # apply user-defined constraints to base land use layer data and GCAM land use data\n self.cst = ApplyConstraints(self.allreg, self.allaez, self.final_landclasses, self.user_years, self.ixr_idm,\n self.allregaez, self.spat_region, self.allregnumber, self.spat_aez,\n self.gcam_landclasses, self.gcam_regionnumber, self.gcam_aez, self.gcam_landname,\n self.gcam_array, self.gcam_ludata, self.ngrids, self.constraint_names,\n self.observed_landclasses, self.observed_array, self.spat_ludata,\n self.config.constraint_files, self.config.logger)\n\n # apply spatial constraints\n self.spat_ludataharm, self.spat_ludataharm_orig_steps, self.spat_ludataharm_orig = self.cst.apply_spat_constraints()\n\n self.config.logger.info('PERFORMANCE: Constraints applied 
to projected and spatial data in {0} seconds'.format(time.time() - t0))", "def conditions(self):\n return ConditionCollection(client=self)", "def setgeo(rundata):\n#-------------------\n\n try:\n geodata = rundata.geodata\n except:\n print \"*** Error, this rundata has no geodata attribute\"\n raise AttributeError(\"Missing geodata attribute\")\n\n # == setgeo.data values ==\n geodata.variable_dt_refinement_ratios = True ## Overrides clawdata.inratt, above\n\n geodata.igravity = 1\n geodata.gravity = 9.81\n geodata.icoordsys = 2\n geodata.Rearth = 6367.5e3\n geodata.icoriolis = 0\n\n # == settsunami.data values ==\n geodata.sealevel = 0.\n geodata.drytolerance = 1.e-2\n geodata.wavetolerance = 1.e-1 ##\n geodata.depthdeep = 1.e6 ## Definition of \"deep\" water\n geodata.maxleveldeep = 10 ## Restriction on the number of deep water levels\n geodata.ifriction = 1 ## Friction switch. 0=off, 1=on\n # geodata.coeffmanning =0.0\n geodata.coeffmanning =.025\n geodata.frictiondepth = 10.\n\n #okushiri_dir = '/Users/FrankGonzalez/daily/modeling/tsunami-benchmarks/github/' \\\n #+ 'FrankGonzalez/geoclaw-group/benchmarks/bp09' ##\n okushiri_dir = '..' ## this directory\n \n # == settopo.data values ==\n geodata.topofiles = []\n # for topography, append lines of the form\n # [topotype, minlevel, maxlevel, t1, t2, fname]\n # geodata.topofiles.append([1, 1, 1, 0, 1.e10, \\\n # okushiri_dir + '/OK24.tt1']) ## 24-s, ~550-740 m Entire Domain (Dmitry's version of Kansai U.)\n geodata.topofiles.append([1, 1, 1, 0, 1.e10, \\\n okushiri_dir + '/OK08.tt1']) ## 8-s, ~184-247 m Okushiri (Dmitry's version of Kansai U.)\n geodata.topofiles.append([1, 1, 1, 0, 1.e10, \\\n okushiri_dir + '/OK03.tt1']) ## 2.67 s (8/3s), ~61-82 m Okushiri (Dmitry's version of Kansai U.)\n geodata.topofiles.append([1, 1, 1, 0., 1.e10, \\\n okushiri_dir + '/AO15.tt1']) ## 0.53-0.89 s, ~16.5-20.4 m, Aonae (Dmitry's version of Kansai U.)\n # geodata.topofiles.append([1, 1, 1, 0, 1.e10, \\\n # okushiri_dir + '/MO01.tt1']) ## 0.89 s, ~20-27 m, Monai (Dmitry's version of Kansai U.)\n # geodata.topofiles.append([1, 1, 1, 0., 1.e10, \\\n # okushiri_dir + '/MB05.tt1']) ## 0.13-0.18 s, ~4 m Monai (Dmitry's version of Kansai U.)\n\n # geodata.topofiles.append([-3, 1, 1, 0, 1.e10, \\\n # okushiri_dir + '/depth40_138.txt']) ## JODC 500 m\n # geodata.topofiles.append([-3, 1, 1, 0, 1.e10, \\\n # okushiri_dir + '/depth40_140.txt']) ## JODC 500 m\n # geodata.topofiles.append([-3, 1, 1, 0, 1.e10, \\\n # okushiri_dir + '/depth42_138.txt']) ## JODC 500 m\n # geodata.topofiles.append([-3, 1, 1, 0, 1.e10, \\\n # okushiri_dir + '/depth42_140.txt']) ## JODC 500 m\n \n # == setdtopo.data values ==\n geodata.dtopofiles = []\n # for moving topography, append lines of the form: (<= 1 allowed for now!)\n # [topotype, minlevel,maxlevel,fname]\n geodata.dtopofiles.append([1,2,3, okushiri_dir + '/HNO1993.txyz']) ## Dmitry N.'s version of Kansai U.\n\n # == setqinit.data values ==\n geodata.iqinit = 0\n geodata.qinitfiles = []\n # for qinit perturbations, append lines of the form: (<= 1 allowed for now!)\n # [minlev, maxlev, fname]\n #geodata.qinitfiles.append([1, 1, 'hump.xyz'])\n\n # == setregions.data values ==\n geodata.regions = []\n # to specify regions of refinement append lines of the form\n # [minlevel,maxlevel,t1,t2,x1,x2,y1,y2]\n # Note: Level 1 = 24 s & Levels [2,3,4,5] = RF [3,3,3,8] => Res of 8 sec to 8/3 sec to 8/9 to 1/9 sec/cell\n # Grid Limits\n # Name x1 x2 y1 y2\n # OK24 137.53666670 141.53000000 39.53666670 44.26333330\n # HNO 138.50000000 
140.55000000 40.51666670 43.30000000\n # OK08 138.50111110 140.55222220 40.52111110 43.29888890\n # OK03 139.38925930 139.66407410 41.99592590 42.27074070\n # AO15 139.43419750 139.49987650 42.03118520 42.07251850\n # MO01 139.41123460 139.43320990 42.07790120 42.14580250\n # MB05 139.41385190 139.42639510 42.09458550 42.10343920\n \n #geodata.regions.append([1, 1, 0., 1e9, 0.0, 360.0, -90.0, 90.0]) ## OK24: 24-s, ~550-740 m Entire Domain\n geodata.regions.append([1, 2, 0., 1e9, 138.5, 139.7, 41.4, 43.3]) ## OK08: 8-s, ~184-247 m Okushiri \n geodata.regions.append([1, 3, 0., 1e9, 139.39, 139.6, 42.0, 42.25]) ## OK03: 2.67 s (8/3s), ~61-82 m Okushiri \n # geodata.regions.append([1, 4, 0., 1e9, 139.42, 139.57, 42.03, 42.23]) ## AO15: 0.53-8/9 s, ~16.5-20.4 m, Aonae \n #geodata.regions.append([1, 4, 0., 1e9, 139.40, 139.46, 42.03, 42.22]) ## West coast Okushiri\n geodata.regions.append([4, 4, 90., 1e9, 139.42, 139.431, 42.07, 42.12])\n \n\n # == setgauges.data values ==\n geodata.gauges = []\n # for gauges append lines of the form [gaugeno, x, y, t1, t2]\n \n # geodata.gauges.append([1,139.429211710298,42.188181491811,0.0,1e9]) ## Tsuji Obs\n # geodata.gauges.append([3,139.411185686023,42.162762869034,0.0,1e9]) ## Tsuji Obs\n # geodata.gauges.append([5,139.418261206409,42.137404393442,0.0,1e9]) ## Tsuji Obs\n geodata.gauges.append([6,139.428035766149,42.093012384481,0.0,1e9]) ## Tsuji Obs\n geodata.gauges.append([7,139.426244998662,42.116554785296,0.0,1e9]) ## Tsuji Obs\n geodata.gauges.append([8,139.423714744650,42.100414145210,0.0,1e9]) ## Tsuji Obs\n geodata.gauges.append([9,139.428901803617,42.076636582137,0.0,1e9]) ## Tsuji Obs\n # geodata.gauges.append([10,139.427853421935,42.065461519438,0.0,1e9]) ## Tsuji Obs\n # geodata.gauges.append([11,139.451539852594,42.044696547058,0.0,1e9]) ## Tsuji Obs\n # geodata.gauges.append([12,139.456528443496,42.051692262353,0.0,1e9]) ## Tsuji Obs\n # geodata.gauges.append([13,139.456528443496,42.051692262353,0.0,1e9]) ## Tsuji Obs\n # \n # == setfixedgrids.data values ==\n\n geodata.fixedgrids = []\n \n for g in geodata.gauges:\n xg = g[1]\n yg = g[2]\n xg1 = xg - 0.001\n xg2 = xg + 0.002\n yg1 = yg - 0.001\n yg2 = yg + 0.002\n nx = 31\n ny = 31\n gaugeno = g[0]\n if gaugeno == 9:\n xg2 = xg + 0.003\n nx = 41\n if gaugeno == 8:\n xg1 = xg - 0.002\n xg2 = xg + 0.001\n yg1 = yg - 0.002\n yg2 = yg + 0.001\n \n geodata.fixedgrids.append([210.0,360.0,11,xg1,xg2,yg1,yg2,nx,ny,0,1])\n geodata.regions.append([5, 5, 180., 1e9, xg1,xg2,yg1,yg2])\n \n \n return rundata\n\n # end of function setgeo\n # ----------------------", "def rad_field_initial_condition(self):\n\n # revert in viewing direct\n angle, _ = f.convert_direction(self.receiver_elevation, self.receiver_azimuth)\n # Looking at the sky\n if angle < 90:\n I_init = (\n self.sun_intensity\n * f.delta_func(self.sun_elevation - self.receiver_elevation)\n * f.delta_func(self.sun_azimuth - self.receiver_azimuth)\n )\n\n # Looking at the ground\n elif angle > 90:\n I_ground = RT_model_1D.calc_direct_beam_intensity(self, 0)\n\n I_lambert = (\n I_ground\n * self.ground_albedo\n * np.cos(np.deg2rad((self.sun_elevation + 180) % 360))\n )\n\n I_specular = (\n I_ground\n * self.ground_albedo\n * f.delta_func(self.sun_elevation + self.receiver_elevation - 180)\n * f.delta_func(self.sun_azimuth - self.receiver_azimuth)\n )\n\n I_init = (\n 1 - self.reflection_type\n ) * I_lambert + self.reflection_type * I_specular\n\n else:\n I_init = np.empty(self.stokes_dim)\n I_init.fill(np.nan)\n\n return I_init", "def 
setup_orbit(self, t, halo_gas_density, galaxy_velocity):\n \n if any( [halo_gas_density > 1.0E-10] ) : # convert to mass density\n halo_gas_density = halo_gas_density * self.ic['mu_halo'] * cgs.mp\n \n # if t is an array, then use a cubic spline to make a function from the orbital\n # data. If t is a single value, then halo gas dnesity and velocity are constants..\n # make them into functions anyway to make rest of everything work...\n if np.size(halo_gas_density) > 1 : \n self.halo_density = interpolate.UnivariateSpline(t, halo_gas_density,k=3)\n else:\n self.halo_density = lambda x: halo_gas_density\n \n if np.size(galaxy_velocity) > 1:\n self.galaxy_velocity = interpolate.UnivariateSpline(t, galaxy_velocity ,k=3)\n else:\n self.galaxy_velocity = lambda x: galaxy_velocity", "def initializeSides(self, sides):\n self.conditions = \"All\"\n self.check = \"lambda x, y, z=0: True\"", "def make_boundaries(self):\n p = self.project\n c = p[0]\n outlet = p.NewOutlet('GW', c.x, c.y, c.z - c.soildepth)\n cmf.FreeDrainagePercolation(c.layers[-1], outlet)\n rainfall = cmf.timeseries.from_sequence(self.starttime, cmf.day, [25, 0, 0, 0, 0, 0, 0] * 200)\n p.rainfall_stations.add('Heavy rain once a week', rainfall, (0, 0, 0))\n print(cmf.describe(p.rainfall_stations))\n p.use_nearest_rainfall()\n\n return outlet", "def setup(self):\n try:\n self.homedata = pyatmo.HomeData(self.auth)\n self.homestatus = pyatmo.HomeStatus(self.auth, home_id=self.home_id)\n self.home_name = self.homedata.getHomeName(self.home_id)\n self.update()\n except TypeError:\n _LOGGER.error(\"ThermostatData::setup() got error\")\n return False\n except pyatmo.exceptions.NoDevice:\n _LOGGER.debug(\n \"No climate devices for %s (%s)\", self.home_name, self.home_id\n )\n return False\n return True", "def __init__(__self__, *,\n conditions: Optional[Sequence['_meta.v1.outputs.ConditionPatch']] = None,\n observed_generation: Optional[int] = None,\n type_checking: Optional['outputs.TypeCheckingPatch'] = None):\n if conditions is not None:\n pulumi.set(__self__, \"conditions\", conditions)\n if observed_generation is not None:\n pulumi.set(__self__, \"observed_generation\", observed_generation)\n if type_checking is not None:\n pulumi.set(__self__, \"type_checking\", type_checking)", "def __init__(self):\r\n config = ConfigProvider().getProcessingConfig()\r\n self.xGround = config.get(\"xGround\")\r\n self.yGround = config.get(\"yGround\")", "def prepare_fg(\n self, times, wavelength, spectra, stellar, intensities, telluric, area=None\n ):\n\n if area is None:\n orb = Orbit(self.star, self.planet)\n area = orb.stellar_surface_covered_by_planet(times)\n\n model = stellar * telluric\n\n # Normalize the profile of the observations\n profile = np.nanmean(spectra, axis=1)\n model_profile = np.nanmean(model, axis=1)\n norm = profile / model_profile\n\n # Normalize the spectrum\n # model = stellar * telluric * norm[:, None]\n # profile = np.median(spectra, axis=0)\n # model_profile = np.median(model, axis=0)\n\n # nm = np.nanmedian(profile / model_profile)\n # norm *= nm\n\n # model = stellar * telluric * norm[:, None]\n # diff = spectra - model\n\n # model = np.nanmedian(spectra, axis=0)\n\n # f = -(\n # # np.nan_to_num(intensities) *\n # self.area_atmosphere\n # / self.area_planet\n # * area[:, None]\n # # * np.nan_to_num(telluric, nan=1)\n # * norm[:, None]\n # )\n # f = np.nan_to_num(intensities) * np.nan_to_num(telluric, nan=1) * norm[:, None]\n area *= self.area_atmosphere / self.area_planet\n f = -np.nan_to_num(intensities, nan=1) * 
area[:, None]\n if hasattr(f, \"to_value\"):\n f = f.to_value(1)\n\n # g = spectra - stellar * telluric * norm[:, None]\n # if self.n_sysrem is not None:\n # g = sysrem(g, self.n_sysrem)\n\n g = spectra\n if self.n_sysrem is not None:\n # Use SVD directly instead of Sysrem\n g = sysrem(spectra, self.n_sysrem)\n # u, s, vh = np.linalg.svd(spectra, full_matrices=False)\n # s[: self.n_sysrem] = 0\n # s[80:] = 0\n # ic = (u * s) @ vh\n # g = ic\n else:\n # g = spectra - stellar * telluric * norm[:, None]\n gen = np.random.default_rng()\n tmp = sysrem(spectra, 5)\n g = gen.normal(\n loc=np.nanmean(tmp), scale=np.nanstd(tmp), size=spectra.shape\n )\n # g *= np.nanstd() # std of random is 1 (in theory)\n\n # norm = np.nanstd(g, axis=0)\n # f /= norm\n # g /= norm\n\n # plt.imshow(g, aspect=\"auto\", origin=\"lower\")\n # plt.xlabel(\"Wavelength\")\n # plt.ylabel(\"Time\")\n # plt.title(f\"N_Sysrem: {self.n_sysrem}\")\n # plt.savefig(f\"spectra_sysrem_{self.n_sysrem}.png\")\n\n return wavelength, f, g", "async def test_default_setup_params(opp):\n state = opp.states.get(ENTITY_WATER_HEATER)\n assert state.attributes.get(\"min_temp\") == 110\n assert state.attributes.get(\"max_temp\") == 140", "def weather(obsblock, subarray=DEFAULT) :\n _closeTrial(\"WEATHER\",\n \"Array stopped due to weather conditions. \", subarray=subarray)\n commandlog(\"weather()\", subarray=subarray)\n newProject(\"ct017\", obsblock, \"\", False, subarray=subarray)\n intent(\"noise\", \"O\", subarray=subarray)\n # An integration is required to track the time in the PDB\n _pdbIntegration(subarray=subarray)\n alarmIntegdisable(subarray=subarray)\n print \"Integration alarm disabled...\"", "def rainfall_series(self):\n\n # assign local temporal variables\n datatype = 'strds'\n increment = str(self.rain_interval)+\" minutes\"\n raster = 'raster'\n rain_excess = 'rain_excess'\n net_difference = 'net_difference'\n #iterations = sum(1 for row in precip)\n\n # create a raster space time dataset\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.elevation_timeseries,\n title=self.elevation_title,\n description=self.elevation_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.depth_timeseries,\n title=self.depth_title,\n description=self.depth_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.erdep_timeseries,\n title=self.erdep_title,\n description=self.erdep_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.flux_timeseries,\n title=self.flux_title,\n description=self.flux_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.difference_timeseries,\n title=self.difference_title,\n description=self.difference_description,\n overwrite=True)\n\n # register the initial digital elevation model\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=self.elevation,\n start=self.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # create evolution object\n evol = Evolution(\n elevation=self.elevation,\n precipitation=self.precipitation,\n start=self.start,\n rain_intensity=self.rain_intensity,\n rain_interval=self.rain_interval,\n walkers=self.walkers,\n runoff=self.runoff,\n mannings=self.mannings,\n 
detachment=self.detachment,\n transport=self.transport,\n shearstress=self.shearstress,\n density=self.density,\n mass=self.mass,\n grav_diffusion=self.grav_diffusion,\n erdepmin=self.erdepmin,\n erdepmax=self.erdepmax,\n k_factor=self.k_factor,\n c_factor=self.c_factor,\n m=self.m,\n n=self.n,\n threads=self.threads,\n fill_depressions=self.fill_depressions)\n\n # open txt file with precipitation data\n with open(evol.precipitation) as csvfile:\n\n # check for header\n has_header = csv.Sniffer().has_header(csvfile.read(1024))\n\n # rewind\n csvfile.seek(0)\n\n # skip header\n if has_header:\n next(csvfile)\n\n # parse time and precipitation\n precip = csv.reader(csvfile, delimiter=',', skipinitialspace=True)\n\n # initial run\n initial = next(precip)\n evol.start = initial[0]\n evol.rain_intensity = 'rain_intensity'\n # compute rainfall intensity (mm/hr)\n # from rainfall observation (mm)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_intensity}\"\n \"={rain_observation}\"\n \"/{rain_interval}\"\n \"*60.\".format(\n rain_intensity=evol.rain_intensity,\n rain_observation=float(initial[1]),\n rain_interval=self.rain_interval),\n overwrite=True)\n\n # determine mode and run model\n if self.mode == \"simwe_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.erosion_deposition()\n # remove relative timestamps\n # from r.sim.water and r.sim.sediment\n gscript.run_command(\n 'r.timestamp',\n map=depth,\n date='none')\n gscript.run_command(\n 'r.timestamp',\n map=erosion_deposition,\n date='none')\n\n elif self.mode == \"usped_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.usped()\n\n elif self.mode == \"rusle_mode\":\n (evolved_elevation, time, depth, sediment_flux,\n difference) = evol.rusle()\n\n else:\n raise RuntimeError(\n '{mode} mode does not exist').format(mode=self.mode)\n\n # register the evolved maps\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=evolved_elevation,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.depth_timeseries,\n maps=depth,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.erdep_timeseries,\n maps=erosion_deposition,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n except (NameError, CalledModuleError):\n pass\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.flux_timeseries,\n maps=sediment_flux,\n start=evol.start,\n increment=increment,\n flags='i', overwrite=True)\n except (NameError, CalledModuleError):\n pass\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.difference_timeseries,\n maps=difference,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # run the landscape evolution model for each rainfall record\n for row in precip:\n\n # update the elevation\n evol.elevation=evolved_elevation\n\n # update time\n evol.start=row[0]\n\n # compute rainfall intensity (mm/hr)\n # from rainfall observation (mm)\n rain_intensity = 'rain_intensity'\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_intensity}\"\n \"={rain_observation}\"\n \"/{rain_interval}\"\n \"*60.\".format(\n rain_intensity=rain_intensity,\n rain_observation=float(row[1]),\n rain_interval=self.rain_interval),\n overwrite=True)\n\n # derive excess water (mm/hr) from rainfall rate 
(mm/hr)\n # plus the depth (m) per rainfall interval (min)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_excess}\"\n \"={rain_intensity}\"\n \"+{depth}\"\n \"/1000.\"\n \"/{rain_interval}\"\n \"*60.\".format(\n rain_excess=rain_excess,\n rain_intensity=rain_intensity,\n depth=depth,\n rain_interval=self.rain_interval),\n overwrite=True)\n\n # update excess rainfall\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_intensity} = {rain_excess}\".format(\n rain_intensity='rain_intensity',\n rain_excess=rain_excess),\n overwrite=True)\n evol.rain_intensity = rain_intensity\n\n # determine mode and run model\n if self.mode == \"simwe_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.erosion_deposition()\n # remove relative timestamps\n # from r.sim.water and r.sim.sediment\n gscript.run_command(\n 'r.timestamp',\n map=depth,\n date='none')\n gscript.run_command(\n 'r.timestamp',\n map=erosion_deposition,\n date='none')\n\n elif self.mode == \"usped_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.usped()\n\n elif self.mode == \"rusle_mode\":\n (evolved_elevation, time, depth, sediment_flux,\n difference) = evol.rusle()\n\n else:\n raise RuntimeError(\n '{mode} mode does not exist').format(mode=self.mode)\n\n # register the evolved maps\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=evolved_elevation,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.depth_timeseries,\n maps=depth,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.erdep_timeseries,\n maps=erosion_deposition,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n except (NameError, CalledModuleError):\n pass\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.flux_timeseries,\n maps=sediment_flux,\n start=evol.start,\n increment=increment,\n flags='i', overwrite=True)\n except (NameError, CalledModuleError):\n pass\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.difference_timeseries,\n maps=difference,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # remove temporary maps\n gscript.run_command(\n 'g.remove',\n type='raster',\n name=['rain_excess'],\n flags='f')\n\n # compute net elevation change\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{net_difference}\"\n \"= {evolved_elevation}-{elevation}\".format(\n net_difference=net_difference,\n elevation=self.elevation,\n evolved_elevation=evol.elevation),\n overwrite=True)\n gscript.write_command(\n 'r.colors',\n map=net_difference,\n rules='-',\n stdin=difference_colors)", "def func(self):\n account = self.account\n city_name = 'Phoenix' if not self.args else self.args\n a = Astral()\n a.solar_depression = 'civil'\n city = a[city_name]\n if not city:\n return\n timezone = city.timezone\n sun = city.sun(date=datetime.date.today(), local=True)\n\n account.msg('Information for %s/%s\\n' % (city_name, city.region))\n account.msg('Timezone: %s' % timezone)\n account.msg('Latitude: %.02f; Longitude: %.02f' % (city.latitude, city.longitude))\n account.msg('Dawn: %s' % str(sun['dawn']))\n account.msg('Sunrise: %s' % str(sun['sunrise']))\n account.msg('Noon: %s' % str(sun['noon']))\n account.msg('Sunset: %s' % str(sun['sunset']))\n account.msg('Dusk: %s' % 
str(sun['dusk']))", "def __init__(self,\n launchSiteLat,\n launchSiteLon,\n launchSiteElev,\n dateAndTime,\n UTC_offset=0,\n inflationTemperature=0.0,\n forceNonHD=False,\n forecastDuration=4,\n use_async=True,\n requestSimultaneous=True,\n debugging=False,\n progressHandler=None,\n load_on_init=False):\n # Initialize extra forecast-specific variables\n self.forceNonHD = forceNonHD\n self.forecastDuration = forecastDuration\n self.use_async = use_async\n self.requestSimultaneous = requestSimultaneous\n\n self._GFSmodule = None\n\n # This should be the last thing that is called on init, since the base\n # (environment) class init calls self.load (if load_on_init is True)\n super(forecastEnvironment, self).__init__(\n inflationTemperature=inflationTemperature,\n launchSiteLat=launchSiteLat,\n launchSiteLon=launchSiteLon,\n launchSiteElev=launchSiteElev,\n dateAndTime=dateAndTime,\n UTC_offset=UTC_offset,\n debugging=debugging,\n load_on_init=load_on_init)", "def setgeo(rundata):\n#-------------------\n\n try:\n geodata = rundata.geodata\n except:\n print \"*** Error, this rundata has no geodata attribute\"\n raise AttributeError(\"Missing geodata attribute\")\n\n # == setgeo.data values ==\n\n geodata.variable_dt_refinement_ratios = True\n\n geodata.igravity = 1\n geodata.gravity = 9.81\n geodata.icoordsys = 2\n geodata.Rearth = 6367.5e3\n geodata.icoriolis = 0\n\n # == settsunami.data values ==\n geodata.sealevel = 0.\n geodata.drytolerance = 1.e-3\n geodata.wavetolerance = 1.e-1\n geodata.depthdeep = 1.e2\n geodata.maxleveldeep = 4\n geodata.ifriction = 1\n geodata.coeffmanning =.025\n geodata.frictiondepth = 200.\n\n\n # == settopo.data values ==\n geodata.topofiles = []\n # for topography, append lines of the form\n # [topotype, minlevel, maxlevel, t1, t2, fname]\n geodata.topofiles.append([3, 1, 1, 0., 1e10, 'ebanda.asc'])\n \n\n # == setdtopo.data values ==\n geodata.dtopofiles = []\n # for moving topography, append lines of the form: (<= 1 allowed for now!)\n # [topotype, minlevel, maxlevel, fname]\n geodata.dtopofiles.append([3,1,3,'BandaArc1852.tt3'])\n\n geodata.iqinit = 0\n geodata.qinitfiles = []\n\n # == setgauges.data values ==\n geodata.gauges = []\n # for gauges append lines of the form [gaugeno,x,y,t1,t2]\n geodata.gauges.append([1, 109.000, -7.789, 0., 1e10]) #Cialciap\n geodata.gauges.append([2, 109.040, -7.722, 0., 1e10]) #Cialciap Bay\n geodata.gauges.append([3, 110.292, -8.027, 0., 1e10]) #Bantul\n geodata.gauges.append([4, 111.086, -8.233, 0., 1e10]) #Pacitan\n geodata.gauges.append([5, 111.558, -8.319, 0., 1e10]) #Pelang Beach\n geodata.gauges.append([6, 111.968, -8.286, 0., 1e10]) #Sine Beach\n geodata.gauges.append([7, 112.982, -8.326, 0., 1e10]) #Guying\n geodata.gauges.append([8, 113.176, -8.286, 0., 1e10]) #Muara\n geodata.gauges.append([9, 113.461, -8.383, 0., 1e10]) #Puger\n geodata.gauges.append([10, 113.336, -8.506, 0., 1e10]) #Barung Island\n geodata.gauges.append([11, 114.110, -8.621, 0., 1e10]) #Lampon\n geodata.gauges.append([12, 114.396, -8.231, 0., 1e10]) #Banyuwani\n geodata.gauges.append([13, 112.880, -7.278, 0., 1e10]) #Surabiya\n geodata.gauges.append([14, 114.965, -8.533, 0., 1e10]) #Tabanan\n geodata.gauges.append([15, 115.144, -8.697, 0., 1e10]) #Kuta\n geodata.gauges.append([16, 115.193, -8.848, 0., 1e10]) #Nusa Dua\n geodata.gauges.append([17, 116.064, -8.586, 0., 1e10]) #Mataram\n geodata.gauges.append([18, 115.260, -8.727, 0., 1e10]) #Sanur\n geodata.gauges.append([19, 116.031, -8.873, 0., 1e10]) #Sepi Bay\n geodata.gauges.append([20, 
116.135, -8.872, 0., 1e10]) #Serangan Beach\n geodata.gauges.append([21, 116.283, -8.902, 0., 1e10]) #Kuta Lombok\n geodata.gauges.append([22, 116.400, -8.868, 0., 1e10]) #Awang Bay\n geodata.gauges.append([23, 116.466, -8.924, 0., 1e10]) #Surga Beach\n geodata.gauges.append([24, 116.744, -8.918, 0., 1e10]) #Maluk\n geodata.gauges.append([25, 116.833, -9.047, 0., 1e10]) #Tongo\n geodata.gauges.append([26, 117.199, -9.023, 0., 1e10]) #Linyuk\n geodata.gauges.append([27, 117.762, -8.939, 0., 1e10]) #Leppu\n geodata.gauges.append([28, 118.377, -8.785, 0., 1e10]) #Huu\n geodata.gauges.append([29, 118.172, -8.780, 0., 1e10]) #Rontu Beach\n geodata.gauges.append([30, 119.403, -8.729, 0., 1e10]) #Mantea Alley\n geodata.gauges.append([31, 119.374, -9.788, 0., 1e10]) #Nihiwatu\n geodata.gauges.append([32, 119.466, -9.742, 0., 1e10]) #Waigalli\n geodata.gauges.append([33, 119.945, -9.975, 0., 1e10]) #Tarimbang Beach\n geodata.gauges.append([34, 120.183, -10.233, 0., 1e10]) #Lalindi\n geodata.gauges.append([35, 120.264, -10.257, 0., 1e10]) #Manoekangga\n geodata.gauges.append([36, 120.546, -10.241, 0., 1e10]) #Baing\n geodata.gauges.append([37, 120.312, -9.661, 0., 1e10]) #Waingapu\n geodata.gauges.append([38, 119.871, -8.501, 0., 1e10]) #Labun Badjo\n geodata.gauges.append([39, 120.604, -8.822, 0., 1e10]) #Mborong\n geodata.gauges.append([40, 123.560, -10.166, 0., 1e10]) #Kupang\n geodata.gauges.append([41, 121.824, -10.491, 0., 1e10]) #Baa", "def evolve_satellite(t, included_physics, halo_gas_density, galaxy_velocity, galaxy_gas_density, rho_DM, M_o, R_o, physics_kwargs={}, RPS_KH_exclusive = False):\n # included physics is going to be a list of the physics \"modules\" to evovel\n # right now, options should be 'KH' and 'RPS'\n\n physics_kwargs_copy = copy.deepcopy(physics_kwargs)\n # do a check of input parameters. Are they functions or constants?\n \n if not hasattr(halo_gas_density, '__call__'):\n halo_gas_density = lambda x : halo_gas_density\n \n if not hasattr(galaxy_velocity, '__call__'):\n galaxy_velocity = lambda x : galaxy_velocity\n \n # galaxy gas density should be function of radius in galaxy!!!\n if not hasattr(galaxy_gas_density, '__call__'):\n galaxy_gas_density = lambda x : galaxy_gas_density # constant!\n \n # assume KH and RPS are off unless in list of included physics\n KH_const = 0.0; RPS_const = 0.0\n \n if 'KH' in included_physics:\n KH_const = 1.0\n\n if not 'KH' in physics_kwargs_copy.keys(): # bookkeeping if off\n physics_kwargs_copy['KH'] = {}\n \n if 'RPS' in included_physics:\n RPS_const = 1.0\n \n if not 'RPS' in physics_kwargs_copy.keys(): # bookkeeping if off\n physics_kwargs_copy['RPS'] = {}\n \n # if alpha is contained in physcis kwargs... strip it to be \n # used in the RPS condition function call, as it is not used in the\n # RPS mass loss rate calculation\n if 'alpha' in physics_kwargs_copy['RPS'].keys():\n alpha = physics_kwargs_copy['RPS']['alpha']\n physics_kwargs_copy['RPS'].pop('alpha',None)\n else:\n alpha = 1.0\n \n # need to come up with some way to make a function on the fly... constants is fine\n # but if this gets complicated then.... yaa.....\n \n ode_function = lambda y, t, A, B:\\\n A * _KH_evolution(y, t, halo_gas_density, galaxy_velocity,\n galaxy_gas_density, **physics_kwargs_copy['KH'])+\\\n B * _RPS_evolution(y, t, halo_gas_density, galaxy_velocity,\n galaxy_gas_density,\n galaxy_gas_density(0.0), **physics_kwargs_copy['RPS'])\n \n # write a loop here... 
solve step by step\n M = np.zeros(np.size(t)); R = np.zeros(np.size(t))\n M[0] = M_o; R[0] = R_o\n keep_looping = True; i = 0; turn_KH_off = 1.0 \n while (i < np.size(t) - 1) and keep_looping:\n \n # check if ram pressure stripping occurs\n if 'RPS' in included_physics:\n # integrate and test around the current radius\n# rps_cond = _RPS_condition(np.linspace(0.9999*R[i],1.0001*R[i],5), rho_DM, galaxy_gas_density, \n# halo_gas_density(t[i]), galaxy_velocity(t[i]), alpha=alpha)\n\n rps_cond = _RPS_condition(R[i], rho_DM, galaxy_gas_density, halo_gas_density(t[i]),\n galaxy_velocity(t[i]), alpha=alpha)\n \n # if RPS is valid at current radius, use it... otherwise set to zero\n if rps_cond > 0:\n RPS_const = 1.0\n else:\n RPS_const = 0.0 \n \n if RPS_KH_exclusive and RPS_const == 1.0: # turn KH off\n turn_KH_off = 0.0\n elif RPS_KH_exclusive and RPS_const == 0.0: # turn KH on\n turn_KH_off = 1.0\n else: # else just keep it the same\n turn_KH_off = KH_const \n \n ode_function_args = (KH_const * turn_KH_off, RPS_const,)\n \n \n \n soln = integrate.odeint(ode_function, [M[i],R[i]], t[i:i+2], \n args=ode_function_args,\n mxhnil=0, ixpr=False)\n M[i+1] = soln[1,0]; R[i+1] = soln[1,1]\n \n i = i + 1\n \n simple_check = M[i] + ode_function([M[i],R[i]], t[i], *ode_function_args)[0] * (t[i] - t[i-1])\n \n if M[i] <= 0.0 or R[i] <= 0.0 or simple_check <= 0.0:\n M[i] = 0.0; R[i] = 0.0\n keep_looping = False\n \n \n return M, R", "def setup_simulation(self, **kwargs):\n\n self.distance = self.config[\"site\"][\"distance\"]\n self.num_substations = self.config[\"num_substations\"]\n\n self.initialize_substructure_production()\n self.initialize_installation_vessel()", "def _set_weather(self, month):\n mode = 0.0\n if month in Weather.winter_months:\n mode = -1.0\n elif month in Weather.summer_months:\n mode = 1.0\n self.temp += min(max(-20.0, random.triangular(-10.0, 10.0, mode)), 100.0)\n self.status = random.choice(list(Weather.status))", "def init_fig():\r\n # Set the axis and plot titles\r\n orbit, = ax.plot([], [], [])\r\n satellite, = ax.plot([], [], [], 'o', color='red')\r\n earth, = ax.plot([], [], [], 'o', color='green')\r\n time_text.set_text('')\r\n ax.set_title(Title_3D, fontsize=22)\r\n ax.set_xlim3d([-lim, lim])\r\n ax.set_xlabel('I\\n[km]')\r\n ax.set_ylim3d([-lim, lim])\r\n ax.set_ylabel('J\\n[km]')\r\n ax.set_zlim3d([-lim, lim])\r\n ax.set_zlabel('K\\n[km]')\r\n # plot Earth\r\n\r\n u = np.linspace(0, 2 * np.pi, 100)\r\n v = np.linspace(0, np.pi, 100)\r\n x = R_moon * np.outer(np.cos(u), np.sin(v))\r\n y = R_moon * np.outer(np.sin(u), np.sin(v))\r\n z = R_moon * np.outer(np.ones(np.size(u)), np.cos(v))\r\n ax.plot_wireframe(x, y, z, color=\"grey\", label=\"Moon\", linewidth=0.3, rstride=7, cstride=7)\r\n # Must return the list of artists, but we use a pass\r\n # through so that they aren't created multiple times\r\n return orbit, satellite, earth, time_text", "def __init__(self):\n self._read_calibration_data()\n self.set_oversamplings_and_mode(\n HumidityOversampling.x08,\n TemperatureOversampling.x08,\n PressureOversampling.x16,\n SensorMode.Normal)\n self.set_config(\n InactiveDuration.ms1000,\n FilterCoefficient.fc04)", "def init_data(my_data, rp):\n\n msg.bold(\"initializing the isothermal atmosphere problem...\")\n\n # make sure that we are passed a valid patch object\n if not isinstance(my_data, patch.CellCenterData2d):\n print(\"ERROR: patch invalid in isothermal.py\")\n print(my_data.__class__)\n sys.exit()\n\n # get the density, momenta, and energy as separate variables\n dens = 
my_data.get_var(\"density\")\n xmom = my_data.get_var(\"x-momentum\")\n ymom = my_data.get_var(\"y-momentum\")\n ener = my_data.get_var(\"energy\")\n\n gamma = rp.get_param(\"eos.gamma\")\n\n grav_const = rp.get_param(\"compressible.grav\")\n cs = rp.get_param(\"isothermal.cs\")\n\n eddington_ratio = rp.get_param(\"isothermal.eddington\")\n\n dens1 = rp.get_param(\"isothermal.dens1\")\n\n amp = rp.get_param(\"isothermal.amp\")\n nwaves = rp.get_param(\"isothermal.nwaves\")\n\n xmax = rp.get_param(\"mesh.xmax\")\n ymax = rp.get_param(\"mesh.ymax\")\n\n if grav_const != 0.0:\n scale_height = cs*cs/numpy.abs(grav_const)\n else:\n scale_height = 0.1\n\n print(\"scale height:\",scale_height)\n\n smallpres = 1.e-10\n smalldens = smallpres/(cs**2)\n\n # compute optical depth\n kappa = 1.0\n c = 1.0\n column_density = dens1*scale_height*(1.0-numpy.exp(-ymax))\n optical_depth = column_density*kappa\n I_0 = (1./numpy.pi)*eddington_ratio*c*numpy.abs(grav_const) \\\n *column_density/(1.0-numpy.exp(-optical_depth))\n my_data.set_aux(\"surface_brightness\", I_0)\n my_data.set_aux(\"speed_of_light\", c)\n my_data.set_aux(\"opacity\",kappa)\n print(\"optical depth:\",optical_depth)\n print(\"surface brightness:\",I_0)\n\n # compute Eddington ratio\n rad_accel = (numpy.pi*I_0)*kappa/c* \\\n (1.0-numpy.exp(-optical_depth))/optical_depth \\\n # mass weighted flux (plane-parallel radiation, constant kappa)\n net_accel = rad_accel + grav_const\n eddington_ratio = rad_accel/numpy.abs(grav_const)\n print(\"eddington_ratio:\",eddington_ratio)\n print(\"net accel:\",net_accel)\n\n # initialize the components, remember, that ener here is\n # rho*eint + 0.5*rho*v**2, where eint is the specific\n # internal energy (erg/g)\n xmom.d[:,:] = 0.0\n ymom.d[:,:] = 0.0\n dens.d[:,:] = 0.0\n\n if rp.get_param('restart.flag') != 0:\n # reload simulation state from file\n grid,cells = patch.read(rp.get_param('restart.snapshot'))\n dens.d[:,:] = cells.get_var('density').d\n xmom.d[:,:] = cells.get_var('x-momentum').d\n ymom.d[:,:] = cells.get_var('y-momentum').d\n ener.d[:,:] = cells.get_var('energy').d\n \n ## must set time!!\n my_data.t = cells.t\n else:\n # set the density to be stratified in the y-direction\n myg = my_data.grid\n\n p = myg.scratch_array()\n\n dens.d[:,:] = dens1*numpy.exp(-myg.y2d/scale_height)\n dens.d[dens.d < smalldens] = smalldens\n p.d[:,:] = dens.d * cs**2 / gamma\n\n # set the velocity perturbations\n u = 0.\n\n A = amp*numpy.random.rand(dens.d.shape[0],dens.d.shape[1])\n # v = A*(1+numpy.cos(nwaves*numpy.pi*myg.x2d/xmax))*0.5\n v = A*(numpy.cos(nwaves*numpy.pi*myg.x2d/xmax))*0.5\n\n # set the momenta\n xmom.d[:,:] = dens.d * u\n ymom.d[:,:] = dens.d * v\n\n # set the energy (P = cs2*dens/gamma)\n ener.d[:,:] = p.d[:,:]/(gamma - 1.0) + \\\n 0.5*(xmom.d[:,:]**2 + ymom.d[:,:]**2)/dens.d[:,:]", "async def test_setup_params(opp):\n state = opp.states.get(ENTITY_WATER_HEATER)\n assert state.attributes.get(\"temperature\") == 119\n assert state.attributes.get(\"away_mode\") == \"off\"\n assert state.attributes.get(\"operation_mode\") == \"eco\"", "def generate_condition_data(self):\n # set 'Conditions' column to NA\n self.output['Conditions'] = 'NA'\n\n # instantiate new MarkovChain object\n MC = MarkovChain()\n\n # apply forecast function on 'Conditions' column based on temperature\n # and humidity values for each observation period\n params = self.output[[\"Temperature\", \"Humidity\"]]\n self.output[['Conditions']] = params.apply(\n lambda x: MC.forecast_weather(x.values[0], x.values[1]), axis=1)", 
"async def async_setup_platform(hass, hass_config, async_add_entities,\n discovery_info=None):\n client = hass.data[DOMAIN]['client']\n\n entities = [GeniusWaterHeater(client, z)\n for z in client.hub.zone_objs if z.type in GH_HEATERS]\n\n async_add_entities(entities)", "def setUp(self):\n self.lower_bound = 10\n self.upper_bound = 20\n self.middle_value = (self.lower_bound + self.upper_bound)/2\n self.more_than_upper_bound = self.upper_bound + 10\n self.window_test_filter = RangeFilter(Type.CLOSED, self.lower_bound, self.upper_bound)", "def _setupWeather(self, w, config):\n wnames = ('cloud', 'seeing')\n if w not in wnames:\n raise Exception('w should be one of %s' %(wnames))\n filename = config['%s_datafile' %(w)]\n file = open(filename, 'r')\n # Also assume flat file contains only date / value in a space or tab separated file. \n self.dates[w] = []\n self.weather[w] = []\n # Read the data file.\n print '# Reading weather data file %s' %(filename)\n for line in file:\n if line.startswith('#') | line.startswith('!'):\n continue\n self.dates[w].append(line.split()[0])\n self.weather[w].append(line.split()[1])\n file.close()\n self.dates[w] = numpy.array(self.dates[w], float)\n self.weather[w] = numpy.array(self.weather[w], float)\n # Check the total amount of data (mostly for user awareness):\n print '# Read %d weather values from %s file. ' %(len(self.weather[w]), filename)\n # Check that weather data is monotonically increasing in time. \n if not(numpy.all(numpy.diff(self.dates[w]))):\n order = self.dates[w].argsort()\n self.weather[w] = self.weather[w][order]\n self.dates[w] = self.dates[w][order]\n # Get the total length of time included in this (seeing/cloud) file,\n # so that we can determine a wrap-around date if we need that.\n self.maxtime[w] = self.dates[w].max()\n return", "def get_observations(self):\n joint_states = self.joints_state\n self.force = self.wrench_stamped.wrench.force\n self.torque = self.wrench_stamped.wrench.torque\n self.static_taxel = self.tactile_static.taxels\n# dynamic_taxel= tactile_dynamic\n\n# print(\"[force]\", self.force.x, self.force.y, self.force.z)\n# print(\"[torque]\", self.torque.x, self.torque.y, self.torque.z)\n shp_joint_ang = joint_states.position[0]\n shl_joint_ang = joint_states.position[1]\n elb_joint_ang = joint_states.position[2]\n wr1_joint_ang = joint_states.position[3]\n wr2_joint_ang = joint_states.position[4]\n wr3_joint_ang = joint_states.position[5]\n\n shp_joint_vel = joint_states.velocity[0]\n shl_joint_vel = joint_states.velocity[1]\n elb_joint_vel = joint_states.velocity[2]\n wr1_joint_vel = joint_states.velocity[3]\n wr2_joint_vel = joint_states.velocity[4]\n wr3_joint_vel = joint_states.velocity[5]\n\n q = [shp_joint_ang, shl_joint_ang, elb_joint_ang, wr1_joint_ang, wr2_joint_ang, wr3_joint_ang]\n# print(\"q(observation):\", q)\n eef_x, eef_y, eef_z = self.get_xyz(q)\n self.end_effector = self.get_xyz(q)\n eef_x_ini, eef_y_ini, eef_z_ini = self.get_xyz(self.init_joint_pose2) \n\n delta_image_r, delta_image_l = self.get_image()\n self.cnn_image_r = agent.update_cnn(delta_image_r)\n self.cnn_image_l = agent.update_cnn(delta_image_l)\n self.cnn_image_r_list = self.cnn_image_r.tolist()\n self.cnn_image_l_list = self.cnn_image_l.tolist()\n print(\"r_list\", self.cnn_image_r_list)\n print(\"l_list\", self.cnn_image_l_list)\n\n observation = []\n# rospy.logdebug(\"List of Observations==>\"+str(self.observations))\n for obs_name in self.observations:\n if obs_name == \"shp_joint_ang\":\n observation.append((shp_joint_ang - 
self.init_joint_pose2[0]) * self.joint_n)\n elif obs_name == \"shl_joint_ang\":\n observation.append((shl_joint_ang - self.init_joint_pose2[1]) * self.joint_n)\n elif obs_name == \"elb_joint_ang\":\n observation.append((elb_joint_ang - self.init_joint_pose2[2]) * self.joint_n)\n elif obs_name == \"wr1_joint_ang\":\n observation.append((wr1_joint_ang - self.init_joint_pose2[3]) * self.joint_n)\n elif obs_name == \"wr2_joint_ang\":\n observation.append((wr2_joint_ang - self.init_joint_pose2[4]) * self.joint_n)\n elif obs_name == \"wr3_joint_ang\":\n observation.append((wr3_joint_ang - self.init_joint_pose2[5]) * self.joint_n)\n elif obs_name == \"shp_joint_vel\":\n observation.append(shp_joint_vel)\n elif obs_name == \"shl_joint_vel\":\n observation.append(shl_joint_vel)\n elif obs_name == \"elb_joint_vel\":\n observation.append(elb_joint_vel)\n elif obs_name == \"wr1_joint_vel\":\n observation.append(wr1_joint_vel)\n elif obs_name == \"wr2_joint_vel\":\n observation.append(wr2_joint_vel)\n elif obs_name == \"wr3_joint_vel\":\n observation.append(wr3_joint_vel)\n elif obs_name == \"eef_x\":\n observation.append((eef_x - eef_x_ini) * self.eef_n)\n elif obs_name == \"eef_y\":\n observation.append((eef_y - eef_y_ini) * self.eef_n)\n elif obs_name == \"eef_z\":\n observation.append((eef_z - eef_z_ini) * self.eef_n)\n elif obs_name == \"force_x\":\n observation.append((self.force.x - self.force_ini.x) / self.force_limit1 * self.force_n)\n elif obs_name == \"force_y\":\n observation.append((self.force.y - self.force_ini.y) / self.force_limit1 * self.force_n)\n elif obs_name == \"force_z\":\n observation.append((self.force.z - self.force_ini.z) / self.force_limit1 * self.force_n)\n elif obs_name == \"torque_x\":\n observation.append((self.torque.x - self.torque_ini.x) / self.torque_limit1 * self.torque_n)\n elif obs_name == \"torque_y\":\n observation.append((self.torque.y - self.torque_ini.y) / self.torque_limit1 * self.torque_n)\n elif obs_name == \"torque_z\":\n observation.append((self.torque.z - self.torque_ini.z) / self.torque_limit1 * self.torque_n)\n elif obs_name == \"image_cnn\":\n for x in range(0, 10):\n observation.append(self.cnn_image_r_list[0][x])\n# print(\"r_list\", self.cnn_image_r_list[0][x])\n for x in range(0, 10):\n observation.append(self.cnn_image_l_list[0][x])\n# print(\"l_list\", self.cnn_image_l_list[0][x])\n elif obs_name == \"static_taxel\":\n for x in range(0, 28):\n observation.append((self.static_taxel[0].values[x] - self.static_taxel_ini[0].values[x]) * self.taxel_n)\n for x in range(0, 28):\n observation.append((self.static_taxel[1].values[x] - self.static_taxel_ini[1].values[x]) * self.taxel_n)\n# elif obs_name == \"dynamic_taxel\":\n# observation.append(dynamic_taxel[0].values) * self.taxel_n\n# observation.append(dynamic_taxel[1].values) * self.taxel_n\n else:\n raise NameError('Observation Asked does not exist=='+str(obs_name))\n\n print(\"observation\", list(map(round, observation, [3]*len(observation))))\n# print(\"observation\", observation)\n\n return observation", "def setup_platform(hass, config, add_entities, discovery_info=None):\n\n name = config.get(CONF_NAME)\n token = config.get(CONF_API_KEY)\n latitude = config.get(CONF_LATITUDE, hass.config.latitude)\n longitude = config.get(CONF_LONGITUDE, hass.config.longitude)\n _LOGGER.debug(\"Using latitude and longitude: %s, %s\", latitude, longitude)\n scan_interval = config[CONF_SCAN_INTERVAL]\n sensors = []\n for variable in config[CONF_MONITORED_CONDITIONS]:\n sensors.append(AirlySensor(name, variable, 
latitude, longitude, token))\n add_entities(sensors, True)", "def set_earth(inclination, phases):\n cosi, sini = np.cos(inclination), np.sin(inclination)\n cosp = np.cos(2*np.pi*phases)\n sinp = np.sin(2*np.pi*phases)\n return CartesianRepresentation(sini*cosp, -sini*sinp, cosi)", "def _init_world(self):\n self.world.restricted_world = {\n 'not_road': [],\n 'cross_road': [],\n }\n for polygon in self._data_loader.data.get_polygons(0):\n polygon_name = polygon['label']\n polygon_points = polygon['points']\n if polygon_name in {'not_road', 'cross_road'}:\n self.world.restricted_world[polygon_name].append(geometry.Polygon(\n self._data_loader.convertIMG2PLAY(polygon_points)\n ))", "def setUp(self):\n self.mixing_ratio = np.array([0.1, 0.2, 0.3], dtype=np.float32)\n self.specific_heat = np.array([1089.5, 1174.0, 1258.5], dtype=np.float32)\n self.latent_heat = np.array([2531771.0, 2508371.0, 2484971.0], dtype=np.float32)\n self.temperature = np.array([185.0, 260.65, 338.15], dtype=np.float32)", "def setUp(self):\n\n self.eps = 0.001 # Accept 0.1 % relative error\n\n self.RSISE = Point(-35.27456, 149.12065)\n self.Home = Point(-35.25629, 149.12494) # 28 Scrivener Street, ACT\n self.Syd = Point(-33.93479, 151.16794) # Sydney Airport\n self.Nadi = Point(-17.75330, 177.45148) # Nadi Airport\n self.Kobenhavn = Point(55.70248, 12.58364) # Kobenhavn, Denmark\n self.Muncar = Point(-8.43, 114.33) # Muncar, Indonesia", "def get_allsky(self):\n band = self.get_band()\n septon = self.is_septon()\n if band == '10_90' or band == '30_90' or septon:\n allsky = True\n else:\n allsky = False\n return allsky", "def get_geo_data(self):\n # Get all countries and create a dictionary by name\n countries_shp = shpreader.natural_earth(\n resolution='10m',\n category='cultural',\n name='admin_0_countries',\n )\n self.countries = list(shpreader.Reader(countries_shp).records())\n self.countries_by_name = {}\n self.countries_by_iso_a2 = {}\n for country in shpreader.Reader(countries_shp).records():\n self.countries_by_name[country.attributes['NAME_LONG']] = country\n self.countries_by_iso_a2[country.attributes['ISO_A2']] = country\n\n # Get all states and create a dictionary by name\n states_provinces_shp = shpreader.natural_earth(\n resolution='50m',\n category='cultural',\n name='admin_1_states_provinces',\n )\n# full_list = list(shpreader.Reader(states_provinces_shp).records())\n# self.states = [x for x in full_list if x.attributes['type_en'] == 'State']\n self.states = list(shpreader.Reader(states_provinces_shp).records())\n self.states_by_name = {}\n for state in self.states:\n self.states_by_name[state.attributes['name']] = state\n\n # Get all timezones and create a dictionary by name\n timezones_shp = shpreader.natural_earth(\n resolution='10m',\n category='cultural',\n name='time_zones',\n )\n self.timezones = list(shpreader.Reader(timezones_shp).records())\n self.timezones_by_name = {}\n for timezone in shpreader.Reader(timezones_shp).records():\n # Try to get the actual name. 
Something like `Europe/Berlin`\n timezone_name = timezone.attributes['tz_name1st']\n # If there is no name, we default to the utc offset name `-5` `+4.5`\n if timezone_name == '':\n timezone_name = timezone.attributes['name']\n\n if timezone_name not in self.timezones_by_name.keys():\n self.timezones_by_name[timezone_name] = timezone", "def check_ion_environment(self,\n ion_params,\n wavelength = None,\n require_valence = True):\n from iotbx.pdb import common_residue_names_get_class as get_class\n\n identity = self.identity(ion_params)\n inaccuracies = self.inaccuracies[identity] = set()\n self.expected_params[identity] = ion_params\n ignored = self.ignored[identity] = set()\n\n # if the atom is clearly not a water, optionally relax some rules. this\n # will be more sensitive for transition metals, without finding a lot of\n # spurious Mg/Na sites.\n strict_rules = require_valence or \\\n self.is_correctly_identified(identity = \"HOH\") or \\\n self.strict_valence or \\\n ion_params.element in [\"NA\",\"MG\"]\n\n # Check for all non-overlapping atoms within 3 A of the metal\n n_closest = 0\n coord_atoms = []\n for i_pair, contact1 in enumerate(self.nearby_atoms):\n distance = contact1.distance()\n if (distance < 3.0):\n for contact2 in self.nearby_atoms[(i_pair+1):] :\n if ((contact1 == contact2) or\n (contact1.distance_from(contact2) <= 0.3)):\n break\n else :\n coord_atoms.append(contact1)\n if (distance < 2.7):\n n_closest += 1\n\n if len(coord_atoms) < ion_params.coord_num_lower:\n inaccuracies.add(self.TOO_FEW_COORD)\n\n if n_closest > ion_params.coord_num_upper:\n inaccuracies.add(self.TOO_MANY_COORD)\n\n # Coordinating atoms closer than 3.0 A are not positively charged\n n_non_water = 0\n self.bad_coords[identity] = []\n\n for contact in self.nearby_atoms:\n other_name = contact.atom_name()\n other_resname = contact.resname()\n other_element = contact.element\n\n if (not other_resname in WATER_RES_NAMES):\n n_non_water += 1\n else:\n # Everything can potentially be coordinated by water\n continue\n\n if (contact.distance() < 3.0):\n # XXX: So, we have a a fair number of rules restricting nitrogens and\n # nitrogen-containing residues from coordinating a number of cations.\n #\n # However, this rule is dependent on the protonation of the nitrogen,\n # if the pKa is low at the site, it is possible for a metal to\n # coordinate the residue fine.\n #\n # We want a complex rule that takes into account coordinating geometry,\n # density signal, and the presence of other coordinating atoms that\n # might drop the site's pKa enough to lose the hydrogen.\n if ((ion_params.allowed_coordinating_atoms is not None) and\n (other_element not in ion_params.allowed_coordinating_atoms)):\n self.bad_coords[identity].append(contact)\n inaccuracies.add(self.BAD_COORD_ATOM)\n if (get_class(other_resname) == \"common_amino_acid\"):\n # limit elements allowed to bind to backbone atoms (mainly carbonyl\n # oxygen)\n if ((other_name in [\"C\",\"N\",\"O\",\"CA\",\"H\",\"HA\"]) and\n ((ion_params.allowed_backbone_atoms is None) or\n (not other_name in ion_params.allowed_backbone_atoms))):\n if (other_name == \"O\") and (contact.is_carboxy_terminus):\n pass # C-terminal carboxyl group is allowed\n else :\n self.bad_coords[identity].append(contact)\n inaccuracies.add(self.BAD_COORD_ATOM)\n # Check if atom is of an allowed residue type, if part of a sidechain\n if (ion_params.allowed_coordinating_residues is not None):\n allowed = ion_params.allowed_coordinating_residues\n if ((not other_resname in allowed) and\n 
(other_name not in [\"C\", \"O\", \"N\", \"CA\", \"OXT\"])):\n # XXX probably just O\n self.bad_coords[identity].append(contact)\n inaccuracies.add(self.BAD_COORD_RESIDUE)\n elif (cmp(0, mmtbx.ions.server.get_charge(contact.atom)) ==\n cmp(0, ion_params.charge)):\n # Check if coordinating atom is of opposite charge\n self.bad_coords[identity].append(contact)\n inaccuracies.add(self.LIKE_COORD)\n elif (ion_params.charge > 0 and\n other_element in [\"N\"] and\n other_resname in [\"LYS\", \"ARG\", \"ASN\", \"GLN\"]):\n # Coordinating nitrogen most likely positive.\n #\n # Ignore nitrogens without a charge label that are on positively\n # charged amino acids.\n self.bad_coords[identity].append(contact)\n inaccuracies.add(self.LIKE_COORD)\n\n # Check the number of coordinating waters\n if (n_non_water < ion_params.min_coordinating_non_waters):\n inaccuracies.add(self.TOO_FEW_NON_WATERS)\n\n # Check the geometry of the coordinating atoms\n if ion_params.allowed_geometries and strict_rules:\n allowed = [i[0] in ion_params.allowed_geometries\n for i in self.geometries]\n if \"any\" in ion_params.allowed_geometries:\n pass\n elif not self.geometries:\n if strict_rules:\n inaccuracies.add(self.NO_GEOMETRY)\n elif not any(allowed):\n inaccuracies.add(self.BAD_GEOMETRY)\n else:\n strict_rules = False\n\n # If no distinct geometry, check that none of the coordinating have distinct\n # geometry, either\n if self.geometries == []:\n for contact in self.nearby_atoms:\n o_atom = contact.atom\n if o_atom.i_seq in self.manager.atoms_to_props:\n o_geometry = self.manager.atoms_to_props[o_atom.i_seq].geometries\n if o_geometry != []:\n inaccuracies.add(self.COORDING_GEOMETRY)\n\n # Check for reasonable vector/valence values\n vectors = mmtbx.ions.server.calculate_valences(ion_params,\n self.nearby_atoms)\n self.vectors[identity] = vectors\n\n self.valence_sum[identity] = sum([abs(i) for i in vectors])\n self.vector_sum[identity] = abs(sum(vectors, col((0, 0, 0))))\n\n if self.vector_sum[identity] > ion_params.vec_sum_cutoff:\n if (strict_rules):\n inaccuracies.add(self.BAD_VECTORS)\n else :\n ignored.add(self.BAD_VECTORS)\n\n # XXX I am not sure how low a valence sum we want to allow, but many\n # structures with non-physiological cation binding have partial and/or\n # irregular coordination shells\n if (self.valence_sum[identity] < ion_params.cvbs_expected * 0.25 or\n self.valence_sum[identity] > ion_params.cvbs_expected * 1.25):\n inaccuracies.add(self.VERY_BAD_VALENCES)\n else:\n if (self.valence_sum[identity] < ion_params.cvbs_lower or\n self.valence_sum[identity] > ion_params.cvbs_upper):\n if strict_rules:\n inaccuracies.add(self.BAD_VALENCES)\n else :\n ignored.add(self.BAD_VALENCES)\n\n self.score[identity] = abs(self.valence_sum[identity] -\n ion_params.cvbs_expected)", "def init_geofence(self):\n\t\tself.create_ring()\n\t\tself.create_geofence()" ]
[ "0.6065568", "0.5984571", "0.59448445", "0.5943248", "0.59086376", "0.58542204", "0.57257646", "0.57075846", "0.5674026", "0.56102806", "0.54650813", "0.53581786", "0.5350245", "0.5348858", "0.5346381", "0.53435713", "0.5343278", "0.5335969", "0.5316473", "0.525487", "0.52013665", "0.51992553", "0.51751435", "0.51164347", "0.50932795", "0.5089322", "0.50786", "0.50764143", "0.5053816", "0.5048851", "0.50451994", "0.50447685", "0.5028618", "0.50223213", "0.4991929", "0.49899507", "0.498254", "0.4975665", "0.49601617", "0.49468714", "0.4941902", "0.49400008", "0.49398184", "0.49346647", "0.4912789", "0.4893286", "0.48800522", "0.48752537", "0.48727575", "0.4870614", "0.48650283", "0.48405224", "0.48325118", "0.48282883", "0.4827011", "0.4825913", "0.48226556", "0.4817807", "0.48176813", "0.48083678", "0.48039147", "0.4794099", "0.47933406", "0.47898322", "0.4786515", "0.47746897", "0.47744295", "0.47653982", "0.4758866", "0.4746664", "0.47451267", "0.47437513", "0.4736946", "0.47330508", "0.47325546", "0.47316483", "0.47290504", "0.47197407", "0.4718282", "0.47166288", "0.47132376", "0.47087324", "0.4707096", "0.47064412", "0.47023723", "0.47013238", "0.4694292", "0.4690221", "0.46853477", "0.46640787", "0.46612585", "0.46528548", "0.46523643", "0.46523604", "0.4651491", "0.46503177", "0.46475306", "0.46440145", "0.4643801", "0.46406597" ]
0.5434043
11
RescaledRange Subclass of Prefect Task Class.
def __init__(self):
    super().__init__()
    self.data = None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rescale(self, xmin, xmax):\n\n # Normalise\n self.normalise()\n\n \n # Rescale\n range = xmax-xmin\n for seg in self.segments:\n seg.lower_bound = seg.lower_bound*range + xmin\n seg.upper_bound = seg.upper_bound*range + xmin", "def clip(self, *args, **kwargs):\n return _uhd_swig.meta_range_t_clip(self, *args, **kwargs)", "def range(self):\n\n return time_stat(self, stat=\"range\")", "def _get_read_range(self):\n\n self.total_size = get_data_size(self.storage, self.read_bucket, self.read_path)\n\n partition_size = floor(self.total_size / self.task_info.num_tasks)\n\n self.lower_bound = self.task_info.task_id * partition_size\n self.upper_bound = self.lower_bound + partition_size\n\n # self.lower_bound, self.upper_bound = adjust_bounds(self.storage, self.read_bucket, self.read_path,\n # self.lower_bound, self.upper_bound, self.total_size)\n\n print(\"Scanning bytes=%d-%d (%d)\"%(self.lower_bound, self.upper_bound,\n self.upper_bound - self.lower_bound))", "def progrange(*args, **kwargs):\n return progress(range(*args), **kwargs)", "def rescale(range1, range2):\n min1, max1, min2, max2 = min(range1), max(range1), min(range2), max(range2)\n def resize(value):\n return (((value - min1) * (max2 - min2)) / (max1 - min1)) + min2\n return resize", "def task_scaling(input_array, scaling_factor):\n return(np.multiply(scaling_factor, input_array))", "def updateRange(self):\n if self.autoFollow:\n self.xrange = self.param.activeRange()\n self.xrange = self.xrange # call getter & setter again to verify limits", "def maximize_reaction_range(start_stop, args):\n #make a sub cache for each thread to write into\n sub_cache = {}\n model = models.init_model(args['model'], species=args['species'],\n exchange_limit=EXCHANGE_LIMIT,\n media=args['media'])\n problem = initialize_cplex_problem(model, args['num_threads'], args['lpmethod'])\n\n #sort by id to ensure consistency across threads\n reactions = sorted(list(model.reactions.values()), key=lambda r:r.id)[start_stop[0]:start_stop[1]]\n for reaction in tqdm(reactions, file=sys.stderr):\n #if reaction.is_exchange:\n # continue\n partner_reaction = reaction.reverse_reaction\n # Set partner reaction upper-limit to 0 in problem\n # Store old limit for later to restore\n if partner_reaction is not None:\n partner_id = partner_reaction.id\n old_partner_ub = problem.variables.get_upper_bounds(partner_id)\n problem.variables.set_upper_bounds(partner_id, 0.0)\n\n utils.reset_objective(problem)\n problem.objective.set_linear(reaction.id, 1.0)\n problem.objective.set_name(str(reaction.id))\n problem.objective.set_sense(problem.objective.sense.maximize)\n\n problem.solve()\n rxn_max = problem.solution.get_objective_value()\n\n sub_cache[reaction.id] = rxn_max\n\n # Restore limit of partner reaction to old state\n if partner_reaction is not None:\n partner_id = partner_reaction.id\n problem.variables.set_upper_bounds(partner_id, old_partner_ub)\n\n return sub_cache", "def m_to_range(self, data):\n return (data - self._min_range_m) / self._total_range", "def get_range(self):\n return time_to_range(self.get_time())", "def set_progress_range(self, maximum):\r\n\r\n pass", "def tnrange(*args, **kwargs): # pragma: no cover\n from ._tqdm_notebook import tnrange as _tnrange\n return _tnrange(*args, **kwargs)", "def __init__(self, min: int = 4, max: int = 10000, name=None):\n\n super().__init__(name=name)\n self.min = min\n self.max = max\n self._parallel_friendly = True", "def set_par_range(self, mins, maxs, frozen):\n self.parmins = mins\n self.parmaxs = maxs\n 
self.pars_frozen = frozen\n return", "def autoscale(self, A):\n self.vmin = ma.min(A)\n self.vmax = ma.max(A)", "def set_visualization_range(self, start: int, end: int):\n self.__range = (start, end)", "def range(self):\n \n return self._range", "def range100(self):\r\n return self.init(100)", "def map_to_range(val, old_min, old_max, new_min, new_max):\n return new_max - (val - old_min) * (new_max - new_min) / (old_max - old_min)", "def subject(self) -> global___Range:", "def limit_plasma(self, n_min=1e11, n_max=1e22, T_min=0.001, T_max=100.0):\n self.ne = np.clip(self.ne, n_min, n_max)\n self.ni = np.clip(self.ni, n_min, n_max)\n self.nn = np.clip(self.nn, n_min, n_max)\n self.Te = np.clip(self.Te, T_min, T_max)\n self.Ti = np.clip(self.Ti, T_min, T_max)", "def getRange(self):\n return self.range", "def rescale(self, new_throughput):\n\t\treturn type(self)(self.item, self.recipe, new_throughput, self.per_process_outputs)", "def getRange(self, p_int): # real signature unknown; restored from __doc__\n pass", "def rescale(num, old_min, old_max, new_min, new_max):\n old_range = old_max - old_min\n new_range = new_max - new_min\n new_val = new_min + (((num - old_min) * new_range)/old_range)\n\n return new_val", "def _normalize_range():\n clipped = tf.clip_by_value(inputs, self.minimum, self.maximum)\n return -1 + 2 * (clipped - self.minimum) / length", "def scale_range(data, minTo, maxTo):\n minFrom = np.min(data)\n maxFrom = np.max(data)\n \n scaled_data = []\n \n for point in data:\n new_point = minTo + (maxTo - minTo) * ((point - minFrom)/(maxFrom - minFrom))\n scaled_data.append(new_point)\n \n return scaled_data", "def bounds(self) -> Tensor:\n return torch.cat([self.mins, self.mins + self.ranges], dim=-2)", "def setRange(self, range):\n\t\tself.range = range\n\t\tself.slider.setMinimum(0.0)\n\t\tself.slider.setMaximum(100.0)\n\t\tself.spinbox.setRange(self.range[0], self.range[1])\n\n\t\tdiff = self.range[1] - self.range[0]\n\t\tif diff <= 1:\n\t\t\tself.spinbox.setSingleStep(0.01)", "def __init__(self, task_params):\n self.seq_width = task_params[\"seq_width\"]\n self.min_seq_len = task_params[\"min_seq_len\"]\n self.max_seq_len = task_params[\"max_seq_len\"]\n self.min_repeat = task_params[\"min_repeat\"]\n self.max_repeat = task_params[\"max_repeat\"]\n self.in_dim = task_params['seq_width'] + 2\n self.out_dim = task_params['seq_width'] + 1", "def range (self):\n return self._range", "def range (self):\n return self._range", "def _min_max_scale(arr, new_range=(0, 255)):\n # get array's current min and max\n mn = arr.min()\n mx = arr.max()\n\n # check if scaling needs to be done to be in new_range\n if mn < new_range[0] or mx > new_range[1]:\n # perform min-max scaling\n scaled = (new_range[1] - new_range[0]) * (arr - mn) / (mx - mn) + new_range[0]\n else:\n # return array if already in range\n scaled = arr\n\n return scaled", "def __init__(self,\n low,\n high,\n clipping_lower_bound=-np.inf,\n clipping_upper_bound=np.inf):\n super().__init__()\n self._low = low\n self._high = high\n self._clipping_lower_bound = clipping_lower_bound\n self._clipping_upper_bound = clipping_upper_bound", "def setRange(self, x_range, y_range):\n pass", "def range(self):\n return self.range_array", "def adjust_dynamic_range(data, drange_in=(-1, 1), drange_out=(0, 1)):\r\n if drange_in != drange_out:\r\n scale = (np.float32(drange_out[1]) - np.float32(drange_out[0])) / (\r\n np.float32(drange_in[1]) - np.float32(drange_in[0]))\r\n bias = (np.float32(drange_out[0]) - np.float32(drange_in[0]) * scale)\r\n 
data = data * scale + bias\r\n return torch.clamp(data, min=0, max=1)", "def _constraints_task_spread(self):\n # encourage scheduling a chunk for every 24 hours\n diag = util.blockdiag(self.num_timeslots, incr=tutil.SLOTS_PER_DAY)\n slots = diag.shape[0]\n\n def rule(model, p, j):\n \"\"\"\n For spread-activated tasks, this rule is used to encourage\n spreading the chunks out on multiple days.\n\n More precisely:\n S[i,j] = whether task j is assigned on day i\n\n Maximizing sum_i S[i,j] encourages spreading out the task chunks\n \"\"\"\n den = sum(diag[p, :])\n ind_i = model.timeslots\n total = sum(diag[p, i] * (\n model.A[i, j] + 2 * model.A2[i, j] + 3 * model.A3[i, j] + 4 *\n model.A4[i, j]) for i in ind_i)\n total /= den\n # Desired: S[i,j] = ceil(total)\n # Desired: S[i,j] = 0 if total <= 0; otherwise, S[i,j] = 1\n return -EPS, model.S[p, j] - total, 1 - EPS\n\n self.model.constrain_spread0 = Constraint(self.model.dayslots,\n self.model.tasks, rule=rule)\n\n def rule(model):\n den = self.num_tasks * slots\n num = 20\n weights = np.ones((7, self.num_tasks))\n for j in range(self.num_tasks):\n weights[:, j] = self.task_spread[j]\n total = summation(weights, model.S) / den * num\n return model.S_total == total\n\n self.model.constrain_spread1 = Constraint(rule=rule)", "def set_desired_capacity(self, new_desired_capacity):\n scale_out = new_desired_capacity - self.desired_capacity\n assert scale_out >= 0\n if scale_out == 0:\n return CompletedFuture(False)\n\n remaining_instances = self.client.get_remaining_instances(self.resource_group, self.instance_type)\n\n futures = []\n for scale_set in sorted(self.scale_sets.values(), key=lambda x: (x.priority, x.name)):\n if scale_set.capacity < _SCALE_SET_SIZE_LIMIT:\n if self.slow_scale:\n new_group_capacity = scale_set.capacity + 1\n else:\n new_group_capacity = min(_SCALE_SET_SIZE_LIMIT, scale_set.capacity + scale_out, scale_set.capacity + remaining_instances)\n if scale_set.provisioning_state == 'Updating':\n logger.warn(\"Update of {} already in progress\".format(scale_set.name))\n continue\n if scale_set.provisioning_state == 'Failed':\n logger.error(\"{} failed provisioning. 
Skipping it for scaling.\".format(scale_set.name))\n continue\n scale_out -= (new_group_capacity - scale_set.capacity)\n remaining_instances -= (new_group_capacity - scale_set.capacity)\n # Update our cached version\n self.scale_sets[scale_set.name].capacity = new_group_capacity\n futures.append(self.client.update_scale_set(scale_set, new_group_capacity))\n logger.info(\"Scaling Azure Scale Set {} to {}\".format(scale_set.name, new_group_capacity))\n if scale_out == 0 or remaining_instances == 0:\n break\n\n if remaining_instances == 0:\n logger.warning(\"Out of quota for {}!\".format(self.instance_type))\n\n if scale_out > 0:\n logger.error(\"Not enough scale sets to reach desired capacity {} for {}\".format(new_desired_capacity, self))\n\n self.desired_capacity = new_desired_capacity - scale_out\n logger.info(\"ASG: {} new_desired_capacity: {}\".format(self, new_desired_capacity))\n\n return TransformingFuture(True, AllCompletedFuture(futures))", "def get_range(self):\n if self.size == 75:\n return 260\n elif self.size == 100:\n return 315", "def GetScalarRange(self):\n ...", "def rangeLimit(val, minv, maxv):\n\treturn range_limit(val, minv, maxv)", "def remap_interval(val, input_interval_start, input_interval_end, output_interval_start, output_interval_end):\n # your code goes here", "def __init__(self, min_intensity: float, max_intensity: float):\n super().__init__()\n self.min_intensity = min_intensity\n self.max_intensity = max_intensity", "def __init__(self) -> None:\n self.name = \"minmaxScaler\"\n self.min = 0\n self.max = 0", "def _resize_interval(start, end, size):\n center = int(0.5 * (start + end))\n half_size = int(0.5 * size)\n left = center - half_size\n right = left + size\n return left, right", "def scale(self):", "def restrict_non_nbrs_from_repacking(pose, res, task, pack_radius):\n\n center = pose.residue( res ).xyz( pose.residue( res ).nbr_atom() )\n print( \"Res: pack radius: \"+repr(pack_radius) )\n for i in range(1, pose.total_residue() + 1):\n # only pack the mutating residue and any within the pack_radius\n if i == res: continue\n\n nbr = pose.residue( i ).xyz( pose.residue( i ).nbr_atom() )\n dist = nbr.distance(center)\n if dist > pack_radius:\n task.nonconst_residue_task(i).prevent_repacking()\n else:\n task.nonconst_residue_task(i).restrict_to_repacking()\n\n #print task\n return task", "def range_to_m(self, data):\n return data * self._total_range + self._min_range_m", "def initializeDistribution(self):\n self.minVal = min(math.exp(self.upperBound),math.exp(self.lowerBound))\n self.maxVal = max(math.exp(self.upperBound),math.exp(self.lowerBound))", "def __init__(self, min: int, max: int):\n super().__init__()\n\n # store input parameters\n self.min = min\n self.max = max", "def test_get_range(self):\n pass", "def global_range(self):\n raise NotImplementedError", "def create(self, range):\n raise NotImplementedError", "def _calc_range(self) -> np.ndarray:\n if self._is_ct25k():\n range_resolution = 30\n n_gates = 256\n else:\n n_gates = int(self.metadata[\"number_of_gates\"])\n range_resolution = int(self.metadata[\"range_resolution\"])\n return np.arange(n_gates) * range_resolution + range_resolution / 2", "def _setValidRange(self, contribs, valueRange):\n testfor(contribs.ndim == 2, ValueError)\n numContribs, numReps = contribs.shape\n self._validRange = np.zeros_like(contribs.T, dtype = bool)\n for ri in range(numReps):\n # the single set of R for this calculation\n rset = contribs[:, ri]\n self._validRange[ri] = ((rset > min(valueRange))\n * (rset < 
max(valueRange)))", "def scale_range(x, input_range, target_range):\n\n range = [np.amin(x), np.amax(x)]\n x_std = (x - input_range[0]) / (1.0*(input_range[1] - input_range[0]))\n x_scaled = x_std * (1.0*(target_range[1] - target_range[0])) + target_range[0]\n return x_scaled, range", "def _adjustRange(self, start, end):\n adjusted_start = start\n if self._start:\n if end < self._start:\n return None\n adjusted_start = max(self._start, start)\n \n adjusted_end = end\n if self._end:\n if self._end < start:\n return None\n adjusted_end = min(self._end, end)\n \n return (adjusted_start, adjusted_end)", "def ranges(self, ranges):\n \n self._ranges = ranges", "def set_slider_bounds(self,lower,upper,inclusive_bounds=None):\n self.bounds = (lower,upper)\n\n if inclusive_bounds is not None:\n self.inclusive_bounds = inclusive_bounds\n\n epsilon = max(self.slider['resolution'],0.00000000001)\n\n if self.inclusive_bounds[0] is False:\n lower+=epsilon\n if self.inclusive_bounds[1] is False:\n upper-=epsilon\n self.slider.config(from_=lower,to=upper)", "def __init__(self, min: float, max: float):\n super().__init__()\n\n # store input parameters\n self.min = min\n self.max = max", "def runPass(self, exposure_range, rate):\n r = rospy.Rate(rate)\n for i, exposure in enumerate(exposure_range):\n if rospy.is_shutdown():\n break\n\n self.current_exposure = exposure\n self.client.update_configuration(\n {\"exposure\": self.current_exposure})\n r.sleep()\n\n finished = (i >= (len(exposure_range)-1))\n if finished:\n optimal_exposure = max(self.scores, key=self.scores.get)\n self.reset()\n return optimal_exposure # an optimal exposure has been found\n else:\n return -1", "def range(self):\r\n\t\treturn max(self.sample) - min(self.sample)", "def _calculate_range_stats(self, x_copy):\n # get the min, max values of the data\n min_val_cur, max_val_cur = torch.aminmax(x_copy)\n\n # calculate new epoch range values\n epoch_min_val = torch.min(self.epoch_activation_min, min_val_cur)\n epoch_max_val = torch.max(self.epoch_activation_max, max_val_cur)\n\n self.epoch_activation_min.copy_(epoch_min_val)\n self.epoch_activation_max.copy_(epoch_max_val)\n\n # calculate the average batch activation range\n current_batch_range = max_val_cur - min_val_cur\n new_range = (\n self.average_batch_activation_range * self.num_batches_tracked\n + current_batch_range\n ) / (self.num_batches_tracked + 1)\n\n self.average_batch_activation_range = new_range\n self.num_batches_tracked += 1 # new batch was processed\n\n return x_copy", "def f_get_range(self, copy=True):\n raise NotImplementedError(\"Should have implemented this.\")", "def get_pc_per_range(model, class_name):\n class_total = model.class_counts[class_name]\n if model.num_runs is not None:\n class_total = model.num_runs * class_total * .33\n\n true_positives, totals = model.range_metrics_10[class_name]\n purities = [] # Accuracy per range (true positive/total)\n comps = []\n TP_count = 0\n total_count = 0\n\n for index in reversed(range(len(true_positives))):\n cur_p = 0 # Current purity\n cur_c = 0 # Current completeness\n TP_count += true_positives[index]\n total_count += totals[index]\n if total_count != 0:\n # positive class samples / totals # with prob in range\n cur_p = TP_count / total_count\n if class_total != 0:\n cur_c = TP_count / class_total\n\n purities.append(cur_p)\n comps.append(cur_c)\n purities.reverse()\n comps.reverse()\n return purities, comps", "def randrange(start: int, stop: int, step: int) -> int:\n ...", "def range(self):\n return 
self.timerange()", "def convert_range(g, op, block):\n\n start = g.get_node(op.input(\"Start\")[0])\n stop = g.get_node(op.input(\"End\")[0])\n step = g.get_node(op.input(\"Step\")[0])\n dtype = infer_type(start).checked_type.dtype\n\n params = []\n for param in (start, stop, step):\n param, infered = try_infer_value(param, g.get_params())\n if infered:\n param = param.tolist()\n if isinstance(param, list):\n param = param[0]\n if isinstance(param, _expr.Expr):\n param = _op.squeeze(param)\n else:\n param = _op.const(param, dtype=dtype)\n params.append(param)\n\n out = _op.transform.arange(params[0], params[1], params[2], dtype=dtype)\n g.add_node(op.output(\"Out\")[0], out)", "def set_bounds(\n self: A,\n lower: BoundValue = None,\n upper: BoundValue = None,\n method: str = \"clipping\",\n full_range_sampling: bool = False,\n a_min: BoundValue = None,\n a_max: BoundValue = None,\n ) -> A: # TODO improve description of methods\n lower, upper = _a_min_max_deprecation(**locals())\n bounds = tuple(a if isinstance(a, np.ndarray) or a is None else np.array([a], dtype=float) for a in (lower, upper))\n both_bounds = all(b is not None for b in bounds)\n # preliminary checks\n if self.bound_transform is not None:\n raise RuntimeError(\"A bounding method has already been set\")\n if full_range_sampling and not both_bounds:\n raise ValueError(\"Cannot use full range sampling if both bounds are not set\")\n checker = BoundChecker(*bounds)\n if not checker(self.value):\n raise ValueError(\"Current value is not within bounds, please update it first\")\n if not (lower is None or upper is None):\n if (bounds[0] >= bounds[1]).any(): # type: ignore\n raise ValueError(f\"Lower bounds {lower} should be strictly smaller than upper bounds {upper}\")\n # update instance\n transforms = dict(clipping=trans.Clipping, arctan=trans.ArctanBound, tanh=trans.TanhBound)\n if method in transforms:\n if self.exponent is not None and method != \"clipping\":\n raise ValueError(f'Cannot use method \"{method}\" in logarithmic mode')\n self.bound_transform = transforms[method](*bounds)\n elif method == \"constraint\":\n self.register_cheap_constraint(checker)\n else:\n raise ValueError(f\"Unknown method {method}\")\n self.bounds = bounds # type: ignore\n self.full_range_sampling = full_range_sampling\n # warn if sigma is too large for range\n if both_bounds and method != \"tanh\": # tanh goes to infinity anyway\n std_bounds = tuple(self._to_reduced_space(b) for b in self.bounds) # type: ignore\n min_dist = np.min(np.abs(std_bounds[0] - std_bounds[1]).ravel())\n if min_dist < 3.0:\n warnings.warn(f\"Bounds are {min_dist} sigma away from each other at the closest, \"\n \"you should aim for at least 3 for better quality.\")\n return self", "def mapRange(num, min1, max1, min2, max2, clamp=True):\n if(clamp and num < min1):\n return min2\n if(clamp and num > max1):\n return max2\n\n num1 = (num - min1) / (max1 - min1)\n num2 = (num1 * (max2 - min2)) + min2\n return num2", "def new_range(r):\n if isinstance(r, list) or isinstance(r, tuple) and len(r) == 2:\n lower = r[0]\n upper = r[1]\n else:\n lower = r\n upper = r\n lower = int(lower)\n upper = int(upper)\n return range(lower, upper + 1)", "def __init__(self, start, end, max):", "def get_batch_range(self):\n\n return NotImplementedError()", "def rangeSample(val, minLim, maxLim):\n\tif val < minLim or val > maxLim:\n\t\tval = randint(minLim, maxLim)\n\treturn val", "def __init__(self, input_dim=600+9, output_dim=1*3, dropout_prob=0., scale=3):\n super(F0_RNN_Scaled, 
self).__init__(input_dim=input_dim, output_dim=output_dim, dropout_prob=dropout_prob)\n self.scale = scale", "def reschedule(self, task, recur=False):\n raise NotImplementedError()", "def range_(self):\n return self.bset.range_", "def remap_interval(val, input_interval_start, input_interval_end, output_interval_start, output_interval_end):\n inputrange = float(input_interval_end) - float(input_interval_start) #finds length or original interval\n outputrange = float(output_interval_end) - float(output_interval_start) #finds length of target interval\n scaledvalue = float(val) - input_interval_start #finds how far through the interval the value is \n value = ( scaledvalue / inputrange ) * outputrange + output_interval_start\n return value", "def sum_range(lower, upper):\n\n def copies(pmin, pmax):\n if lower <= pmin and pmax <= upper:\n return True\n elif pmax > upper:\n return False\n return copies(pmin+50, pmax+60)\n\n return copies(0, 0)", "def normalise_modular_range(value, min, max):\n return numpy.mod(value-min, max-min)+min", "def setScaleX(self,startx,endx):\r\n if startx == endx:\r\n endx += 1\r\n self.scaleLock.acquire()\r\n self.scalex = [startx,endx]\r\n self.scaleLock.release()", "def rescale_action(self, action: np.ndarray) -> np.ndarray:\n action_rescaled = (\n action * (self.action_max - self.action_min) / 2.0\n + (self.action_max + self.action_min) / 2.0\n )\n return action_rescaled", "def recurrence_range(self, recurrence_range):\n\n self._recurrence_range = recurrence_range", "def range(start: int, stop: int = None, step: int = None) -> ObservableBase:\n from ..operators.observable.range import from_range\n return from_range(start, stop, step)", "def regrow(self, **kwargs):\n self.resources[self.resources >= self.min_value] += self.revive_rate\n self.resources[self.resources >= self.max_value] = self.max_value", "def rescale_to_range(\n array: vtk.vtkDoubleArray,\n to_range: typing.Tuple[float, float],\n rel_tol: float = sys.float_info.epsilon,\n abs_tol: float = sys.float_info.epsilon,\n) -> vtk.vtkDoubleArray:\n to_span = to_range[1] - to_range[0]\n assert to_span >= 0\n\n # The values need to span a positive range to be able to scale to `to_range`.\n # We use at least a small span derived from the tolerances.\n array_range = array.GetValueRange()\n array_span = array_range[1] - array_range[0]\n array_center = array_range[0] + array_span / 2\n from_range = (\n array_range\n if not math.isclose(array_span, 0.0, rel_tol=rel_tol, abs_tol=abs_tol)\n else (\n array_center - max(rel_tol * abs(array_center), abs_tol),\n array_center + max(rel_tol * abs(array_center), abs_tol),\n )\n )\n from_span = from_range[1] - from_range[0]\n\n assert not math.isclose(from_span, 0.0, rel_tol=rel_tol, abs_tol=abs_tol)\n factor = to_span / from_span\n\n result = vtk.vtkDoubleArray()\n result.SetNumberOfValues(array.GetNumberOfValues())\n for id in range(array.GetNumberOfValues()):\n result.InsertValue(\n id, to_range[0] + (array.GetValue(id) - from_range[0]) * factor\n )\n\n return result", "def rescale_array(array, old_range, new_range, dtype):\n if not HAS_NUMPY:\n LOGGER.error(\"The Python library numpy is required for this operation\")\n return\n\n old_min, old_max = old_range\n if array.min() < old_min or array.max() > old_max:\n ## truncate:\n array = numpy.clip(array, old_min, old_max)\n new_min, new_max = new_range\n old_delta = float(old_max - old_min)\n new_delta = float(new_max - new_min)\n if old_delta == 0:\n return ((array - old_min) + (new_min + new_max) / 2).astype(dtype)\n 
else:\n return (new_min + (array - old_min) * new_delta / old_delta).astype(dtype)", "def remap(\n n: int, start1: int, stop1: int, start2: int, stop2: int, withinBounds: bool = True\n) -> int:\n newval = (n - start1) / (stop1 - start1) * (stop2 - start2) + start2\n if not withinBounds:\n return newval\n if start2 < stop2:\n return constrain(newval, start2, stop2)\n else:\n return constrain(newval, stop2, start2)", "def set_temp_range(self, temp_range=(0, 0, 1)):\n args = list(temp_range)\n assert len(args) == 3\n minimum, maximum, step = args\n if all([isinstance(i, int) for i in args]):\n if (maximum - minimum) % step == 0:\n maximum += 1\n self.temperatures = np.arange(minimum, maximum, step, dtype=float)\n self.qptanalyzer.temperatures = self.temperatures", "def rescale(self):\n low = self.datasource.data[\"values\"].min()\n high = self.datasource.data[\"values\"].max()\n\n # force color to be at lower end of the colormap if\n # data is all equal\n if low == high:\n high += 1\n\n self.set_limits_minmax(low, high)", "def range(self, value: ArrayLike): # noqa: A003\n\n value = as_float_array(value, self.dtype)\n\n if not np.all(np.isfinite(value)):\n runtime_warning(\n f'\"{self.name}\" new \"range\" variable is not finite: {value}, '\n f\"unpredictable results may occur!\"\n )\n\n # Empty domain occurs during __init__ because range is set before domain\n attest(\n self._domain.size == 0 or value.size == self._domain.size,\n '\"domain\" and \"range\" variables must have same size!',\n )\n\n self._range = value\n self._function = None # Invalidate the underlying continuous function.", "def create_range(range_class):\n if not hasattr(range_class, 'name'):\n raise exceptions.ValidationError(\n \"A custom range must have a name attribute\")\n return Range.objects.create(\n name=range_class.name,\n proxy_class=_class_path(range_class))", "def range_partitioning(self) -> 'outputs.RangePartitioningResponse':\n return pulumi.get(self, \"range_partitioning\")", "def minmax_scale(X, feature_range=..., *, axis=..., copy=...):\n ...", "def scale(x, feature_range=(-1, 1)):\n \n # scale from 0-1 to feature_range\n min, max = feature_range\n #x = x * (max - min) + min\n #x = torch.add(torch.mul(x, (max-min)), min)\n x = x.mul(max-min).add_(min)\n return x", "def range(self, value):\n self.value_range = tuple([float(x) for x in value.split(':')])", "def scale_value(value, ip_range, domain=(0,1)):\n x1, x2 = domain\n y1, y2 = ip_range\n\n assert(y1 <= value <= y2)\n\n m = (x2 - x1)/(y2 - y1)\n b = y1 - m * x1\n return m * value - b", "def _multi_range(limit,\n value_repetitions=1,\n range_repetitions=1,\n dtype=tf.int32):\n return tf.reshape(\n tf.tile(\n tf.expand_dims(tf.range(limit, dtype=dtype), axis=-1),\n multiples=[range_repetitions, value_repetitions]), [-1])", "def setRange(self):\n # first determine ranges\n if len(self.activeWeapons) > 0:\n myPrimaryWeapon = self.activeWeapons[0]\n self.range = myPrimaryWeapon.myWeaponData.range * 1.0\n else:\n # no weapons left RUN\n self.mode = 'escape'\n self.range = 99999" ]
[ "0.54382", "0.534953", "0.5344448", "0.5320466", "0.53059083", "0.5240817", "0.5216645", "0.5215252", "0.52043605", "0.51910514", "0.5168013", "0.5165983", "0.5154208", "0.51533276", "0.5143823", "0.5138442", "0.5101876", "0.5092483", "0.5088871", "0.5082565", "0.5073314", "0.50654596", "0.5063034", "0.50550115", "0.50443494", "0.50381994", "0.5022811", "0.5006636", "0.50016916", "0.49922198", "0.49846593", "0.4979751", "0.4979751", "0.49607316", "0.4959603", "0.4958029", "0.49458405", "0.49416327", "0.49355063", "0.4935482", "0.4930885", "0.4927988", "0.49194565", "0.49102238", "0.49041468", "0.48983225", "0.4898267", "0.48927847", "0.48885348", "0.48732236", "0.48712695", "0.48556745", "0.48414493", "0.4839405", "0.48321882", "0.48295817", "0.48230115", "0.48221737", "0.4820181", "0.4813889", "0.48123425", "0.4805301", "0.48044744", "0.4803997", "0.4803166", "0.480229", "0.47910744", "0.4785712", "0.4781954", "0.47796035", "0.47787842", "0.47770792", "0.47762296", "0.4775326", "0.47722352", "0.4770996", "0.47670338", "0.47622338", "0.47551173", "0.47502485", "0.47499326", "0.474627", "0.47420585", "0.47352356", "0.47217998", "0.47144932", "0.4713663", "0.47119817", "0.47035342", "0.4699137", "0.46871477", "0.46848977", "0.468213", "0.467677", "0.4665826", "0.4665799", "0.46650726", "0.46639752", "0.46557614", "0.4651056", "0.46508658" ]
0.0
-1
Handler for 404 errors.
def handler404(request):
    response = render_to_response('404.html', {}, RequestContext(request))
    response.status_code = 404
    return response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def handleStatus_404(self):\n log.err('HTTP Error 404')", "def handler404(request):\n response = render_to_response('404.html', {})\n response.status_code = 404 # Other errors can be similarly configured\n return response", "def handle_404(request):\n return handle_error(request, django.http.HttpResponseNotFound(),\n \"404.html\")", "def not_found_error_handler(error):\r\n return render_template('error.404.html')", "def handler404(request, *args, **argv):\n response = render_to_response('404.html', {})\n response.status_code = 404\n return response", "def error_404(error):\n return '404 Error'", "def not_found():\n return HttpError(404)", "def handler404(request):\n \n #Setting the variable and template page for the 500 error\n response = render_to_response('404.html', {}, context_instance=RequestContext(request))\n response.status_code = 404\n return response", "def handle_notfound( environ ):\n return 404, [], make_error( 'Not Found', environ[ 'PATH_INFO' ] )", "def error404(e) -> tuple:\n return render_template('404.html'), 404", "def error_404(error):\n return 'Bummer, there is nothing at this URL.'", "def normal404(e):\n return jsonify({\"error\": \"Not found\"}), 404", "def not_found(e):\n return render_template(\"errors/404.html\"), 404", "def send404(self):\n\t\tself.send_error(404, \"File not found\")", "def error404(ex):\n # logger.error(ex)\n return \"error 404 : {0}\".format(ex.body)", "def error_404(error):\n return 'Data Service Error'", "def page_not_found(e):\n return 'Sorry, nothing at this URL.', 404", "def page_not_found(e):\n return 'Sorry, nothing at this URL.', 404", "def page_not_found(e):\n return 'Sorry, nothing at this URL.', 404", "def page_not_found(e):\n return 'Sorry, nothing at this URL.', 404", "def page_not_found(e):\n return 'Sorry, nothing at this URL.', 404", "def page_not_found(e):\n return 'Sorry, nothing at this URL.', 404", "def page_not_found(e):\n return 'Sorry, nothing at this URL.', 404", "def page_not_found(e):\n return render_template(\"404.html\"), 404", "def resource_not_found(exc, request):\r\n request.response_status = \"404 Not Found\"\r\n return {'message': str(exc)}", "def page_not_found(e):\n return render_template('404.html'), 404", "def page_not_found(e):\n return render_template('404.html'), 404", "def page_not_found(err):\n return error_formatter(code='404', details=err, parm1=request.path)", "def page_not_found(e):\n return 'Sorry, Nothing at this URL.', 404", "def page_not_found(e):\n return 'Sorry, Nothing at this URL.', 404", "def page_not_found(e):\n return 'Sorry, Nothing at this URL.', 404", "def page_not_found(e):\n return 'Sorry, Nothing at this URL.', 404", "def page_not_found(e):\n return 'Sorry, Nothing at this URL.', 404", "def page_not_found(e):\n return 'Sorry, Nothing at this URL.', 404", "def page_not_found(e):\n return 'Sorry, Nothing at this URL.', 404", "def page_not_found(e):\n return 'Sorry, Nothing at this URL.', 404", "def page_not_found(e):\n\n return render_template('404.html'), 404", "def error_404(error):\n\n # Delete the error variable as unused\n del error\n # Render 404 page\n return render_template('404.html'), 404", "def error_404(self):\n response = self.render_template('404.html')\n response.status_code = 404\n return response", "def handle_error(self, e):\n code = getattr(e, 'code', 500) # Gets code or defaults to 500\n if code == 404:\n return self.make_response({\n 'message': 'not-found',\n 'code': 404\n }, 404)\n return super(MyApi, self).handle_error(e) # handle others the default way", 
"def page_not_found(e):\n\n # Respons to api request\n if request.accept_mimetypes.accept_json and \\\n not request.accept_mimetypes.accept_html:\n resp = jsonify({'error': 'not found'})\n resp.status_code = 404\n return resp\n\n return render_template('errors/404.html'), 404", "def page_not_found(e):\n return render_template(\"error/404.html\"), 404", "def not_found():\n raise cherrypy.HTTPError(404, \"Not Found.\")", "def error_not_found(error):\n return 'No page here, dood. 404!', 404", "def page_not_found(er):\n return render_template('errors.html'), 404", "def page_not_found():\n return render_template(\"errors/404.html\"), 404", "def page_not_found(e):\n return jsonify({\"error\": \"Not found\"}), 404", "def page_not_found(e):\n return jsonify({\"error\": \"Not found\"}), 404", "def page_not_found(e):\n return render_template(\"404.html\", page_title=404)", "def page_not_found(e):\n return render_template('404.html')", "def display_404(error):\n return render_template('/error.html'), 404", "def page_not_found(er): \n return render_template('errors.html'), 400", "def page_not_found(e):\n return 'Þessi vefslóð er ekki rétt', 404", "def err404():\n return render_template('404.html', year=datetime.now().year)", "def page_not_found(_error):\n return render_template('404.html'), 404", "def _send_404(self):\n template_filename = self._get_config_template('404')\n text = read_template(\n template_filename,\n title='%s - 404' % SERVER_NAME,\n header='404 &mdash; Page not found',\n URL=\"Nothing\")\n self._send_response(text, 404)", "def map_http_404(http_error, new_error_class=None, *args, **kwargs):\n\tif http_error.code == 404:\n\t\tif new_error_class is not None:\n\t\t\traise new_error_class(*args, **kwargs)\n\telse:\n\t\traise http_error", "def not_found(environ, start_response):\n start_response('404 NOT FOUND', [('Content-Type', 'text/plain')])\n return [str.enocde('Not Found')]", "def test_error_handler_PageNotFound404(self):\n response = self.testapp.get('/notexistentpage/', expect_errors=True)\n self.assertEqual(404, response.status_int)\n self.assertIn('Page Not Found', response.normal_body)\n self.assertEqual('application/json', response.content_type)", "def not_found_error(error):\n return render_template('errors/404.html'), 404", "def handle_not_found(exception):\n return jsonify({\n 'message': 'Resource not found'\n }), 404", "def index_error_as_404(fun):\n @wraps(fun)\n def resource_handler(*args, **kwargs):\n try:\n return fun(*args, **kwargs)\n except IndexError:\n raise HTTPNotFound\n\n return resource_handler", "def error(self, code, message = ''):\n self.response.set_status(404)\n raise Exception(message)", "def not_found(error):\n\n return render_template('errors/404.html'), 404", "def page_not_found(error):\n\n return render_template('/errors/404.html'), 404", "def custom_404(request, exception=None):\n return render(request, \"404.html\", {\"exception\": exception})", "def not_found(error):\n pass", "def serve(self, request, *args, **kwargs):\n raise Http404", "def serve(self, request, *args, **kwargs):\n raise Http404", "def serve(self, request, *args, **kwargs):\n raise Http404", "def serve(self, request, *args, **kwargs):\n raise Http404", "def serve(self, request, *args, **kwargs):\n raise Http404", "def error():\n return render_template(\"404.html\")", "def not_found(self, error):\n return jsonify({'error': 'NOT FOUND'}), 404", "def page_not_found(er):\n return render_template('errors.html'), 500", "def replacement_not_found(klass, environ, start_response):\n raise 
HTTP404('path not found')", "def page_not_found(e):\n return render_template(\"500.html\"), 500", "def page_not_found(error):\n return render_template('error.html', error_msg=\"404 Page Not Found\", pagetitle=\"404 Page Not Found\"), 404", "def not_found_404(error):\n return jsonify({\n 'success': False,\n 'message': 'Resource not found',\n 'error': 404\n }), 404", "def view_404(request, url = None):\n res = render_to_response(\"404.html\", {\"PAGE_URL\": request.get_full_path()},context_instance=RequestContext(request))\n res.status_code = 404\n return res", "def page_not_found(error):\n return '<h1> 404 - Not Found</h1>', 404", "def not_found(error):\n return make_response(jsonify({'error': 'Not found'}), 404)", "def page_not_found(error):\n return 'Esta Pagina no existe', 404", "def page_not_found(e):\n return jsonify({\"error\": \"page not found\"})", "def page_not_found(e):\n # Message to the user\n message = {\n \"err\":\n {\n \"msg\": \"This route is currently not supported. Please refer API documentation.\"\n }\n }\n # Making the message looks good\n resp = jsonify(message)\n # Sending OK response\n resp.status_code = 404\n # Returning the object\n return resp", "def not_found_error(error):\n current_app.logger.info(error)\n return error, \"404\"", "def page_not_found(e):\n media = session.query(Medium).all()\n return render_template('404.html', media=media), 404", "def test_404(self):\n for path in ('/foo', '/abs', '/abs/'):\n response = self.client.get(path)\n self.assertEqual(response.status_code,\n status.HTTP_404_NOT_FOUND,\n f'should get 404 for {path}')\n self.assertIn('text/html', response.content_type)\n\n response = self.client.get('/abs/1307.0001v999')\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND,\n f'should get 404 for known paper ID with '\n 'nonexistent version')\n response = self.client.get('/abs/alg-geom/07059999')\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND,\n f'should get 404 for valid old paper ID '\n 'with nonexistent paper number affix')\n response = self.client.get('/abs/astro-ph/0110242')\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND,\n f'should get 404 for known deleted paper')\n response = self.client.get('/abs/foo-bar/11223344')\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND,\n f'should get 404 for bad paper ID')", "def Write404Error(self):\n self.error(404)\n self.response.out.write(\n ''.join(['<html><head><title>404: Not Found</title></head>',\n '<body><b><h2>Error 404</h2><br/>',\n 'File not found</b></body></html>']))", "def Write404Error(self):\n self.error(404)\n self.response.out.write(\n ''.join(['<html><head><title>404: Not Found</title></head>',\n '<body><b><h2>Error 404</h2><br/>',\n 'File not found</b></body></html>']))", "def test_errors(self):\n rc = self.app.get('/this_should_not_exist', follow_redirects=True)\n assert b'404 error :(' in rc.data", "def raise404(logmsg):\n apache.log_error(logmsg, apache.APLOG_ERR)\n raise apache.SERVER_RETURN, apache.HTTP_NOT_FOUND", "def not_found(error):\n return make_response(jsonify({'error': 'Resource not found'}), 404)", "def handle_request_unknown(self, msg):\n\t\traise NotFound()", "def _page_not_found():\n return render_template(\n \"error.html\",\n title=\"Page Not Found\"\n ), 404", "def report_404(self):\n self.send_response(404)\n response = 'No such page'\n self.send_header(\"Content-type\", \"text/plain\")\n self.send_header(\"Content-length\", str(len(response)))\n self.end_headers()\n 
self.wfile.write(response)", "def page_error(e):\n\n return render_template('404.html')", "def page_not_found(_):\n return ANSWER_PAGE_NOT_FOUND, 404", "def handle_failure_request(self) -> HttpResponse:\n return HttpResponseNotFound()", "def view(self, url):\r\n abort(404)" ]
[ "0.8526426", "0.7946644", "0.7945557", "0.7903338", "0.7813557", "0.7758015", "0.7735152", "0.7727628", "0.771213", "0.7692894", "0.75920653", "0.7517226", "0.75166225", "0.75061744", "0.7470424", "0.7457936", "0.7394755", "0.7394755", "0.7394755", "0.7394755", "0.7394755", "0.7394755", "0.7394755", "0.739291", "0.7386452", "0.73738754", "0.73738754", "0.7352695", "0.7352693", "0.7352693", "0.7352693", "0.7352693", "0.7352693", "0.7352693", "0.7352693", "0.7352693", "0.73266476", "0.73135984", "0.7305928", "0.72949624", "0.72877824", "0.7285838", "0.7282106", "0.72416204", "0.7240895", "0.7221567", "0.7171367", "0.7171367", "0.71611905", "0.71566", "0.7145402", "0.7120244", "0.7117614", "0.71070683", "0.7092601", "0.70883363", "0.7087476", "0.70809567", "0.70637214", "0.7016516", "0.70103455", "0.6993426", "0.6983268", "0.69789463", "0.695707", "0.6953462", "0.69421285", "0.69363195", "0.69363195", "0.69363195", "0.69363195", "0.69363195", "0.69312596", "0.69198155", "0.6915615", "0.6880941", "0.6871618", "0.68617237", "0.685541", "0.6826661", "0.6797654", "0.67874604", "0.67838675", "0.67765415", "0.6766558", "0.67599094", "0.6758159", "0.6757265", "0.67513996", "0.67513996", "0.6739496", "0.6711969", "0.6698152", "0.66936064", "0.6689928", "0.6684497", "0.66838753", "0.6683133", "0.668292", "0.6680624" ]
0.76601875
10
Handler for 500 errors.
def handler500(request):
    response = render_to_response('500.html', {}, RequestContext(request))
    response.status_code = 500
    return response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def handler500(request):\n \n #Setting the variable and template page for the 500 error\n response = render_to_response('500.html', {}, context_instance=RequestContext(request))\n response.status_code = 500\n return response", "def handler500(request, *args, **argv):\n response = render_to_response('500.html', {})\n response.status_code = 500\n return response", "def handler500(request):\n import sys,traceback\n from django.template import Context, loader\n from django.http import HttpResponseServerError\n\n t = loader.get_template('500.html')\n typo, value, tb = sys.exc_info()\n\n return HttpResponseServerError(t.render(Context({\n 'exception_value': value,\n 'DEBUG': settings.TEMPLATE_DEBUG,\n 'value':typo,\n 'tb':traceback.format_exception(typo, value, tb)})))", "def internal_error_handler(error):\r\n return render_template('error.500.html')", "def error_500_handler(error):\n new_issue = 'https://github.com/andresriancho/w3af/issues/new'\n\n try:\n # Extract the filename and line number where the exception was raised\n exc_type, exc_value, exc_traceback = sys.exc_info()\n filepath = traceback.extract_tb(exc_traceback)[-1][0]\n filename = basename(filepath)\n lineno, function_name = get_last_call_info(exc_traceback)\n\n response = jsonify({'code': 500,\n 'message': str(error),\n 'filename': filename,\n 'line_number': lineno,\n 'function_name': function_name,\n 'exception_type': error.__class__.__name__,\n 'please': new_issue})\n except Exception, e:\n # I don't want to fail in the exception handler\n response = jsonify({'code': 500,\n 'exception': str(error),\n 'handler_exception': str(e),\n 'please': new_issue})\n\n response.status_code = 500\n return response", "def handle_500(e):\n try:\n raise e\n except:\n return traceback.format_exc(), 500", "def _default_error_handler(self, exception):\n\n self.log.error(exception)\n return '', 500", "def nondefault_500_error(request, template_name='500nondefault.html'):\n t = loader.get_template(template_name) # You need to create a 500.html template.\n ltype,lvalue,ltraceback = sys.exc_info()\n sys.exc_clear() #for fun, and to point out I only -think- this hasn't happened at \n #this point in the process already\n return http.HttpResponseServerError(t.render(Context({'type':ltype,'value':lvalue,'traceback':ltraceback})))", "def handle_500_error(_error):\n return make_response(jsonify(SERVER_ERROR), 500)", "def internal_server_error(e):\n return render_template(\"error/500.html\"), 500", "def server_error(e):\n return render_template('500.html'), 500", "def application_error(e):\n return render_template('500.html', error=e), 500", "def internal_error(e):\n return render_template(\"errors/500.html\"), 500", "def server_error(request):\n response = render(request, '500.html')\n response.status_code = 500\n\n return response", "def view_500(request, url = None):\n res = render_to_response(\"500.html\", context_instance=RequestContext(request))\n res.status_code = 500\n return res", "def flask_force_error():\n raise Exception('forced 500 error')", "def error_500(error):\n # Delete the error variable as unused\n del error\n # Render 404 page\n return render_template('500.html'), 500", "def internal_server_error(e):\n return render_template('500.html', error=repr(e)), 500", "def server_error(e):\n return 'Error while serving request', 500", "def application_error(e):\n return 'Sorry, unexpected error: {}'.format(e), 500", "def application_error(e):\n return 'Sorry, unexpected error: {}'.format(e), 500", "def application_error(e):\n return 'Sorry, 
unexpected error: {}'.format(e), 500", "def application_error(e):\n return 'Sorry, unexpected error: {}'.format(e), 500", "def application_error(e):\n return 'Sorry, unexpected error: {}'.format(e), 500", "def application_error(e):\n return 'Sorry, unexpected error: {}'.format(e), 500", "def application_error(e):\n return 'Sorry, unexpected error: {}'.format(e), 500", "def application_error(e):\n return 'Sorry, unexpected error: {}'.format(e), 500", "def err500():\n return render_template('404.html', year=datetime.now().year)", "def custom_500(request, exception=None):\n return render(request, \"500.html\", {\"exception\": exception})", "def internal_server_error(e):\n\n # Respons to api request\n if request.accept_mimetypes.accept_json and \\\n not request.accept_mimetypes.accept_html:\n resp = jsonify({'error': 'internal server error'})\n resp.status_code = 500\n return resp\n\n return render_template('errors/500.html'), 500", "def error_handler_middleware(app):\n def wsgi_app(environ, start_response):\n try:\n return app(environ, start_response)\n except Exception, e:\n logging.exception(e)\n # ... display a custom error message ...\n response = webapp.Response()\n response.set_status(500)\n response.out.write('Ooops! An error occurred...')\n response.wsgi_write(start_response)\n return ['']\n\n return wsgi_app", "def debug_error_handler(environ, start_response):\n exc_info = environ.get('com.xythian.shotweb.exception')\n write = start_response('500 Internal server error',\n [('Content-type', 'text/html')],\n exc_info)\n et, v, tb = exc_info\n import traceback\n traceback.print_exception(et, v, tb, file=sys.stderr)\n return cgitb.html(exc_info)", "def _send_internal_server_error(self):\n template_filename = self._get_config_template('500')\n text = read_template(\n template_filename,\n title='%s - Internal Error' % SERVER_NAME,\n header='Internal error')\n if not text:\n # fallback to hard-coded template\n text = TEMPLATE_500\n self._send_head(text, 500)\n if not self._header_only:\n self.wfile.write(text)", "def server_error(e):\n return 'Eftirfarandi villa kom upp: {}'.format(e), 500", "def internal_server_error(error):\n return render_template('error.html', error_msg=\"500 Internal Server error\", pagetitle=\"500 Internal Server error\"), 500", "def server_error(request, template_name='500.html'):\n # don't risk running context processors\n context = dict(settings.TEMPLATE_CONSTANTS)\n context['MEDIA_URL'] = settings.MEDIA_URL\n context['STATIC_URL'] = settings.STATIC_URL\n return render_to_response(template_name, context)", "def server_error(request):\n return defaults.server_error(request, template_name=get_template_name(request, \"500.html\"))", "def error(self, handler):\n pass", "def exceptions(e):\n ts = strftime('[%Y-%b-%d %H:%M]')\n tb = traceback.format_exc()\n logger.error('%s %s %s %s %s 5xx INTERNAL SERVER ERROR\\n%s',\n ts,\n request.remote_addr,\n request.method,\n request.scheme,\n request.full_path,\n tb)\n return \"Internal Server Error\", 500", "def internal_server_error(err):\n return error_formatter(code='500_01', details=err)", "def internal_error():\n return HttpError(500)", "def page_not_found(e):\n return render_template(\"500.html\"), 500", "def after_request(response):\n # This avoids the duplication of registry in the log,\n # since that 500 is already logged via @app.errorhandler.\n if response.status_code != 500:\n ts = strftime('[%Y-%b-%d %H:%M]')\n logger.error('%s %s %s %s %s %s',\n ts,\n request.remote_addr,\n request.method,\n request.scheme,\n 
request.full_path,\n response.status)\n return response", "def handler404(request):\n \n #Setting the variable and template page for the 500 error\n response = render_to_response('404.html', {}, context_instance=RequestContext(request))\n response.status_code = 404\n return response", "def resp500(msg):\n app.logger.error(msg)\n return Resp({'message':msg, 'success':False}, status=500)", "def server_error(error):\n error_message = str(error)\n return render_template('error-pages/500-page.html', error_message=error_message, isFooter=True), 500", "def raise_500():\n raise ValueError('Foo!')", "def handle_uncaught_error(e):\n status_code = 500\n\n result = {\n \"error_message\": \"Unknown or unexpected error.\",\n \"error_code\": \"INTERNAL_SERVER_ERROR\"\n }\n return jsonify(result), status_code", "def _handle_code_500(self, server_response):\n filename = \"server_error.html\"\n try:\n with open(filename, 'w') as f:\n f.write(server_response)\n except IOError:\n print(\"Can not open file '%s' to write server answer. Skipping.\" % filename)\n return None\n return filename", "def _handle_error(cls, e, request):\r\n if e.check(InvalidRequest):\r\n msg = e.getErrorMessage()\r\n code = httpstatus.HTTP_STATUS_CODE_BAD_REQUEST[0]\r\n else:\r\n e.printTraceback()\r\n msg = 'Fatal Error'\r\n code = httpstatus.HTTP_STATUS_CODE_INTERNAL_SERVER_ERROR[0]\r\n\r\n cls._render(request, code, 'text/plain; charset=utf-8', msg)", "def api_error_handler(ex):\n try:\n status_code = ex.code\n except AttributeError:\n status_code = 500\n if flask.request.path.startswith('/api/'):\n app.logger.error(str(ex))\n if app.config.get('DEBUG', False):\n resp = flask.jsonify(message=str(ex))\n else:\n resp = flask.jsonify(message='Internal Server Error')\n resp.status_code = status_code\n return resp\n return flask.make_response(\n flask.render_template(\n 'error.html', exc=ex,\n title=error_titles.get(status_code, 'Error')),\n status_code)", "def server_error(err):\n log.error(err)\n return err.msg, 500", "def exceptions(e):\n ts = strftime('[%Y-%b-%d %H:%M]')\n tb = format_exc()\n app.logger.error('%s %s %s %s %s 5xx INTERNAL SERVER ERROR\\n%s',\n ts,\n request.remote_addr,\n request.method,\n request.scheme,\n request.full_path,\n tb)\n return jsonify(message=\"Internal Server Error\"), 500", "def custom_exception_handler(exc, context):\n response = exception_handler(exc, context)\n\n return Response(\n str(exc),\n status=response.status_code if response is not None else HTTP_500_INTERNAL_SERVER_ERROR,\n )", "def handle_exception(self, exception, debug):\n if isinstance(exception, webapp2.HTTPException):\n self._RawWrite(\"%d %s\" % (exception.code, exception.title))\n self.response.set_status(exception.code)\n else:\n logging.exception(exception)\n self._RawWrite(\"500 Server Error\")\n self.response.set_status(500)", "def httperror( status_code=500, message=b'' ):", "def dc_server_error(request, template_name=ERROR_500_TEMPLATE_NAME):\n context = {\n \"site_logo\": getattr(\n settings, \"SITE_LOGO\", \"dc_theme/images/logo-with-text.png\"\n )\n }\n try:\n template = loader.get_template(template_name)\n except TemplateDoesNotExist:\n if template_name != ERROR_500_TEMPLATE_NAME:\n # Reraise if it's a missing custom template.\n raise\n return http.HttpResponseServerError(\n \"<h1>Server Error (500)</h1>\", content_type=\"text/html\"\n )\n return http.HttpResponseServerError(template.render(context, request))", "def error(self, request):\n if self.debug:\n import cgitb\n request.stdout.write('Content-Type: 
text/html\\r\\n\\r\\n' +\n cgitb.html(sys.exc_info()))\n else:\n errorpage = \"\"\"<!DOCTYPE HTML PUBLIC \"-//IETF//DTD HTML 2.0//EN\">\n<html><head>\n<title>Unhandled Exception</title>\n</head><body>\n<h1>Unhandled Exception</h1>\n<p>An unhandled exception was thrown by the application.</p>\n</body></html>\n\"\"\"\n request.stdout.write('Content-Type: text/html\\r\\n\\r\\n' +\n errorpage)", "def after_request(response):\n # This avoids the duplication of registry in the log,\n # since that 500 is already logged via @app.errorhandler.\n if response.status_code != 500:\n ts = strftime('[%Y-%b-%d %H:%M]')\n message = '{0} {1} {2} {3} {4} {5}'.format(\n ts,\n request.remote_addr,\n request.method,\n request.scheme,\n request.full_path,\n response.status)\n print(message)\n return response", "def handle_error(self, request_handler, client_address):\n logger.debug('handle_error(%s:%s)' % client_address)", "def internal_error(error):\n\n db.session.rollback()\n return render_template('errors/500.html'), 500", "def internal_error(error):\n current_app.logger.info(error)\n db.session.rollback()\n return error, \"500\"", "def exception_handler(self, exception):\n pass", "def handle_err(self):\n pass", "def register_error_handler(app):\n def errorhandler(error):\n response = error.to_json()\n response.status_code = error.status_code\n print(response.status_code)\n return response\n\n app.errorhandler(ExceptionHandler)(errorhandler)", "def server_error(request, template_name='500.html', data=None):\n\n error_id = str(uuid.uuid4())\n error_message = 'Error ID: %s' % error_id\n if hasattr(request, 'session'):\n error_message = '%s. URLs leading up to this error: %s' % (\n error_message, url_history(request.session))\n logger.error(error_message)\n\n t = loader.get_template(template_name)\n\n if data:\n data = dict(data)\n else:\n data = dict()\n data['error_id'] = error_id\n result = http.HttpResponseServerError(t.render(Context(data)))\n return result", "def handle_expt(self):\r\n self._perform_on_error_handling()", "def handle_error(self, request, client_address):\n\t\tprint '-'*40\n\t\tprint 'Exception happened during processing of request from',\n\t\tprint client_address\n\t\timport traceback\n\t\ttraceback.print_exc() # XXX But this goes to stderr!\n\t\tprint '-'*40", "def http_error_handler(ex, req, resp, params):\n resp.body = encode.encode({\n 'status': 1,\n 'msg': 'HTTP error: ' + ex.status\n })", "def on_error(self, status_code, data):\n\t\tprint(\"error_code: \",status_code)", "def process_exception(self, request, exception):\n logging.error(\"ERROR\")\n logging.error(traceback.format_exc())\n response = set_response(\"Internal server error\", False, 500, {})\n return JsonResponse(response, status=response[\"http_code\"])", "def handle_exception(self, exception, debug_mode): # pylint: disable-msg=C0103\n self.error(500)\n logger = logging\n if self.fsm:\n logger = self.fsm.logger\n logger.exception(\"FSMHandler caught Exception\")\n if debug_mode:\n import traceback, sys, cgi\n\n lines = ''.join(traceback.format_exception(*sys.exc_info()))\n self.response.clear()\n self.response.out.write('<pre>%s</pre>' % (cgi.escape(lines, quote=True)))", "def root_simple_error_handler(exc, *args, app_name=''):\n\n #print('args',args)\n check_exception = 0\n for each_args in args:\n #print('each_args',each_args['view'].__module__)\n if each_args['view'].__module__ == 'hrms.views' or each_args['view'].__module__ == 'pms.views':\n #print('ok')\n check_exception = 1\n if isinstance(exc,ValidationError):\n 
print('ValidationError',exc)\n print('ValidationError',exc.get_codes())\n #n = dict(exc.detail)\n headers = {}\n if check_exception == 1:\n return Response({'error': exc.detail},status=exc.status_code,headers=headers)\n else:\n return Response(exc.detail,status=exc.status_code,headers=headers)\n\n elif isinstance(exc, exceptions.APIException):\n print('APIException',exc.get_full_details())\n headers = {}\n if getattr(exc, 'auth_header', None):\n headers['WWW-Authenticate'] = exc.auth_header\n if getattr(exc, 'wait', None):\n headers['X-Throttle-Wait-Seconds'] = '%d' % exc.wait\n print('exc.detail',exc.detail)\n if check_exception == 1:\n return Response({'error': exc.detail},status=exc.status_code,headers=headers)\n else:\n return Response(exc.detail,status=exc.status_code,headers=headers)\n\n elif isinstance(exc, Http404):\n print('Http404')\n if check_exception == 1:\n return Response({'error': 'Not found'},status=status.HTTP_404_NOT_FOUND)\n else:\n return Response('Not found',status=status.HTTP_404_NOT_FOUND)\n\n elif isinstance(exc, PermissionDenied):\n print('PermissionDenied')\n if check_exception == 1:\n return Response({'error': 'Permission denied'},\n status=status.HTTP_403_FORBIDDEN)\n else:\n return Response('Permission denied',status=status.HTTP_403_FORBIDDEN)\n\n # Note: Unhandled exceptions will raise a 500 error.\n return None", "def handle_exception(self, exception, debug):\n if isinstance(exception, webapp2.HTTPException):\n context = {'error': \"%d %s\" % (exception.code, exception.title), 'detail': exception.detail}\n self.response.set_status(exception.code)\n else:\n logging.exception(exception)\n context = {'error': \"500 Server Error\"}\n self.response.set_status(500)\n return self.render_json(context)", "def handle_error(self, request, error):\n self.log.error(\"An error occurred at request \" + repr(request) + \": \" + repr(error))", "def handle_error(self, err): # pragma: no cover\n # log every exception raised in the application\n print('we ended up in the API handle_error()', err, err.__class__)\n\n # catch other HTTP errors\n if isinstance(err, HTTPException):\n original = getattr(err, \"original_exception\", None)\n return jsonify({\n 'success': False,\n 'error': err.code,\n \"message\": getattr(err.error, 'message')\n }), err.code\n\n # if 'message' attribute isn't set, assume it's a core Python exception\n if not getattr(err, 'message', None):\n original = getattr(err, \"original_exception\", None)\n return jsonify({\n 'message': 'Server has encountered an unknown error'\n }), 500\n\n # Handle application-specific custom exceptions\n return jsonify(**err.kwargs), err.http_status_code", "def error_handler(msg):\n print(\"Server Error: %s\" % msg)", "def jsonable_server_error(request, template_name='500.html'):\r\n if request.is_ajax():\r\n msg = {\"error\": \"The edX servers encountered an error\"}\r\n return HttpResponseServerError(json.dumps(msg))\r\n else:\r\n return server_error(request, template_name=template_name)", "def handle_api_error(e):\n return f\"Failed to call Giphy API: {e}\", 500", "def get_500_response(message):\n headers = HTTPHeaders.HTTPHeaders()\n add_default_headers(headers)\n headers[\"Connection\"] = \"close\"\n headers[\"Content-Length\"] = str(len(message))\n headers[\"Content-Type\"] = \"text/plain\"\n\n return HTTPResponse.HTTPResponse(version=1.0, status_code=500, phrase=\"Internal Error\",\n headers=headers, data=message)", "def error_handler(msg):\n print \"Server Error: %s\" % msg", "def error_handler(msg):\n print \"Server 
Error: %s\" % msg", "def register_error_handlers(self):\n\n def error_handler(error):\n if not isinstance(error, exceptions.HTTPException):\n error = exceptions.InternalServerError()\n return response.Response(bootstrap.card(body=_.span[_.p(style='color:#888')[error.description or ''],\n _.img(src=flask.url_for('mara_app.static',\n filename='mara.jpg'),\n style='margin-top:30px;max-width:100%;')]),\n title=f'{error.code} {error.name}',\n status=error.code)\n\n for cls in exceptions.HTTPException.__subclasses__():\n self.register_error_handler(cls, error_handler)", "def error_view_handler(request, exception, status):\n if status not in [400, 403, 404, 500]:\n status = 500\n\n return render(\n request,\n template_name=\"richie/error.html\",\n status=status,\n context={\n \"error\": exception,\n \"status\": status,\n \"title\": CONTEXT_ERRORS[status][\"title\"],\n \"content\": CONTEXT_ERRORS[status][\"content\"],\n },\n )", "def handle_exception(self,exc):\n logger.error(f\"Exception in request: {traceback.format_exc()}\")\n status_obj = status.HTTP_400_BAD_REQUEST\n if type(exc) is response.Http404:\n status_obj = status.HTTP_404_NOT_FOUND\n return Response(\n MediaUtil.generate_error_image(\n status_obj,\n str(exc),\n self.request.accepted_renderer.format),\n status=status_obj)", "def key_error_page(e):\n return render_template(\"index.html\", error=e), 500", "def wsgi_tool_error_handler(e):\n status_code = e.code\n result = {\n \"error_message\": e.description,\n \"error_code\": e.name.upper().replace(\" \", \"_\")\n }\n return jsonify(result), status_code", "def handle_error(self, e):\n code = getattr(e, 'code', 500) # Gets code or defaults to 500\n if code == 404:\n return self.make_response({\n 'message': 'not-found',\n 'code': 404\n }, 404)\n return super(MyApi, self).handle_error(e) # handle others the default way", "def error_handler(response, **kwargs):\n if 400 <= response.status_code <= 499:\n message = response.json()['error_description'] \\\n if 'error_description' in response.json() \\\n else response.json()['error_detail']\n raise ClientError(response, message)\n\n elif 500 <= response.status_code <= 599:\n raise ServerError(response)\n\n return response", "def internal_error(exception):\n app.logger.error(exception)\n return flask.make_response('server error', 500)", "def sm_error_handler(self, errors):\n try:\n yield\n except Exception as e:\n if issubclass(e.__class__, ManagerError) or \\\n issubclass(e.__class__, ManagerFatalError) or \\\n isinstance(e, ConnectionError) or \\\n xmlrpclib.ProtocolError or \\\n xmlrpclib.Fault:\n\n errors.append(repr(e))\n elif isinstance(e, socket.error):\n errors.append(repr(e))\n errors.append(\"Please make sure the server port is open.\")\n else:\n raise e", "def internal_error(error):\n return jsonify({'error': \"Internal Server Error. 
\"\n \"Bitte die Logdatei für Details anschauen.\"}), 500", "def test_500_internal_server_error(self):\n # create route to abort the request with the 500 Error\n @self.app.route('/500')\n def internal_server_error():\n abort(500)\n response = self.client.get('/500')\n self.assertEqual(response.status_code, 500)", "def handle_internal_error(exception):\n logging.error(exception)\n db.session.rollback()\n return jsonify({\n 'message': 'An unexpected internal error has occurred'\n }), 500", "def _send_error(self, req, code=500, message=''):\n headers = {'Content-Type': 'text/plain',\n 'Content-Length': str(len(message))}\n self._send_response(req, code, body=message, headers=headers)", "def _create_internal_server_error(self):\n body = self.server.create_error(\n 500, 'Internal Server Error',\n 'An unexpected error has occurred.',\n error=True\n )\n self._write_response(500, body, content_type=CONTENT_TYPE_ERROR)", "def page_not_found(er):\n return render_template('errors.html'), 500", "def internal_server_error(error): # pylint: disable=unused-argument\n response = jsonify(\n {\n \"success\": False,\n \"error_code\": 500,\n \"message\": \"Internal Server Error\",\n }\n )\n return response, 500", "async def instana_exception_handler(request, exc):\n try:\n span = async_tracer.active_span\n\n if span is not None:\n if hasattr(exc, 'detail') and (500 <= exc.status_code <= 599):\n span.set_tag('http.error', exc.detail)\n span.set_tag('http.status_code', exc.status_code)\n except Exception:\n logger.debug(\"FastAPI instana_exception_handler: \", exc_info=True)\n\n return await http_exception_handler(request, exc)", "def handle_unknown_errors(exc):\n return jsonify(dict(\n traceback=traceback.format_exc(),\n message=str(exc),\n )), 500" ]
[ "0.82592165", "0.82579106", "0.7647247", "0.7604049", "0.75217336", "0.7434948", "0.73925203", "0.7364613", "0.73038787", "0.7235353", "0.7229959", "0.7197154", "0.717115", "0.71682864", "0.71582335", "0.7123201", "0.7115274", "0.7104952", "0.70137286", "0.6970803", "0.6970803", "0.6970803", "0.6970803", "0.6970803", "0.6970803", "0.6970803", "0.6970803", "0.6968924", "0.69561833", "0.6952894", "0.69024277", "0.6894104", "0.6835361", "0.6833656", "0.6823917", "0.6781319", "0.6776891", "0.67679435", "0.6766525", "0.6755384", "0.6709148", "0.66522205", "0.66251844", "0.66014904", "0.659431", "0.65863", "0.65794104", "0.6568587", "0.65605694", "0.6531658", "0.6530825", "0.6530271", "0.651451", "0.65017045", "0.64660555", "0.6460899", "0.64465386", "0.6438164", "0.6416472", "0.6374473", "0.63542414", "0.63510495", "0.63186723", "0.6310512", "0.62949985", "0.62945116", "0.62919", "0.6278688", "0.62669814", "0.626217", "0.62573904", "0.6256346", "0.6250282", "0.62446284", "0.6241508", "0.62236875", "0.6216165", "0.6209923", "0.6203448", "0.61955285", "0.6192453", "0.6192453", "0.6169228", "0.61680144", "0.6149297", "0.6137747", "0.6116707", "0.61150396", "0.6113792", "0.6113101", "0.61045104", "0.60961586", "0.608548", "0.6084941", "0.6073569", "0.606113", "0.6051059", "0.6045582", "0.6039587", "0.6033729" ]
0.82960117
0
Join all party as party key
def all_party_key(all_party):
    if not all_party:
        all_party_key = 'all'
    elif isinstance(all_party, dict):
        sorted_role_name = sorted(all_party.keys())
        all_party_key = gen_key_string_separator.join([
            ('%s-%s' % (
                role_name,
                '_'.join([str(p) for p in sorted(set(all_party[role_name]))]))
             ) for role_name in sorted_role_name])
    else:
        all_party_key = None
    return all_party_key
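A minimal usage sketch for the function above (not part of the source row): the value of the module-level gen_key_string_separator is an assumption here, chosen as '#' purely for illustration.

# Hedged sketch; reproduces the dict branch of all_party_key with an assumed separator.
gen_key_string_separator = '#'  # assumed value, not given in the source

parties = {'host': [9999, 9998], 'guest': [10000]}
key = gen_key_string_separator.join(
    '%s-%s' % (role, '_'.join(str(p) for p in sorted(set(parties[role]))))
    for role in sorted(parties)
)
print(key)  # -> guest-10000#host-9998_9999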
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def combine_election_public_keys(\n election_public_keys: DataStore[GUARDIAN_ID, ElectionPublicKey]\n) -> ElectionJointKey:\n public_keys = map(lambda public_key: public_key.key, election_public_keys.values())\n\n return elgamal_combine_public_keys(public_keys)", "def party_id(self):\n pass", "def _join():\n df = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3', 'K4', 'K5'],\n 'A': ['A0', 'A1', 'A2', 'A3', 'A4', 'A5']})\n other = pd.DataFrame({'key': ['K0', 'K1', 'K2'],\n 'B': ['B0', 'B1', 'B2']})\n print(df.join(other, lsuffix='_caller', rsuffix='_other')) # 为重复 column 添加前缀\n print(df.set_index('key').join(other.set_index('key')))\n print(df.join(other.set_index('key'), on='key', how='right')) # left,right表示以哪边的index为准\n print(df.join(other.set_index('key'), on='key', how='inner')) # inner,outer 表示交集、并集", "def testCorrectJoin(self):\n b_tree = OOBTree()\n b_tree.update({1: \"Monkey D. Luffy\", 2: \"Roronoa Zoro\", 3: \"Nami\"})\n failed_counter = 0\n key = 1\n data = {\"from\":\"East Blue\"}\n (mod_data, mod_tree, failed_counter) = self.processing.join(b_tree, key, data, failed_counter)\n self.assertEqual(mod_data, {\"from\":\"East Blue\", \"right_data\":\"Monkey D. Luffy\"})\n self.assertEqual(len(mod_tree), 2)\n self.assertEqual(failed_counter, 0)", "def join(self, new_root, joined):\n self[self[joined]] = self[new_root]", "def secondary_keys(self):", "def merge_entities_on_identifiers(self) -> None:\n if self.forward_map:\n next_local_id = max(list(self.forward_map.keys())) + 1\n else:\n next_local_id = 1\n backward_keys = set(self.backward_map.keys())\n for kb in self.kbs:\n for p in kb.pathways:\n for e in p.entities:\n if e.xrefs:\n xref_overlap = set(e.xrefs) & backward_keys\n if xref_overlap:\n local_id = self.backward_map[xref_overlap.pop()]\n e.lid = local_id\n elif len(e.xrefs) == 1:\n self.forward_map[next_local_id] = [e.xrefs[0]]\n self.backward_map[e.xrefs[0]] = next_local_id\n e.lid = next_local_id\n next_local_id += 1\n else:\n print(e.xrefs)\n raise UnboundLocalError(\"Unknown identifiers\")\n\n kb.dump_pickle(kb.loc)\n self.save_id_dict()", "def build(self):\n party = self._party\n self.reset()\n\n # reset id to 'default' if party is exchange\n if party._type == PartyType.EXCHANGE:\n party._id = 'default'\n\n return party", "def add_party_assoc_to_product(self, party_id, product_id):\n print('Adding party assoc:', party_id, product_id)\n client = self.application.__init_blockchain_client__()\n response = client.addPartyAssociationToProduct(party_id, product_id)\n client.close()\n\n return response", "def join(self):\n pass", "def add_party(party_name, logo, members):\n size = len(parties) + 1\n new_party = {\n \"party_id\": size,\n \"party_name\": party_name,\n \"logo\": logo,\n \"members\": members\n }\n parties[size] = new_party", "def join(self):\n def is_zero(left, right):\n \"\"\"Two conditions need to be satisfied for a triple\n (cube1, cube2, i) give a nonzero i-join: no intervals\n in cube1 can have indices greater or equal to i, and\n no intervals in cube2 can have indices less than or\n equal to i.\n\n \"\"\"\n if left == tuple() or right == tuple():\n return False\n if isinstance(right, int):\n return right <= max(left)\n if isinstance(left, int):\n return left >= min(right)\n\n def _join(i, cube1, cube2, sign_exp):\n \"\"\"the i-th elementary join keeping track of signs.\n\n \"\"\"\n cube = Cube(cube1[:i] + (2,) + cube2[i + 1:])\n p, q = cube1[i], cube2[i]\n if (p, q) == (0, 1):\n return cube, sign_exp\n elif (p, q) == (1, 0):\n return cube, (1 + sign_exp) % 
2\n else:\n return None, None\n\n if not self:\n return self\n\n if self.degree is None:\n raise TypeError(f'only for homogeneous elements')\n\n answer = self.zero()\n for k, v in self.items():\n for inds in combinations(range(len(k[0])), self.arity - 1):\n skip = False\n for i, (cube1, cube2) in zip(inds, pairwise(k)):\n if (is_zero(cube1.intervals, i) or\n is_zero(i, cube2.intervals)):\n skip = True\n break\n if not skip:\n non_zero = True\n sign_exp = 0\n cube = k[0]\n for i, next_cube in zip(inds, k[1:]):\n cube, sign_exp = _join(i, cube, next_cube, sign_exp)\n if cube is None:\n non_zero = False\n break\n if non_zero:\n answer += answer.create({(cube, ): (-1)**sign_exp})\n\n return answer", "def owningSet(self) -> ghidra.util.graph.KeyIndexableSet:\n ...", "def pg_secondary_keys(self):", "def join_params(**params):\n\tparam_list = get_sorted_keys(params)\n\tvalues = []\n\tfor k in param_list:\n\t\tvalues.append(k+'-'+join_items(params[k]))\n\treturn \"_\".join(values)", "def get_parties(self):\n con = self.db.init_db()\n cur = con.cursor()\n query = \"SELECT party_id, name,hqAddress,logoUrl from Party;\"\n cur.execute(query)\n data = cur.fetchall()\n party_list = []\n\n for i, items in enumerate(data):\n party_id, name, hqAddress, logoUrl = items\n party = dict(\n party_id=party_id,\n name=name,\n hqAddress=hqAddress,\n logoUrl=logoUrl\n )\n party_list.append(party)\n\n return dict(status=200, data=party_list)", "def _rewrite_join(self, node: saldag.Join):\n\n if node.is_mpc:\n out_rel = node.out_rel\n key_col_idx = 0\n # oversimplifying here. what if there are multiple singleton\n # coll_sets?\n singleton_coll_sets = filter(\n lambda s: len(s) == 1,\n out_rel.columns[key_col_idx].coll_sets)\n singleton_coll_sets = sorted(list(singleton_coll_sets))\n if singleton_coll_sets:\n trusted_party = next(iter(singleton_coll_sets[0]))\n hybrid_join_op = saldag.HybridJoin.from_join(node, trusted_party)\n parents = hybrid_join_op.parents\n for par in parents:\n par.replace_child(node, hybrid_join_op)", "def join(self, a, *args):\n mapping = self._mapping\n set_a = mapping.setdefault(a, [a])\n\n for arg in args:\n set_b = mapping.get(arg)\n if set_b is None:\n set_a.append(arg)\n mapping[arg] = set_a\n elif set_b is not set_a:\n if len(set_b) > len(set_a):\n set_a, set_b = set_b, set_a\n set_a.extend(set_b)\n for elem in set_b:\n mapping[elem] = set_a", "def join_data(self, base_data, join_data, base_field, join_fields):\n for data in base_data:\n extra = join_data[data[base_field]]\n for field in join_fields:\n data[field] = extra[field]\n \n return base_data", "def gen_keys():", "def project_key_foreign_to_local(self, row):\n \n ex_key = []\n for lfld, ffld in zip(self.local_fields, self.foreign_fields):\n if lfld.opt:\n ex_key.append('1')\n ex_key.append('%s.%s' % (row, ffld.name))\n \n return ', '.join(ex_key)", "def secondary_keys_dicts(self):", "def test_join(self):\n s = djset()\n s.add([1, 2, 3])\n s.add([4, 5, 6])\n s.add([2, 5])\n self.assertEquals({1, 2, 3, 4, 5, 6}, s.data[1])\n self.assertFalse(2 in s.data)", "def project_key_local_to_foreign(self, row):\n \n ex_key = []\n for lfld, ffld in zip(self.local_fields, self.foreign_fields):\n if ffld.opt:\n ex_key.append('1')\n ex_key.append('%s.%s' % (row, lfld.name))\n \n return ', '.join(ex_key)", "def mergelots(bigdict, tblstojoin, joincol, how='outer'):\n for tbl in tblstojoin:\n if tbl == tblstojoin[0]:\n bigtbl = bigdict[tbl].copy()\n else:\n bigtbl = bigtbl.merge(bigdict[tbl], how=how, on=joincol)\n return bigtbl", "def 
join_provenances(provenance1, provenance2):\n # Use a dict to join them\n joined = dict(\n (p, True) for p in provenance1\n )\n joined.update(\n (p, True) for p in provenance2\n )\n return list(joined.keys())", "def _group_by_provider(self):\n rslt = {}\n for line in self:\n if not line.company_id.currency_provider:\n continue\n\n if rslt.get(line.company_id.currency_provider):\n rslt[line.company_id.currency_provider] += line.company_id\n else:\n rslt[line.company_id.currency_provider] = line.company_id\n return rslt", "def mergeWith(self, others):", "def join(data, strategy, source_left, source_right, destination, key_left, key_right, prefix_left, prefix_right, presorted, buffersize, tempdir, cache, missing):\n source_left = data.get(source_left)\n source_right = data.get(source_right)\n\n kwargs = {}\n if key_left == key_right:\n kwargs['key'] = key_left\n else:\n kwargs['lkey'] = key_left\n kwargs['rkey'] = key_right\n\n if presorted is True:\n kwargs['presorted'] = presorted\n\n if buffersize is not None:\n kwargs['buffersize'] = buffersize\n\n if tempdir:\n kwargs['tempdir'] = tempdir\n\n if 'anti' not in strategy:\n if prefix_left is not None:\n kwargs['lprefix'] = prefix_left\n if prefix_right is not None:\n kwargs['rprefix'] = prefix_right\n\n if strategy not in ['join', 'antijoin', 'hashjoin', 'hashantijoin']:\n kwargs['missing'] = missing\n\n if strategy == 'join':\n o = petl.join(source_left, source_right, **kwargs)\n elif strategy == 'leftjoin':\n o = petl.leftjoin(source_left, source_right, **kwargs)\n elif strategy == 'lookupjoin':\n o = petl.lookupjoin(source_left, source_right, **kwargs)\n elif strategy == 'rightjoin':\n o = petl.rightjoin(source_left, source_right, **kwargs)\n elif strategy == 'outerjoin':\n o = petl.outerjoin(source_left, source_right, **kwargs)\n elif strategy == 'antijoin':\n o = petl.antijoin(source_left, source_right, **kwargs)\n elif strategy == 'hashjoin':\n o = petl.antijoin(source_left, source_right, **kwargs)\n elif strategy == 'hashleftjoin':\n o = petl.hashleftjoin(source_left, source_right, **kwargs)\n elif strategy == 'hashlookupjoin':\n o = petl.hashlookupjoin(source_left, source_right, **kwargs)\n elif strategy == 'hashrightjoin':\n o = petl.hashrightjoin(source_left, source_right, **kwargs)\n\n data.set(destination, o)", "def _join(lst, key, sep=\";\"):\n return sep.join([d[key] for d in lst if d[key]])", "def _group_by_bank(self):\n rslt = {}\n for company in self:\n if not company.indexa_currency_provider:\n continue\n\n if rslt.get(company.indexa_currency_provider):\n rslt[company.indexa_currency_provider] += company\n else:\n rslt[company.indexa_currency_provider] = company\n return rslt", "def append_cand():\n if col_i == down_col:\n return (lastrow[col_i] +\n [[key + [append_key(lastrow[col_i], left_struc, key)],\n right_elem]])", "async def join(self, ctx, key: str):\n await ctx.message.delete()\n async with ctx.typing():\n data = await self.config.guild(ctx.guild).all()\n if data[\"private\"]:\n try:\n if ctx.author.voice.channel.id == data[\"pstart\"]:\n if key in data[\"pchannels\"]:\n await ctx.author.move_to(ctx.guild.get_channel(data[\"pchannels\"][key]))\n else:\n await self.sendNotInStartChannelMessage(ctx, data[\"pstart\"])\n except AttributeError:\n await self.sendNotInStartChannelMessage(ctx, data[\"pstart\"])\n else:\n await ctx.send(_(\"Private rooms are not enabled on this server.\"))", "def merge_working_sets(self, other):\n\n for dist in other.by_key.values(): self.add(dist)\n return self", "def 
__getSupportingChords(self, key):\n lookupTable = {\n 'C': ('F', 'Dm', 'C', 'Am', 'G', 'Em'),\n 'Am': ('F', 'Dm', 'C', 'Am', 'G', 'Em'),\n\n 'G': ('C', 'Am', 'G', 'Em', 'D', 'Bm'),\n 'Em': ('C', 'Am', 'G', 'Em', 'D', 'Bm'),\n\n 'D': ('G', 'Em', 'D', 'Bm', 'A', 'F#m'),\n 'Bm': ('G', 'Em', 'D', 'Bm', 'A', 'F#m'),\n\n 'A': ('D', 'Bm', 'A', 'F#m', 'E', 'C#m'),\n 'F#m': ('D', 'Bm', 'A', 'F#m', 'E', 'C#m'),\n\n 'E': ('A', 'F#m', 'E', 'C#m', 'B', 'G#m'),\n 'C#m': ('A', 'F#m', 'E', 'C#m', 'B', 'G#m'),\n\n 'B': ('E', 'C#m', 'B', 'G#m', 'F#', 'Gb', 'Ebm'),\n 'G#m': ('E', 'C#m', 'B', 'G#m', 'F#', 'Gb', 'Ebm'),\n\n 'F#': ('B', 'G#m', 'F#', 'Gb', 'Ebm', 'Db', 'Bbm'),\n 'Gb#': ('B', 'G#m', 'F#', 'Gb', 'Ebm', 'Db', 'Bbm'),\n 'Ebm': ('B', 'G#m', 'F#', 'Gb', 'Ebm', 'Db', 'Bbm'),\n\n 'Db': ('F#', 'Gb', 'Eb', 'Db', 'Bbm', 'Ab', 'Fm'),\n 'Bbm': ('F#', 'Gb', 'Eb', 'Db', 'Bbm', 'Ab', 'Fm'),\n\n 'Ab': ('Db', 'Bbm', 'Ab', 'Fm', 'Eb', 'Cm'),\n 'Fm': ('Db', 'Bbm', 'Ab', 'Fm', 'Eb', 'Cm'),\n\n 'Eb': ('Ab', 'Fm', 'Eb', 'Cm', 'Bb', 'Gm'),\n 'Cm': ('Ab', 'Fm', 'Eb', 'Cm', 'Bb', 'Gm'),\n\n 'Bb': ('Eb', 'Cm', 'Bb', 'Gm', 'F', 'Dm'),\n 'Gm': ('Eb', 'Cm', 'Bb', 'Gm', 'F', 'Dm'),\n\n 'F': ('Bb', 'Gm', 'F', 'Dm', 'C', 'Am'),\n 'Dm': ('Bb', 'Gm', 'F', 'Dm', 'C', 'Am')\n }", "def recipient_public_key(self):", "def generate_keys(g, o):\n priv = o.random()\n pub = priv * g\n\n return (priv, pub)", "def submitting_party(self):\n party = {\n 'businessName': self.name,\n 'address': address_utils.get_address_from_db2(self.legacy_address)\n }\n if self.phone_number:\n party['phoneNumber'] = self.phone_number\n return party", "def keyed_wheel_cipher(key, pool=None):\n if pool is None:\n pool = ascii_uppercase + digits\n original_pool = {}\n original_pool = list(pool)\n keyed_pool = makealphabet(key)\n # print(keyed_pool)\n return dict(zip(keyed_pool, original_pool))", "def _rewrite_join(self, node: saldag.Join):\n\n out_stored_with = node.out_rel.stored_with\n ordered_pars = [node.left_parent, node.right_parent]\n\n left_stored_with = node.get_left_in_rel().stored_with\n right_stored_with = node.get_right_in_rel().stored_with\n in_stored_with = left_stored_with | right_stored_with\n\n for parent in ordered_pars:\n if node.is_upper_boundary():\n # Entering mpc mode so need to secret-share before op\n out_rel = copy.deepcopy(parent.out_rel)\n out_rel.rename(out_rel.name + \"_close\")\n out_rel.stored_with = copy.copy(in_stored_with)\n # create and insert close node\n close_op = saldag.Close(out_rel, None)\n close_op.is_mpc = True\n saldag.insert_between(parent, node, close_op)\n # else:\n # raise Exception(\n # \"different stored_with on non-upper-boundary join\", node.debug_str())\n if node.is_leaf():\n if len(in_stored_with) > 1 and len(out_stored_with) == 1:\n target_party = next(iter(out_stored_with))\n node.out_rel.stored_with = copy.copy(in_stored_with)\n sal._open(node, node.out_rel.name + \"_open\", target_party)", "def key(self):\r\n\t\treturn ( hashE(self.edge),self.p1,self.p2);", "def get_join_key(self, event):\n return self._get_join_key(event)", "def keysAll():", "def join_govern_parties(df):\n govern_selector = df.index.get_level_values(OPTION).isin(['ERC-CatSí', 'JUNTSxCAT'])\n govern_df = df[govern_selector].groupby(CONSTITUENCY).sum()\n govern_df[OPTION] = 'Govern'\n govern_df.set_index(OPTION, append=True, inplace=True)\n no_govern_df = df[~govern_selector]\n return no_govern_df.append(govern_df)", "def _common_keypoints(self, *others):\n matches = self._match_table.dropna(0)\n keypoints = []\n for other in others:\n 
indices = matches[other.position.id].astype(int).values\n # the coordinates have to be flipped for later processing, hence the ::-1\n keypoints.append(other.keypoints[indices, ::-1])\n return np.stack(keypoints, axis=1)", "def membership(self, key):\n pass", "def join(tw_df, rtt_df):\n original_tw_id = []\n author_ids = []\n rtt_dates = []\n groups = rtt_df.groupby('original_tweet_id').groups\n for k in groups.keys():\n l_a = []\n l_r = []\n original_tw_id.append(k)\n for index in groups[k]:\n line = rtt_df.iloc[[index]]\n l_a.append(int(line['author_id']))\n l_r.append(str(line['retweet_date']))\n author_ids.append(l_a)\n rtt_dates.append(l_r)\n \n df_temp = pd.DataFrame()\n df_temp['natural_key'] = original_tw_id\n df_temp['rtt_author_ids'] = author_ids\n df_temp['retweet_dates'] = rtt_dates\n df_temp = df_temp.set_index('natural_key')\n tw_df = tw_df.set_index('natural_key')\n return tw_df.join(df_temp)", "def clean_party(self, party_id, party):\n\n cleaned_party = {\n 'party_id': party_id,\n 'party_name': party['name'],\n 'registered_date': party['founding_date'],\n 'register': party['party_sets'][0]['slug'],\n }\n return cleaned_party", "def sql_related_join(my_table='', mid_table='', join_table='', keys=[], colName='name', **kw):\n\t#SQL to join my table to the middle table and then from the middle table to the end join table\n\tif (my_table=='') or (mid_table=='') or (join_table=='') or (keys==[]):\n\t\tsql=''\n\t\tclauseTables=[]\n\telse:\n\t\tclauseTables=[mid_table, join_table]\n\t\tsql = my_table+\".id = \"+mid_table+\".\"+my_table+\"_id AND \"+join_table+\".id = \"+mid_table+\".\"+join_table+\"_id AND (\"\n\t\tfor key in keys:\n\t\t\tsql += join_table+\".\"+colName+\" = '\"+key+\"' OR \"\n\t\tsql = sql[0:-4]+\")\"\n\treturn dict(sql=sql, clauseTables=clauseTables)", "def _derive_key(\n self, passphrase: str, otp: YubikeyOTP, *args : bytes\n ) -> bytes:\n return self._context_kdf.derive(\n combine_keys(\n passphrase.encode('utf-8'),\n otp.token.private_uid,\n *args\n )\n )", "def join(phenny, input):\n # Can only be done in privmsg by an admin\n if input.sender.startswith('#'): return\n if input.admin: \n channel, key = input.group(1), input.group(2)\n if not key: \n phenny.write(['JOIN'], channel)\n else: phenny.write(['JOIN', channel, key])", "def join_distributions(a, b):\n assert a.keys() == b.keys()\n return {k: a[k] + b[k] for k in a}", "def merge_data_base_kvks(self):\n\n # create a data frame with all the unique kvk number/name combi\n df = self.url_df[[KVK_KEY, NAME_KEY]]\n df.set_index(KVK_KEY, inplace=True, drop=True)\n df = df[~df.index.duplicated()]\n\n # also create a data frame from the unique address kvk's\n name_key2 = NAME_KEY + \"2\"\n df2 = self.address_df[[KVK_KEY, NAME_KEY]]\n df2 = df2.rename(columns={NAME_KEY: name_key2})\n df2.set_index(KVK_KEY, inplace=True, drop=True)\n df2 = df2[~df2.index.duplicated()]\n\n # merge them on the outer, so we can create a combined kvk list\n df3 = pd.concat([df, df2], axis=1, join=\"outer\")\n\n # replace al the empty field in NAME_KEY with tih\n df3[NAME_KEY].where(~df3[NAME_KEY].isnull(), df3[name_key2], inplace=True)\n\n df3.drop(name_key2, inplace=True, axis=1)\n\n difference = df3.index.difference(df2.index)\n new_kvk_name = df3.loc[difference, :]\n\n n_before = self.address_df.index.size\n self.address_df.set_index(KVK_KEY, inplace=True)\n\n # append the new address to the address data base\n self.address_df = pd.concat([self.address_df, new_kvk_name], axis=0, sort=True)\n self.address_df.sort_index(inplace=True)\n 
self.address_df.reset_index(inplace=True)\n try:\n self.address_df.drop([\"index\"], axis=1, inplace=True)\n except KeyError as err:\n self.logger.info(err)\n\n n_after = self.address_df.index.size\n self.logger.info(\"Added {} kvk from url list to addresses\".format(n_after - n_before))", "def __generate_key_from_list_of(self, list_of_keys):\r\n list_of_keys = list(list_of_keys)\r\n list_of_keys.sort()\r\n return \",\".join(list_of_keys)", "def key_join(self, key, encode=True):\n if isinstance(key, str):\n parts = key.split('/')\n else:\n parts = key\n new_parts = []\n\n for part in parts:\n if isinstance(part, bytes):\n part = part.decode(\"utf-8\")\n if encode:\n part = quote(str(part))\n new_parts.append(part)\n\n return '/'.join(new_parts)", "def test_channel_join_private_global():\n # Clear the data structure\n clear_v1()\n # Call other functions to create the data and store in data structure\n auth_dict1 = auth_register_v2(\"[email protected]\", \"123456\", \"john\", \"smith\")\n auth_dict2 = auth_register_v2(\"[email protected]\", \"555555\", \"harry\", \"potter\")\n\n auth_token1 = auth_dict1[\"token\"]\n auth_token2 = auth_dict2[\"token\"]\n\n channel_id1 = channels_create_v2(auth_token2, \"Chill Soc\", False)\n\n\n # Global DREAM owner attempt to join a private channel \n channel_join_v2(auth_token1, channel_id1[\"channel_id\"])\n\n # Check if the global owner successfully join private channel\n assert channels_list_v2(auth_token1) == {\n 'channels': [\n \t{\n \t\t'channel_id': 1, # channel id start at 1 or 0 is worth checking ? It's currently start at 1.\n \t\t'name': 'Chill Soc',\n \t}\n ],\n }", "def _expand_keys(entities):\n keys = list(entities.keys())\n values = list(product(*[entities[k] for k in keys]))\n return [{k: v for k, v in zip(keys, combs)} for combs in values]", "def left_join_list_one():\n return[\n ['wrath', 'anger', 'delight'],\n ['fond', 'enamored', 'averse'],\n ['guide', 'usher', 'jam'],\n ['outfit', 'garb', 'follow'],\n ['diligent', 'employed', 'idle'],\n ]", "def set_public_key(self, election_joint_public_key: ElementModP) -> ElectionBuilder:\n self.election_joint_public_key = election_joint_public_key\n return self", "def generate_random_agent_keys():\n\n new_random_agent = list(all_waypoints)\n random.shuffle(new_random_agent)\n\n converted_set = np.array(new_random_agent)\n i = 0\n for key in new_random_agent:\n converted_set[i] = names_lookup[key]\n i+= 1\n\n #print converted_set\n return converted_set", "def i_e_c():\r\n parties = {}\r\n \r\n print(\"Independent Electoral Commission\")\r\n print(\"--------------------------------\")\r\n party = input(\"Enter the names of parties (terminated by DONE):\\n\")\r\n \r\n while party != 'DONE':\r\n if party:\r\n if not(party in parties):\r\n parties[party] = 1\r\n else:\r\n parties[party] += 1\r\n \r\n party = input('')\r\n \r\n parties2 = sorted(list(parties.keys())) \r\n \r\n if len(parties) > 0:\r\n print(\"\\nVote counts:\")\r\n \r\n for i in parties2:\r\n print(i.ljust(10) + ' -', parties[i])", "def hjoin(first_sygroup, second_sygroup):\n visitor = HJoinVisitor(first_sygroup)\n second_sygroup.visit(visitor)", "def partSchemes(self):\n out = {}\n for idx in subsets(range(1, self._.d + 1)):\n if len(idx) > 0 and len(idx) < self._.d and idx != [1]:\n try:\n out[tuple(idx)] = self.merge(*idx)\n except IndexError:\n pass\n return out", "def pair_parking(streetvolume):\r\n spaces = gpd.read_file(raw_loc + '/onstreet_parking/Sfpark_OnStreetParkingCensus_201404.shp')\r\n spaces.crs = {'init': 'epsg:2227'}\r\n 
spaces = spaces.to_crs(epsg =4326)\r\n spaces = spaces[spaces.PRKNG_SPLY < 1000]\r\n spaces.sort_values(by = 'PRKNG_SPLY', ascending = False, inplace = True)\r\n spaces = spaces[['geometry', 'PRKNG_SPLY', 'ST_NAME']]\r\n spaces.rename(columns = {'PRKNG_SPLY':'park_supply'}, inplace = True)\r\n total_join = gpd.tools.sjoin(streetvolume, spaces, how= 'left')\r\n total_join ['park_supply']= total_join.apply(lambda x: 0 if x['streetname'] != x['ST_NAME'] else x['park_supply'], axis = 1)\r\n total_join.sort_values(by = 'park_supply', ascending = False, inplace = True)\r\n total_join.drop_duplicates(subset = ['lineid'], inplace = True)\r\n total_join.to_file(proc_loc+ '/final_streets/SF_Street_Data.shp')\r\n total_join.drop(columns = ['index_right', 'geometry', 'ST_NAME'], inplace = True)\r\n total_join.to_sql('street_volume_data', conn, if_exists = 'replace')\r\n\r\n return", "def made_key(self):\n \n # select a random number from 1 to infinity \n ran_number = random.randint(1,99)\n\n # create a random set based on the first number you chose \n set = xrange(ran_number,28*ran_number,ran_number)\n\n # increase the value of every number in the set \n for item in set:\n item += 3\n Code_Fouad_Teniou.my_key.append(item)\n\n #return a random key \n return Code_Fouad_Teniou.my_key", "async def async_join_players(self, group_members):\n\n _LOGGER.debug(\n \"%s wants to add the following entities %s\",\n self.entity_id,\n str(group_members),\n )\n\n \"\"\"Make sure self.zone is or becomes master.\"\"\"\n await self.coordinator.data.zone_master(self.zone_id, True)\n\n entities = [\n entity\n for entity in self._casatunes_entities()\n if entity.entity_id in group_members\n ]\n\n for client in entities:\n if client != self:\n await self.coordinator.data.zone_join(self.zone_id, client.zone_id)\n\n await self.coordinator.async_refresh()\n await self.sync_master()", "def sub_sensitive_key(target_dict, sensitive_key_names):\r\n for sensitive_key in sensitive_key_names:\r\n if sensitive_key in target_dict:\r\n target_dict[sensitive_key] = generate_random_string(6)\r\n\r\n return target_dict", "def join ( self ) :\n raise AbstractMethodException( self , \"join\" )", "def build_ownership_map(table, key_from_row, value_from_row):\n return _build_ownership_map_from_rows(\n sa.select(table.c).execute().fetchall(),\n key_from_row,\n value_from_row,\n )", "def join_country_code_data(daily_data, country_code_data):\n #new columns: country, country_code, geometry\n return country_code_data.merge(daily_data, left_on = 'country', right_on = 'Country/Region').drop(['country'], axis=1)", "def add_parties(self, *parties) -> None:\n\n for party in parties:\n self._route_table['route_table'][party.get_id()] = party.to_entry_point(\n )", "def get_joins(self, p, vv):\n self._get_joins(p, vv)", "def build_messy_lookup_lad(source,dest):\n la = QuickGrid().open(source)\n\n lookup = QuickGrid()\n lookup.header = [\"gss-code\",\"local-authority-code\"]\n\n possible = [\"gss-code\",\"archaic-gss-code\"]\n possible = [p for p in possible if p in la.header]\n for r in la:\n for p in possible:\n if r[p]:\n values = r[p].split(\",\")\n for v in values:\n lookup.add([v,r[\"local-authority-code\"]])\n \n lookup.save(dest,force_unicode=True)", "def inner_join(sorted1, sorted2, key1, key2):\n p1 = 0\n p2 = 0\n result = []\n\n while (p1 < len(sorted1) and p2 < len(sorted2)):\n # if entries\n if sorted1[p1][key1] == sorted2[p2][key2]:\n entry = {}\n entry.update(sorted1[p1])\n entry.update(sorted2[p2])\n result.append(entry)\n p2 += 1\n elif 
sorted1[p1][key1] < sorted2[p2][key2]:\n p1 += 1\n elif sorted1[p1][key1] > sorted2[p2][key2]:\n p2 += 1\n return result", "def __gen_keys__(self):\n if self.seed == b'':\n self.seed = urandom(self.seed_size)\n\n n_prev = Node(hash=hash_factory(data=bytes(self.seed)).digest())\n self.keys.insert(0, n_prev)\n\n for i in range(1, self.l + 1):\n n = Node(hash=hash_factory(data=bytes(n_prev.hash)).digest())\n self.keys.insert(0, n)\n n_prev = n\n\n # Add the decoy nodes as parents of pair nodes.\n # The pair nodes will _always_ be the right child of the decoy nodes.\n for i in range(2, self.l + 1, 2):\n n_pair = self.keys[i] # type: Node\n n_impair_prev = self.keys[i-1] # type: Node\n n_pair.parent = Node(hash=bytes(n_impair_prev.hash))\n n_pair.parent.right_child = n_pair", "def keyEquivalent( self ):\n\t\treturn None", "def join_duplicate_keys(ordered_pairs):\n d = {}\n for k, v in ordered_pairs:\n if k in d:\n if isinstance(d[k], list):\n d[k].append(v)\n else:\n newlist = []\n newlist.append(d[k])\n newlist.append(v)\n d[k] = newlist\n else:\n d[k] = v\n return d", "def join(data):\n username, room = data['username'], data['room']\n join_room(room)", "def join_rooms(self):\n logging.info(\"Joining MUC rooms\")\n xrooms = self.botconfig.findall('rooms/muc')\n rooms = {}\n for xroom in xrooms:\n rooms[xroom.attrib['room']] = xroom.attrib['nick']\n for room in set(self.rooms.keys()).difference(rooms.keys()):\n logging.info(\"Parting room %s.\" % room)\n self.plugin['xep_0045'].leaveMUC(room, self.rooms[room])\n del self.rooms[room]\n for room in set(rooms.keys()).difference(self.rooms.keys()):\n self.rooms[room] = rooms[room]\n logging.info(\"Joining room %s as %s.\" % (room, rooms[room]))\n self.plugin['xep_0045'].joinMUC(room, rooms[room])", "def __add_author(self, key_name, others_names, personal_information):\n for name in others_names:\n self.author_to_authorID[name] = (key_name, personal_information)", "def consolidate_other(self):\n for set in [self.clients, self.shutdown_clients]:\n for c in set:\n client_other = set[c].get('other', {})\n for k in client_other.keys():\n if k == 'prefix_paths':\n if not self.other.get('prefix_paths'):\n self.other['prefix_paths'] = []\n # Repeatedly converting the array to a dict and back is inefficient\n # but this will happen only once per client and the number of prefix\n # paths should be small so overall impact should be very low\n pp = client_other.get('prefix_paths', [])\n if pp:\n self.other['prefix_paths'].extend(pp)\n self.other['prefix_paths'] = sorted(list(dict.fromkeys(self.other['prefix_paths'])))", "def make_consistent(self):\r\n\r\n for key in self.get_keys():\r\n self.eliminate_key(key)\r\n\r\n for i_temp in self.indexes(): #i will be a note index\r\n for j_temp in self.get_keys_from_note(i_temp):\r\n if self.key_dict_contains(j_temp):\r\n self.add_key(j_temp,Index(i_temp))\r\n## self.key_dict[j_temp].add(str(Index(i_temp)))\r\n else:\r\n self.initiate_new_key(j_temp,Index(i_temp))", "def lineup_ids(self):\n lineup_ids = {}\n for team_id, team_players in self.current_players.items():\n players = [str(player_id) for player_id in team_players]\n sorted_player_ids = sorted(players)\n lineup_id = \"-\".join(sorted_player_ids)\n lineup_ids[team_id] = lineup_id\n return lineup_ids", "def _keys_in_groupby(move):\n return (move.picking_id, move.product_id.responsible_id)", "async def join(ctx):\n if ctx.message.channel.name.lower() not in tod_channels:\n return\n\n room = ctx.message.channel.name.lower()\n if room not in tod_games:\n await 
amor_manager.say(\"Truth Or Dare not in progress in {}\".format(room))\n return\n player = ctx.message.author.name\n if player.lower() in list(tod_games[room]['participants'].keys()):\n await amor_manager.say(\"{}... you're already playing Truth or Dare here!\".format(room))\n else:\n tod_games[room]['participants'][player.lower()] = {'spins': 0}\n await amor_manager.say(\"{} has joined Truth or Dare!\".format(player))", "def buildKey( self, needle ):\n return '§'.join([needle[k] for k in self._keys])", "def crack_keys(card_pk, door_pk): \n card_loop = find_loop_size(card_pk)\n door_loop = find_loop_size(door_pk)\n\n card_key = transform(door_pk, card_loop)\n door_key = transform(card_pk, door_loop)\n\n assert card_key == door_key\n \n return card_key", "def join_kadaster_bag_info(kadaster_gdf, bag_gdf):\n return gpd.sjoin(bag_gdf, kadaster_gdf, op=\"within\")", "def update_parties(self, *parties) -> None:\n\n for party in parties:\n self._route_table['route_table'][party.get_id()] = party.to_entry_point(\n )", "def adjoint(self): # pragma: no cover\r\n raise NotImplementedError()", "def natural_join(*Rs):\n for rs in product(*Rs):\n #need to test all combintions of table rows to see if they conflict\n if all([dict_match(r, s) for r, s in combinations(rs, 2)]):\n yield reduce(dict_merge, rs, {})", "def join(cfg, model):\n cfg[\"fake\"] = \"Faker\"\n cfg[\"random_state\"] = cfg[\"random_state\"][0]\n del cfg[\"developers\"]\n cfg[\"model\"] = str(model)\n return cfg", "def outerjoin(self, target, onclause=None, full=False):\n return self.join(target, onclause=onclause, isouter=True, full=full,)", "def join(self, name):\n \n if name in self.roomList:\n pass\n else:\n self.sendCommand(\"global /join\",name)", "def hash_key(self):", "def test_join(self):\n pig = game.pig.Pig('PlayerA', 'PlayerB', 'PlayerC')\n self.assertEqual(pig.get_players(), ('PlayerA', 'PlayerB', 'PlayerC'))", "def test_get_opening_balance_journals_key(self):\n pass", "def award_recipient_agg_key(record: dict) -> str:\n if record[\"recipient_hash\"] is None or record[\"recipient_levels\"] is None:\n return json.dumps(\n {\n \"name\": record[\"recipient_name\"],\n \"duns\": record[\"recipient_unique_id\"],\n \"uei\": record[\"recipient_uei\"],\n \"hash\": \"\",\n \"levels\": \"\",\n }\n )\n return json.dumps(\n {\n \"name\": record[\"recipient_name\"],\n \"duns\": record[\"recipient_unique_id\"],\n \"uei\": record[\"recipient_uei\"],\n \"hash\": str(record[\"recipient_hash\"]),\n \"levels\": record[\"recipient_levels\"],\n }\n )", "def join(self, other):\n # In fact we simply will join the counters of this bundler with the\n # counters of the specified bundler.\n pairs = zip(self.gauges, other.gauges)\n\n # Wrap the call into the list conversion, since the imap method returns\n # a generator.\n list(map(lambda ab: ab[0].join(ab[1]), pairs))\n\n # It is important to return the referece to ourselves,\n # as it will be used as an accumulator in the reduce call.\n return self", "def get_all_party_id_name() -> List[Tuple]:\n\n # Open a new connection\n db, cursor = db_connector.cursor()\n\n query = \"select id, name from party\"\n cursor.execute(query)\n data = cursor.fetchall()\n db.disconnect()\n return data" ]
[ "0.5744274", "0.5568393", "0.54927176", "0.52470684", "0.5210263", "0.5195514", "0.5135702", "0.51080775", "0.5093896", "0.50771976", "0.50136244", "0.50063086", "0.499069", "0.4975437", "0.49622023", "0.49614578", "0.49185023", "0.49098763", "0.48865524", "0.48730585", "0.4871309", "0.48539183", "0.48362428", "0.48279873", "0.48244128", "0.4823323", "0.48230946", "0.48159537", "0.48153108", "0.47877285", "0.47508365", "0.47181457", "0.46999058", "0.46921566", "0.4667044", "0.46642765", "0.4661253", "0.46474746", "0.46468857", "0.46391717", "0.46379432", "0.46367788", "0.46273065", "0.4612919", "0.4609411", "0.46024185", "0.45989776", "0.45909527", "0.45902586", "0.4588223", "0.45871738", "0.45852768", "0.45842335", "0.45807314", "0.45707598", "0.45694196", "0.45598", "0.45587066", "0.45534566", "0.45419934", "0.45362762", "0.4523753", "0.4517357", "0.4512357", "0.45115465", "0.45073906", "0.45071805", "0.45053446", "0.45038587", "0.4493331", "0.44824395", "0.4470308", "0.44702244", "0.44694635", "0.44644767", "0.44579473", "0.44567674", "0.44517255", "0.4451682", "0.44477066", "0.44465372", "0.44447553", "0.44420522", "0.44415638", "0.4440743", "0.4438095", "0.44373935", "0.44369656", "0.44344056", "0.44327155", "0.44308275", "0.4426067", "0.44244173", "0.4423415", "0.44220808", "0.4419755", "0.44155034", "0.44107428", "0.44052452", "0.44022486" ]
0.63631517
0
Generator for [n] number of names of lines.
def _get_names(n):
    from itertools import product

    def f():
        m = 1
        while True:
            it = product(*tuple([string.ascii_uppercase]*m))
            for c in it:
                yield ''.join(c)
            m += 1

    for _, c in zip(range(n), f()):
        yield c
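A self-contained sketch (not part of the source row) that reproduces the spreadsheet-style naming scheme of _get_names; it explicitly imports string, which the original snippet assumes is available at module level.

# Hedged, standalone sketch of the A, B, ..., Z, AA, AB, ... naming scheme above.
import string
from itertools import product

def spreadsheet_names():
    m = 1
    while True:
        for c in product(string.ascii_uppercase, repeat=m):
            yield ''.join(c)
        m += 1

names = [name for _, name in zip(range(30), spreadsheet_names())]
print(names[:5])   # ['A', 'B', 'C', 'D', 'E']
print(names[25:])  # ['Z', 'AA', 'AB', 'AC', 'AD']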
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_words(self,N):\n for i in xrange(N):\n prefix = \" \" * self.chainlen\n name = \"\"\n suffix = \"\"\n while True:\n suffix = self.get_suffix(prefix)\n if suffix == \"\\n\" or len(name) > 9:\n break\n else:\n name = name + suffix\n prefix = prefix[1:] + suffix\n yield name.capitalize()", "def generate_reader(n):\n counter = 1\n for i in range(n):\n name = generate_reader_name()\n if not name in readers:\n readers[name] = f'Reader/{counter}'\n counter += 1", "def generate_predictable_names():\n index = 0\n while True:\n index += 1\n yield f\"_{index}\"", "def test_python_name_generator():\n assert list(itertools.islice(get_python_name_generator(\"Some name\"), 3)) == [\n \"test_some_name\",\n \"test_some_name_1\",\n \"test_some_name_2\",\n ]", "def Names():\n for i in range(ida_name.get_nlist_size()):\n ea = ida_name.get_nlist_ea(i)\n name = ida_name.get_nlist_name(i)\n yield (ea, name)", "def makeTableNamesList(n, ):", "def _gennames(prefix, base, number):\n for index in xrange(number):\n yield \"%s%d\" % (prefix, base + index)", "def line(n, str):\n\n return_value = ''\n for _ in range(n):\n return_value += str\n return return_value", "def list_lines(self, num):\n return self.list_lines_gen(self.go_forward, num=num)", "def name_generator(names):\n name = ''\n for i, n in enumerate(names):\n if i % 2 == 0:\n name = n[:(roundup(len(n) / 2))]\n else:\n name = name + n[-(roundup(len(n) / 2)):]\n return name", "def takeNGenerator(seq, n):\n\tindex = 0\n\twhile index + n <= len(seq):\n\t\tyield seq[index:index + n]\n\t\tindex = index + 1", "def generate_resource_names(num):\n assert num > 0\n return [generate_resource_name(i) for i in range(num)]", "def nine_lines() -> str:\n print('now printing 9 lines')\n for _ in range(3):\n three_lines()", "def generate_string_list(self, n):\n template_str = 'string_'\n string_list = [template_str + str(i) for i in range(n)]\n return string_list", "def generate_rows(n):\n for i in range(n):\n yield [\n # seq\n i,\n # guid-like id\n hashlib.sha224(bytes(i)).hexdigest(),\n # seq\n i,\n # seq\n i,\n # cc_number \n fake.credit_card_number(card_type=None),\n # expire_date\n fake.date_between('-6y', '+0y').strftime(\"%m/%d/%Y\"),\n # billing_address\n fake.address(),\n ]", "def name_list(length, **kwargs):\n return list(itertools.islice(name_supply(**kwargs), length))", "def get_random_n_cleaned_names(name_list, n=100):\n random_name_list = []\n for i in range(n):\n random_name_list += [get_random_name(name_list)]\n\n return random_name_list", "def line(n):\n\n return_value = ''\n for _ in range(n):\n return_value += '#'\n return return_value", "def firstn(reader, n):\n\n # TODO(yuyang18): Check if just drop the reader, could clean the opened\n # resource or not?\n\n def firstn_reader():\n for i, item in enumerate(reader()):\n if i == n:\n break\n yield item\n\n return firstn_reader", "def generate_random_names(n=10):\r\n first_names_list_file = 'first_names_list.txt'\r\n last_names_list_file = 'last_names_list.txt'\r\n\r\n with open(r'C:/Users/VladB/Documents/GitHub/a678-BVlad917/' + first_names_list_file, 'r') as file:\r\n first_names = file.readlines()\r\n random_first_names = random.sample(first_names, n)\r\n random_first_names = [random_first_name.title().strip() for random_first_name in random_first_names]\r\n\r\n with open(r'C:/Users/VladB/Documents/GitHub/a678-BVlad917/' + last_names_list_file, 'r') as file:\r\n last_names = file.readlines()\r\n random_last_names = random.sample(last_names, n)\r\n random_last_names = 
[random_last_name.title().strip() for random_last_name in random_last_names]\r\n\r\n return zip(random_first_names, random_last_names)", "def rse_name_generator(size=10):\n return 'MOCK_' + ''.join(choice(ascii_uppercase) for x in range(size))", "def train(self, n, filename):\n self.n = n\n for line in open(filename):\n samp = line.rstrip('\\n')\n# samp = '~' + samp + '~'\n for i in range(len(samp) - n):\n w = samp[i:i + n]\n self.counts[w] += 1\n self.total_count += 1", "def make_nodes(n):\n return [\n protein(namespace='NS', name=str(i))\n for i in range(1, n)\n ]", "def generate_artificial_names(seed=\"\", num_names=1):\n generated_names = []\n \n stop = False\n while not stop:\n # generate names more than needed as some names may exist in real life\n num_needed_names = (num_names - len(generated_names)) * 3 // 2\n names = generate_names(seed=seed, num_names=num_needed_names)\n \n # check whether names are in dataset or not\n for name in names:\n if not is_real_name(name):\n generated_names.append(name)\n if len(generated_names) == num_names:\n stop = True\n break\n \n return generated_names", "def generate_artificial_names(seed=\"\", num_names=1):\n generated_names = []\n \n stop = False\n while not stop:\n # generate names more than needed as some names may exist in real life\n num_needed_names = (num_names - len(generated_names)) * 3 // 2\n names = generate_names(seed=seed, num_names=num_needed_names)\n \n # check whether names are in dataset or not\n for name in names:\n if not is_real_name(name):\n generated_names.append(name)\n if len(generated_names) == num_names:\n stop = True\n break\n \n return generated_names", "def makeDatabaseNamesList(n, ):", "def load_files(self, n=None):\n if not n:\n n = len(self.files)\n\n for _, name in zip(list(range(n)), self.files):\n yield self.load_file(name)", "def split_by_n(seq, n):\n while seq:\n yield seq[:n]\n seq = seq[n:]", "def _current_line_nr_gen(self):\n line_nr = -1\n while True:\n line_nr += 1\n yield line_nr", "def chunks(l, n):\n for i in range(0, n):\n yield l[i::n]", "def generate(self, lines: List[str]) -> None:\n raise NotImplemented()", "def get_names(lines): \n next = False \n names = []\n for line in lines:\n if next:\n if len(line) == 1:\n break\n else:\n tmp = line.split()\n names.append(tmp[1])\n if line.startswith('Sequences loaded ...'):\n next = True\n return names", "def set_name(self, n, line_number=0):\n self.name = n\n self._name_line = line_number", "def test_func_generator_name():\n for i in range(0, 4):\n yield 'try_odd', i", "def check_file_header(fnames, nlines=5):\n from itertools import islice\n for fname in fnames:\n print(f\"\\nPrinting header from {fname} \\n#########################################\")\n with open(fname) as f:\n head = list(islice(f, nlines))\n for line in head:\n print(line)", "def _chunk(self, l, n):\n for i in range(0, len(l) + 1, n):\n yield l[i:i + n]", "def kmers(self, n: int, step: int = 1) -> Generator:\n return (\n Seq(self.sequence[i : i + n]) for i in range(0, len(self.sequence), step)\n )", "def _gen_rand_name(n=10):\n # Ensure the name starts with a letter.\n return ''.join([random.choice(LETTER_LIST)]\n + random.choices(CHAR_LIST, k=n-1))", "def __chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def open_file(file,n,n1=0):\n\tfin = open(file)\n\tbook_lines = []\n\tcount = 0\n\tfor line in fin:\n\t\tword = line.strip()\n\t\tcount += 1\n\t\tif count > n:\n\t\t\tbook_lines.append(word)\n\treturn book_lines", "def chunks(parts, n):\n for i in range(0, 
len(parts), n):\n yield parts[i:i+n]", "def split_by_n( seq, n ):\n while seq:\n yield seq[:n]\n seq = seq[n:]", "def get_chunks(self, l, n):\r\n for i in range(0, len(l), n):\r\n yield l[i:i+n]", "def _var_name_generator():\n count = itertools.count()\n while True:\n yield '_var_' + str(count.next())", "def chunks(l, n): # noqa: E741\n for i in range(0, len(l), n):\n yield l[i : i + n] # noqa: E203", "def generate_name(file_name, index):\n name = ''\n with open(file_name) as names:\n for i, line in enumerate(names):\n if i == index:\n name += line.split(None, 1)[0]\n return name", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i+n]", "def chunks(l, n):\r\n for i in range(0, len(l), n):\r\n yield l[i:i + n]", "def chunks(self, l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def create_n_defenders(n, rs_nb, hp_proportion, hp_unit_cost=0, offset=0, name=\"\"):\n defenders = []\n for i in range(offset,n+offset):\n if(name != \"\"):\n d = Defender(i,rs_nb,hp_proportion=hp_proportion,hp_unit_cost=hp_unit_cost, name=name)\n else:\n d = Defender(i,rs_nb,hp_proportion=hp_proportion,hp_unit_cost=hp_unit_cost)\n defenders.append(d)\n return defenders", "def get_layer_names_gen(self):\n for lyr in self.get_layer_names_as_array():\n yield lyr", "def _header_line_generator(file_name):\n with FileSystems.open(file_name) as f:\n record = None\n while True:\n record = f.readline().decode('utf-8')\n while record and not record.strip(): # Skip empty lines.\n record = f.readline().decode('utf-8')\n if record and record.startswith('#'):\n yield record\n else:\n break\n yield record", "def range(self, n):\n for i in range(n):\n yield self.get()", "def generate_batches(source,n=20):\n blist=[]\n with open(source) as f_source:\n start=next_n_lines(f_source, n); string=gen_string(start); blist.append(string)\n while start!=[]: start=next_n_lines(f_source, n); string=gen_string(start); blist.append(string)\n print('2. 
Generation of batches completed!')\n return blist", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i+n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i+n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i+n]", "def gen_items(n, fmt, obj):\n if n == 0:\n return gen_item(fmt, obj)\n lst = [0] * n\n for i in range(n):\n lst[i] = gen_item(fmt, obj)\n return lst", "def gen_items(n, fmt, obj):\n if n == 0:\n return gen_item(fmt, obj)\n lst = [0] * n\n for i in range(n):\n lst[i] = gen_item(fmt, obj)\n return lst", "def sample(f, n):\n entries = list(SeqIO.parse(f, 'fasta'))\n for seqnum in range(n):\n loc = round(random.uniform(0, len(entries) - 1))\n entry = entries[loc] # get index of randomly-selected FASTA entry\n header = '>' + str(seqnum + 1) + '-' + entry.description # header\n print(header + '\\n' + str(entry.seq)) # print-out entire entry", "def get_line(lines):\n for line in lines:\n yield line", "def get_line(lines):\n for line in lines:\n yield line", "def _chunks(l, n):\n\tfor i in range(0, len(l), n):\n\t\tyield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def _chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in xrange(0, len(l), n):\n yield l[i:i+n]", "def user_gen(usernames_number = 1):\n for i in range(usernames_number):\n name1 = random.choice(words).title()\n name2 = random.choice(words).title()\n str_number = str(random.randint(1, 100)) \n print(f\"{name1}{name2}{str_number}\")", "def getEntryNames(self,lines):\n lines = self.stripText(lines)\n #check if the first line is a count, ignore if it is\n try:\n linecount = parseNum(lines[0])\n lines.pop(0)\n except ValueError:\n pass\n return lines", "def read_input():\n f = open(INPUT_FILE, 'r')\n \n number_of_lines = int(f.readline())\n for i in range(number_of_lines):\n yield [i+1, f.readline().split()]\n f.close()", "def generate_unseen_names(seed=\"\", num_names=1):\n generated_names = []\n \n i = 0\n while i < num_names:\n # generate a name\n name = generate_name(seed=seed)\n \n # check whether name is in dataset or not\n if not is_real_name(name):\n generated_names.append(name)\n i += 1\n \n return generated_names", "def unique_name():\n num = 0\n while True:\n yield \"theta_\" + str(num)\n num += 1", "def splitFile(filename, n):\n in_file = open(filename)\n line = in_file.readline()\n count = 0\n while line <> \"\":\n if count < 10: num = \"0\"+str(count)\n else: num = str(count)\n f = open(\"output/\"+filename+\"-\"+num,\"w\")\n for i in range(n):\n if line == \"\": break\n f.write(line)\n line = in_file.readline()\n f.close()\n count += 1\n return count", "def _read_skeleton(self, lines, line_index=0, n_lines=-1):\n line_index = line_index\n parents = []\n level = 0\n name = None\n if n_lines == -1:\n n_lines = len(lines)\n\n while line_index < n_lines:\n if lines[line_index].startswith(\"MOTION\"):\n break\n\n else:\n if \"{\" in lines[line_index]:\n parents.append(name)\n level += 1\n\n if \"}\" in lines[line_index]:\n level -= 1\n parents.pop(-1)\n if level == 0:\n break\n\n line_split = lines[line_index].strip().split()\n\n if line_split:\n\n if line_split[0] == \"ROOT\":\n name = line_split[1]\n self.root = name\n self.node_names[name] = {\n \"children\": [], \"level\": level, \"channels\": [], \"channel_indices\": []}\n\n elif line_split[0] == \"JOINT\":\n name = line_split[1]\n self.node_names[name] = {\n 
\"children\": [], \"level\": level, \"channels\": [], \"channel_indices\": []}\n self.node_names[parents[-1]][\"children\"].append(name)\n\n elif line_split[0] == \"CHANNELS\":\n for channel in line_split[2:]:\n self.node_channels.append((name, channel))\n self.node_names[name][\"channels\"].append(channel)\n self.node_names[name][\"channel_indices\"].append(len(self.node_channels) - 1)\n\n elif line_split == [\"End\", \"Site\"]:\n name += \"_\" + \"\".join(line_split)\n self.node_names[name] = {\"level\": level}\n # also the end sites need to be adde as children\n self.node_names[parents[-1]][\"children\"].append(name)\n\n elif line_split[0] == \"OFFSET\" and name in list(self.node_names.keys()):\n self.node_names[name][\"offset\"] = list(map(float, line_split[1:]))\n line_index += 1\n return line_index", "def chunks(self, l, n):\n for i in xrange(0, len(l), n):\n yield l[i:i+n]", "def chunks(cls, l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in xrange(0, len(l), n):\n yield l[i:i+n]", "def repeat(seq, n):\n for e in seq:\n for _ in range(n):\n yield e", "def chunk(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\r\n for i in range(0, len(l), n):\r\n yield l[i:i+n]", "def _gen_txt_data(self, f):\n\t\treader = iter(f)\n\n\t\tfor line_num, line in enumerate(reader):\n\t\t\tif line_num == 0 and self.has_header:\n\t\t\t\tcontinue\n\n\t\t\tdatum = line.rstrip('\\r\\n')\n\n\t\t\tyield datum, line_num+1", "def test_generator_method_name(self):\n for i in range(0, 4):\n yield 'try_odd', i", "def name_generator(size=8, chars=string.ascii_uppercase + string.digits):\n return ''.join(random.choice(chars) for _ in range(size))", "def head(filename, lines=5):\n from itertools import islice\n with open(filename, \"r\") as f:\n return list(islice(f, lines))", "def chunks(self, l, n):\n yield l[:n-1]\n for i in range(n-1, len(l), n):\n yield l[i:i+n]", "def chunks(l, n):\r\n for i in range(0, len(l), n):\r\n yield l[i:i + n]", "def chunks(l, n):\r\n for i in range(0, len(l), n):\r\n yield l[i:i + n]", "def chunks(l, n):\r\n for i in range(0, len(l), n):\r\n yield l[i:i + n]", "def chunks(l, n):\r\n for i in range(0, len(l), n):\r\n yield l[i:i + n]", "def chunks(l, n):\r\n for i in range(0, len(l), n):\r\n yield l[i:i + n]", "def chunks(l, n):\n for i in xrange(0, len(l), n):\n yield l[i:i + n]", "def chunks(l: List, n: int):\n for i in range(0, len(l), n):\n yield l[i : i + n] # noqa: E203", "def chunks(l: List, n: int):\n for i in range(0, len(l), n):\n yield l[i : i + n] # noqa: E203", "def print_line(n):\n for i in range(1,n+1):\n str1 = ('*' * (i))\n print(str1)", "def line_iter(s):\n\n i=0\n n=len(s)\n while i<n:\n j=s.find('\\n',i)\n if j<0:\n yield s[i:]\n j=n\n else:\n yield s[i:j]\n j+=1\n i=j", "def rank_name_generator(name):\n roman_numbers = [\"I\", \"II\", \"III\", \"IV\", \"V\", \"VI\", \"VII\"]\n ranks = [\"{} {}\".format(name, n) for n in roman_numbers]\n return ranks", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i: i+n]", "def _iterate_lines(cls, text) -> typing.Generator[str, None, None]:\n for line in text.split('\\n'):\n yield line, line.lstrip().startswith(cls._CHECKBOX)", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i+n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i+n]" ]
[ "0.6696066", "0.6580343", "0.6550729", "0.6342123", "0.6274895", "0.6262439", "0.5949424", "0.58654", "0.58533746", "0.58261436", "0.57652855", "0.5749398", "0.5741963", "0.5740459", "0.5722183", "0.5720189", "0.57149017", "0.5682527", "0.5658359", "0.56177384", "0.5612223", "0.557943", "0.55705625", "0.5566015", "0.5566015", "0.5564365", "0.55534303", "0.55460083", "0.5500968", "0.5493926", "0.5474533", "0.54722756", "0.5468564", "0.5459077", "0.5455899", "0.5439868", "0.543244", "0.54297173", "0.5421169", "0.54198885", "0.539803", "0.5398022", "0.5384268", "0.53825384", "0.53795075", "0.5370048", "0.5364936", "0.5363994", "0.535699", "0.5352826", "0.53353584", "0.5335343", "0.53206646", "0.5319338", "0.5310542", "0.5310542", "0.5310542", "0.53066283", "0.53066283", "0.5305288", "0.5298121", "0.5298121", "0.5290473", "0.5288464", "0.5288464", "0.52817047", "0.5274513", "0.5266172", "0.52577883", "0.5254623", "0.52503216", "0.52460045", "0.5235159", "0.52312803", "0.52266204", "0.5221771", "0.52096426", "0.5206596", "0.5203834", "0.5202433", "0.5201564", "0.5199689", "0.5191131", "0.5179456", "0.51739573", "0.51736474", "0.51736474", "0.51736474", "0.51736474", "0.51736474", "0.51728153", "0.5169777", "0.5169777", "0.5149286", "0.5143706", "0.51353323", "0.5133028", "0.51225156", "0.51199645", "0.51199645" ]
0.65257424
3
Generate [n] random lines, and initialize internal structures.
def __init__(self, lines, names): # from graphing import Graph self.lines = lines self.remaining_events = [] leftmost = _MAX_RIGHT for i, (name, left, right) in enumerate(self.lines): self.lines[i] = (name, left-leftmost, right-leftmost) for i, (name, left, right) in enumerate(self.lines): self.remaining_events.append((left, i)) self.remaining_events.append((right, i)) self.remaining_events.sort() self.active_line_segments = [] self.sweep_line = None self.is_done = False self.idx = 0 self.a_line = None self.overlap_graph = nx.Graph(names) # self.interval_graph = nx.Graph(names)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def random(self, n=1):\n # self.num_generated += n", "def generate_synth_data(n):", "def _make_io_examples(self, n):\n rand = random.Random(6849275409234) # Test cases are fixed, but varied.\n io_examples = [\n ([4, 0], [4, 0]),\n ([0, 5], [5, 0]),\n ([1, 2], [3, 0]),\n ([67, 21], [88, 0]),\n ([55, 56], [111, 0]),\n ([128, 33], [161, 0]),\n ([221, 251], [216, 0]),\n ([130, 127], [1, 0]),\n ([255, 1], [0, 0])]\n extra_examples = max(n - len(io_examples), 0)\n for _ in xrange(extra_examples):\n a = rand.randrange(256)\n b = rand.randrange(256)\n input_seq = [a, b]\n output_seq = [(a + b) % 256, 0]\n io_examples.append((input_seq, output_seq))\n return io_examples", "def simplestRandom(n):\n # do something \"perlin noise like\" - with various frequency scales\n level1 = numpy.random.randint(0,4,size=4)\n level2 = numpy.random.randn(10)\n level3 = numpy.random.randn(50) * .5\n # make splines for each\n u1 = INTERP.UnivariateSpline(numpy.linspace(0,1,4) ,level1,s=0)\n u2 = INTERP.UnivariateSpline(numpy.linspace(0,1,10),level2,s=0)\n u3 = INTERP.UnivariateSpline(numpy.linspace(0,1,50),level3,s=0)\n # build the signal on the range 0..1 - then use linspace to sample it\n samples = numpy.linspace(0,1,n)\n return numpy.array([u1(u)+u2(u)+u3(u) for u in samples])", "def sample(f, n):\n entries = list(SeqIO.parse(f, 'fasta'))\n for seqnum in range(n):\n loc = round(random.uniform(0, len(entries) - 1))\n entry = entries[loc] # get index of randomly-selected FASTA entry\n header = '>' + str(seqnum + 1) + '-' + entry.description # header\n print(header + '\\n' + str(entry.seq)) # print-out entire entry", "def _random_pr_init(self, r, n, _random_prec_n=None):\n deg = self.degree\n random_gens = [x._array_form for x in self.generators]\n k = len(random_gens)\n if k < r:\n for i in range(k, r):\n random_gens.append(random_gens[i - k])\n acc = list(range(deg))\n random_gens.append(acc)\n self._random_gens = random_gens\n\n # handle randomized input for testing purposes\n if _random_prec_n is None:\n for i in range(n):\n self.random_pr()\n else:\n for i in range(n):\n self.random_pr(_random_prec=_random_prec_n[i])", "def _make_io_examples(self, n):\n rand = random.Random(6849275409234) # Test cases are fixed, but varied.\n io_examples = []\n for _ in xrange(n):\n n = rand.randrange(self.base)\n if n == 0:\n a, b = 0, rand.randrange(self.base)\n else:\n f = list(self._factors(n))\n a = f[rand.randrange(len(f))]\n b = n // a\n if rand.randrange(2):\n a, b = b, a\n io_examples.append(([a, b], [n]))\n return io_examples", "def sample(n, seed= 0):\n data = list(range(n))\n while True:\n np.random.seed(seed)\n np.random.shuffle(data)\n yield from data", "def lines_printed_random(lines_list):\n\n for _ in range(len(lines_list)):\n i = len(lines_list) - 1\n print(\n lines_list[randint(0, i)]\n )", "def generate_rows(n):\n for i in range(n):\n yield [\n # seq\n i,\n # guid-like id\n hashlib.sha224(bytes(i)).hexdigest(),\n # seq\n i,\n # seq\n i,\n # cc_number \n fake.credit_card_number(card_type=None),\n # expire_date\n fake.date_between('-6y', '+0y').strftime(\"%m/%d/%Y\"),\n # billing_address\n fake.address(),\n ]", "def generate_samples(self, n_samples):", "def generate_samples(self, n_samples):", "def random_sample(self, n):\n indices = random.sample(xrange(np.shape(self.data)[0]), n)\n table = DataTable(self.data[indices], self.dims, self.legends, self.tags.copy())\n return table", "def get_random_baselines(n_trials=5):\n if not os.path.exists(\"random-baseline/\"):\n os.makedirs(\"random-baseline/\")\n r = 
0.3\n ori_sents = joblib.load('data/train-isw-sentences.pkl')\n ori_labels = joblib.load('data/train-isw-labels.pkl')\n assert len(ori_sents) ==len(ori_labels)\n\n # Zip the sents and its tags\n train_set = []\n for sent, label in zip(ori_sents, ori_labels):\n train_set.append((sent, label))\n assert len(train_set) == len(ori_sents)\n\n len_subsample = int(len(train_set)*r)\n\n # Save random samples\n for i in range(n_trials):\n random_set = choices(train_set, k=len_subsample)\n joblib.dump(random_set, 'random-baseline/random-train-r{}.pkl'.format(i))\n\n # Train and eval random baseline models\n for i in range(n_trials):\n train_script = \"python run_ner.py --output_dir random-baseline/trial{}_model/ --max_seq_length 128 --do_train --do_subtrain --subtrain_dir random-baseline/random-train-r{}.pkl\".format(i, i)\n os.system(train_script)\n\n eval_script = \"python run_ner.py --output_dir random-baseline/trial{}_model/ --do_eval --eval_on test --eval_dir random-baseline/eval_monitor/ --it_prefix {}\".format(i, i)\n os.system(eval_script)", "def initialiser(N, dimensions = 2):\r\n \r\n #shape for correct dimensions\r\n shape = tuple([N]) * dimensions\r\n \r\n #randomise spins\r\n lattice = np.random.choice([1,-1], size = shape)\r\n \r\n return lattice", "def generate_random_list(self, n):\n return [self.generate_random_payload((int, str, float, bool, list, dict)) for i in range(n)]", "def example7(n):\n return mvmt.randomize(tile, n)", "def create_n_items(n):\n total_objects = models.Item.objects.all().count()\n for i in range(n):\n models.Item.objects.create(\n name=\"Randomly generated object {}\".format(i+total_objects),\n value=random.random() * 1000000\n )", "def __init__(self, args, n = 1):\n self.args = args\n self.n = n\n # The variable that holds a list of string representations of\n # randomly generated HTML documents\n self.doc_strings = []\n print_verbose(\n \"INFO : Generating random HTML documents...\",\n self.args.verbose,\n )\n # Generate a number of random HTML documents\n for i in range(self.n):\n self.doc_strings.append(self.generate_document())", "def generate_random_texts(n):\n assert n >= 0\n global FirstText, SecondText\n FirstText = str(\"\".join([random.choice(string.letters[:26]) for i in xrange(n)]))\n SecondText = str(\"\".join([random.choice(string.letters[:26]) for i in xrange(n)]))", "def create_random_points(n):\n\n\treturn [(random.randint(0,n),random.randint(0,n)) for i in range(n)]", "def random(cls, n=2):\n angles = [Motion(math.pi*Vector.random(d=1), Vector.null(d=1),\n Vector.null(d=1)) for i in range(n)]\n lengths = [random.uniform(0, 1) for i in range(n)]\n masses = [random.uniform(0, 1) for i in range(n)]\n return cls(angles, lengths, masses)", "def __init__(self, n_rows: int = 2, n_columns: int = 2):\n self.set_uniform(n_rows, n_columns)", "def totem_random():\n random_head()\n random_head()\n random_head()", "def generate(self, n_enemies, n_blocks):\n self.create_map()\n self.create_floor()\n self.player = self.create_player_at(0, 0)\n self.create_dummy_obj_at(0, 1)\n self.create_dummy_obj_at(1, 0)\n self.create_soft_block_at(0, 2)\n self.create_soft_block_at(2, 0)\n self.create_hard_blocks() \n self.create_enemies(n_enemies)\n self.create_soft_blocks(n_blocks) \n self.clear_dummy_obj()", "def getRandomList(n):\n lyst = list()\n for count in range (n):\n lyst.append(random.randint(1, n))\n return lyst", "def generate(self, n, d):\n\n self.n = n\n self.d = d\n self.X = np.random.rand(n, d)\n self.Y = np.random.choice([0, 1], size=n)", "def 
_make_io_examples(self, n, max_len):\n rand = random.Random(6849275409234) # Test cases are fixed, but varied.\n io_examples = []\n io_examples.append(([10, 0], [0]))\n io_examples.append(([1, 0], [1]))\n io_examples.append(([1, 1, 0], [2]))\n io_examples.append(([9, 4, 19, 11, 5, 0], [0]))\n io_examples.append(([24, 11, 26, 1, 15, 0], [1]))\n for _ in xrange(n - 5):\n length = rand.randrange(2, max_len + 1)\n num_chars = rand.randrange(0, max_len + 1)\n input_seq = [self.char] * num_chars + [0] * (length - num_chars)\n rand.shuffle(input_seq)\n for i in xrange(len(input_seq)):\n if not input_seq[i]:\n input_seq[i] = self.other_chars[rand.randrange(len(self.other_chars))]\n output_seq = [num_chars]\n io_examples.append((input_seq, output_seq))\n return io_examples", "def _make_io_examples(self, n, min_len, max_len):\n rand = random.Random(6849275409234) # Test cases are fixed, but varied.\n io_examples = []\n for _ in xrange(n):\n length = rand.randrange(min_len, max_len + 1)\n input_seq = [rand.randrange(1, self.base) for _ in xrange(length)]\n output_seq = list(input_seq)\n output_seq.reverse()\n output_seq.append(0)\n io_examples.append((input_seq, output_seq))\n return io_examples", "def get_n_random_itrees(self, n, subs_size):\n random_itrees = np.empty(n, dtype=object) # Allocate list for storing the trees.\n # TODO: parallelize!\n for k in np.arange(n):\n # Get a random sample of training examples to build next random itree.\n data_sub = self.data[np.random.choice(self.data.shape[0], subs_size, replace=False), :]\n random_itrees[k] = self.get_random_itree(data_sub) # Get next random itree \n self.random_itrees = random_itrees\n self.subs_size = subs_size", "def initRandomGraph(ctor,n,m):\n\tg=ctor(n)\n\taddedEdges=0\n\twhile addedEdges < m:\n\t\tx=random.randrange(0,n)\n\t\ty=random.randrange(0,n)\n\t\tif not g.isEdge(x,y):\n\t\t\tg.addEdge(x,y)\n\t\t\taddedEdges+=1\n\treturn g", "def _make_io_examples(self, n, min_len, max_len):\n rand = random.Random(6849275409234) # Test cases are fixed, but varied.\n io_examples = []\n for _ in xrange(n):\n length = rand.randrange(min_len, max_len + 1)\n rm_char_pos = rand.randrange(0, length)\n input_seq = [rand.randrange(1, self.base) for _ in xrange(length)]\n input_seq[rm_char_pos] = self.remove_char\n output_seq = list(input_seq)\n del output_seq[rm_char_pos]\n output_seq.append(0)\n io_examples.append((input_seq, output_seq))\n return io_examples", "def write_line(maxn):\n width = len(str(maxn)) + 1\n line = ([f'{random.randint(-maxn,maxn):{width}}'] +\n list(\n chain.from_iterable((random.choice('+-'), \n f'({random.randint(-maxn,maxn):{width}})')\n for _ in range(3))) +\n ['= ____'])\n return ' '.join(line)", "def generate_random_dict(self, n):\n dict_content = (int, str, list, dict)\n return {self.generate_random_string(10): self.generate_random_payload(dict_content)\n for i in range(n)}", "def Sample(n=6):\n t = [random.normalvariate(0.0, 1.0) for i in range(n)]\n t.sort()\n return t", "def initialize(self):\n N=self.N\n M=[]\n a=random.rand(self.d,1,self.D)\n M.append(a)\n for i in range(1,N-1):\n a=random.rand(self.d,self.D,self.D)\n M.append(a)\n a=random.rand(self.d,self.D,1)\n M.append(a)\n return M", "def add_random_fields(smali_line):\n for _ in range(u.random_nop_interval()):\n print re.sub(r':', u.get_random(True, 32) + ':', smali_line), # Append", "def init_sim(self,n):\n self.beacon = beacon(ENABLE_BEACON_DELAY)\n self.data = data_utils(n)\n random.seed()\n\n if n < 3:\n print 'Number of receivers %i is less than three.' 
%n\n print 'Simulation controller will not run.'\n print 'Now exiting.'\n sys.exit()\n \n self.data.set_rx_number(n)\n\n\n\n tx_loc = test_coords.get_tx_coords()\n self.data.set_tx_location(tx_loc)\n # self.data.reset_rx_location()\n\n for i in range(n):\n rx_loc = alex_random.get_random_coord()\n if self.DEBUG:\n print \"\\n\\n\\n\\n\\n\\nstore location: \", rx_loc\n print '\\n\\n\\n\\n\\n\\n'\n self.data.set_rx_location(i,rx_loc)\n\n tof = self.geo_utils.time_of_flight(rx_loc,tx_loc)\n self.data.set_rx_time_delay(tof)\n\n id = i+1\n self.data.set_rx_team_id(id)\n\n if self.DEBUG:\n print 'tx_loc: ', tx_loc\n print 'rx_loc: ', rx_loc\n print 'time: ', repr(tof)\n print 'id: ', id", "def _generateblocks(self, n):\n if self.key is None:\n raise AssertionError('generator must be seeded before use')\n result = b''\n for i in range(n):\n result += self._cipher.encrypt(self.counter())\n return result", "def generate_reader(n):\n counter = 1\n for i in range(n):\n name = generate_reader_name()\n if not name in readers:\n readers[name] = f'Reader/{counter}'\n counter += 1", "def random_board(n):\r\n \r\n return(np.random.randint(0,n-1, size = n))", "def Gen_RandLine(length, step_max, dims=2):\n \n lineData = np.empty((dims, length))\n lineData[:, 0] = np.random.rand(dims)\n for index in range(1, length):\n step = ((np.random.rand(dims) - 0.5)*step_max)\n lineData[:, index] = lineData[:, index - 1] + step\n return lineData", "def sample(self, n=1):\n raise NotImplementedError", "def set_rf_samples(n):\n forest._generate_sample_indices = (lambda rs, n_samples:\n forest.check_random_state(rs).randint(0, n_samples, n))", "def sample(self, n):\n raise NotImplementedError", "def static_trajectory(Tinit, n):\n for i in xrange(n):\n yield Tinit", "def generate_sequence(self, n=100, initial_state=None):\n\n if initial_state is None:\n if self.pad:\n sequence = [START_OF_SEQ] * self.order\n else:\n sequence = list(random.choice(self.records.keys()))\n else:\n sequence = initial_state[:]\n\n for i in range(n):\n current_state = tuple(sequence[-self.order:])\n next_token = self.sample(current_state)\n sequence.append(next_token)\n\n if next_token == END_OF_SEQ:\n return sequence\n\n return sequence", "def construct_initial_sample(n):\n sample_normal = np.random.normal(size=(n, 3))\n sample_radius = np.linalg.norm(sample_normal, axis=1, keepdims=True)\n sample_cartesian = sample_normal / sample_radius\n sample_polar = cartesian_to_polar(sample_cartesian)\n return np.reshape(sample_polar[:, 1:3], (-1))", "def __init__(self, n, prey_cnt=0, predator_cnt=0):\n # print n, prey_cnt, predator_cnt\n self.grid_size = n\n self.grid = []\n for i in range(n):\n row = [0]*n # row is a list of n zeros\n self.grid.append(row)\n self.init_animals(prey_cnt, predator_cnt)", "def make_random_automaton(n: int)->Automaton:\n seed = (next(rand_num))\n table = [[(next(rand_num)) for _ in range(n)] for _ in range(n)]\n return Automaton(seed, 0.0, table, seed)", "def randomgrid(self, n):\n lam = np.random.random((n, 3))\n return self.normalize(lam)", "def random(cls, n=2):\n return cls(Pendulum.random(n=2))", "def generate(self, num_leafs):\n leafs = self.get_leafs()\n for _ in range(num_leafs):\n box = leafs[np.random.choice(len(leafs))]\n leafs.remove(box)\n ch0, ch1 = box.split()\n self.add_edge(box, ch0)\n self.add_edge(box, ch1)\n leafs.append(ch0)\n leafs.append(ch1)", "def random(self, n=1):\n # Generate a sample using a Van der Corput sequence per dimension.\n # important to have ``type(bdim) == int`` for performance reason\n 
sample = [van_der_corput(n, int(bdim), self.num_generated,\n scramble=self.scramble,\n seed=copy.deepcopy(self.seed))\n for bdim in self.base]\n\n self.num_generated += n\n return np.array(sample).T.reshape(n, self.d)", "def Gen_RandLine(length, dims=2):\n lineData = np.empty((dims, length))\n lineData[:, 0] = np.random.rand(dims)\n for index in range(1, length):\n # scaling the random numbers by 0.1 so\n # movement is small compared to position.\n # subtraction by 0.5 is to change the range to [-0.5, 0.5]\n # to allow a line to move backwards.\n step = ((np.random.rand(dims) - 0.5) * 0.1)\n lineData[:, index] = lineData[:, index - 1] + step\n\n return lineData", "def _sample(self, n=1):\n return [self[i] for i in np.random.choice(self.length, n, replace=False)]", "def batchify(t, n, randomize=True):\n\n inds = np.arange(t)\n if randomize:\n np.random.shuffle(inds)\n\n while len(inds) > 0:\n\n yield inds[:n]\n inds = np.delete(inds, slice(n))", "def random(self, k=1000, n=100):\n a = numpy.random.randint(k, size=n)\n _, self.counts = numpy.unique(a, return_counts=1)\n self.nk, self.zk = numpy.unique(self.counts, return_counts=1)\n return self", "def __init__(self, n: int):\n self.n = n\n self.rows_1 = [0 for _ in range(n + 1)]\n self.rows_2 = [0 for _ in range(n + 1)]\n self.cols_1 = [0 for _ in range(n + 1)]\n self.cols_2 = [0 for _ in range(n + 1)]\n self.diag1 = [0 for _ in range(n + 1)]\n self.diag2 = [0 for _ in range(n + 1)]", "def generate(count):\n lst = []\n with open('data.txt', 'w+') as f:\n for i in range(0, count):\n st = str(random.random())\n f.write(st+\"\\n\")\n lst.append(st)\n return lst", "def test_full_setup(n):\n for x in range(n):\n for y in range(n):\n Stitch(x,y)\n Stitch.stitches[(x,y)].vital = True if round(rnd.random()) == 1 else False", "def fill_repo_with_random_persons(self, n=10, id_lb=1, id_ub=100):\r\n random_ids, random_names, random_phone_numbers = self.generate_random_persons(n, id_lb, id_ub)\r\n for id_, name, phone_num in zip(random_ids, random_names, random_phone_numbers):\r\n self.add_person(id_, ' '.join(name), phone_num)", "def _make_io_examples(self, n, max_len):\n rand = random.Random(6849275409234) # Test cases are fixed, but varied.\n io_examples = []\n io_examples.append(([0], [1]))\n io_examples.append(([4, 2, 0], [1]))\n io_examples.append(([2, 4, 0], [1]))\n io_examples.append(([3, 1, 0], [1]))\n io_examples.append(([1, 3, 0], [1]))\n io_examples.append(([1, 0], [0]))\n io_examples.append(([2, 0], [0]))\n io_examples.append(([3, 0], [0]))\n io_examples.append(([4, 0], [0]))\n for _ in xrange(n):\n is_true = rand.randrange(2)\n length = rand.randrange(1, max_len + 1)\n if is_true:\n # Make a true case.\n length = (length >> 1) << 1 # Make even.\n partition = (rand.randrange(length + 1) >> 1) << 1\n a = partition >> 1\n b = (length - partition) >> 1\n counts = {1: a, 2: b, 3: a, 4: b}\n else:\n # Make a false case.\n partitions = (\n [0]\n + sorted([rand.randrange(length + 1) for _ in range(3)])\n + [length])\n counts = {n: partitions[n] - partitions[n - 1] for n in range(1, 5)}\n if counts[1] == counts[3] and counts[2] == counts[4]:\n # By chance we sampled a true case. 
Make it false by exchanging\n # one count between even and odd pairs.\n base = 1 + 2 * rand.randrange(2)\n a, b = (base, base + 1) if rand.randrange(2) else (base + 1, base)\n if counts[a] == length or counts[b] == 0:\n # If counts are at their extreme values, then swap who gets\n # incremented and decremented.\n a, b = b, a\n counts[a] += 1\n counts[b] -= 1\n assert counts[a] <= length and counts[b] >= 0\n assert sum(counts.values()) == length\n input_seq = [n for n in xrange(1, 5) for _ in xrange(counts[n])]\n rand.shuffle(input_seq)\n input_seq += [0]\n output_seq = self._solve(input_seq)\n assert output_seq[0] == is_true\n io_examples.append((input_seq, output_seq))\n return io_examples", "def random(s, n=None):\r\n if n is None:\r\n return s._random()\r\n else:\r\n return Sample([s._random() for i in xrange(n)])", "def rand_bodies(n: int) -> List[Body]:\n return [\n Body(\n mass=random.uniform(0, 1000),\n position=Vector(random.random(), random.random(), random.random()),\n velocity=Vector(0.0, 0.0, 0.0),\n )\n for _ in range(n)\n ]", "def gen_random_id(self, n: int = 12) -> object:\n random_source = string.ascii_letters + string.digits\n id_ = random.choice(string.ascii_lowercase)\n id_ += random.choice(string.ascii_uppercase)\n id_ += random.choice(string.digits)\n\n for i in range(n):\n id_ += random.choice(random_source)\n\n _list = list(id_)\n random.SystemRandom().shuffle(_list)\n clid = ''.join(_list)\n return clid", "def sample(self, n, include=True):\n return [self(t / n) for t in range(n + int(include))]", "def random_ngon_linify(cymk_img, n_min=3, n_max=6, r_min = .1, r_max = 1.):\n c_lines, y_lines, m_lines, k_lines = [],[],[],[]\n for X in range(cymk_img.shape[0]):\n for Y in range(cymk_img.shape[1]):\n if (cymk_img[X,Y,0]>0.5):\n R = random.random()*(r_max-r_min) + r_min\n NN = random.randint(n_min,n_max)\n phase = random.random()*2*pi\n c_lines.append(ngon(X,Y,r=R,n=NN,phase=phase))\n if (cymk_img[X,Y,1]>0.5):\n R = random.random()*(r_max-r_min) + r_min\n NN = random.randint(n_min,n_max)\n phase = random.random()*2*pi\n y_lines.append(ngon(X,Y,r=R,n=NN,phase=phase))\n if (cymk_img[X,Y,2]>0.5):\n R = random.random()*(r_max-r_min) + r_min\n NN = random.randint(n_min,n_max)\n phase = random.random()*2*pi\n m_lines.append(ngon(X,Y,r=R,n=NN,phase=phase))\n if (cymk_img[X,Y,3]>0.5):\n R = random.random()*(r_max-r_min) + r_min\n NN = random.randint(n_min,n_max)\n phase = random.random()*2*pi\n k_lines.append(ngon(X,Y,r=R,n=NN,phase=phase))\n return c_lines, y_lines, m_lines, k_lines", "def test_init():\n LINES = (\n \"One morn before me were three figures seen,\",\n \"And once more came they by:-alas! 
wherefore?\",\n )\n for line in LINES:\n assert(LineBuilder(line).line == line)", "def __init__(self, n: int) -> None:\n\n assert n > 1, \"for n = 1 use Bernoulli distribution.\"\n\n self.n = n", "def _extend(filename, n, keys=()):\n\n with open(filename, 'r') as file:\n header = file.readline()\n reader = csv.reader(file)\n lines = [_ for _ in reader]\n\n fname = f\"{filename}_{n}.csv\"\n with open(fname, 'w') as file:\n file.write(header)\n for line in lines:\n file.write(','.join(line) + '\\n')\n # file.writelines([','.join(x) for x in lines])\n # file.write('\\n')\n\n if not keys:\n these_keys = set([line[0].strip() for line in lines])\n else:\n these_keys = set()\n n = n // 5\n\n for i in range(n):\n for line in lines:\n mod_words = line[:]\n\n if keys: # Use provided users and products\n uid = random.choice(keys[0])\n pid = random.choice(keys[1])\n\n counter = 0\n while (uid, pid) in these_keys:\n uid = random.choice(keys[0])\n pid = random.choice(keys[1])\n if counter > 100:\n break\n\n if (uid, pid) in these_keys:\n continue\n\n file.write(f\"{uid}, {pid}, {random.randint(1, int(mod_words[-1].strip()) * 2)}\\n\")\n else:\n mod_key = ''.join([random.choice(string.ascii_letters) for _ in range(len(mod_words[0]))])\n while mod_key.strip() in these_keys:\n mod_key = ''.join([random.choice(string.ascii_letters) for _ in range(len(mod_words[0]))])\n these_keys.add(mod_key)\n mod_words[0] = mod_key\n\n for j, word in enumerate(line[1:], 1):\n # If a phone number, randomize digits\n if re.match(r\"\\d{3}-\\d{3}-\\d{4}\", word.strip()):\n num = f\"{random.randint(0, 9999999999):09d}\"\n mod_words[j] = num[:3] + '-' + num[3:6] + '-' + num[-4:]\n # If a number, randomize\n elif re.fullmatch(r\"\\d*\", word.strip()):\n num = random.randint(1, int(word.strip()) * 2)\n mod_words[j] = str(num)\n else: # Replace 1/2 of characters with random digits\n mod_locs = [random.randint(0, len(word) - 1) for _ in range(len(word) // 2)]\n lst = list(word)\n for loc in mod_locs:\n lst[loc] = random.choice(string.ascii_letters)\n mod_words[j] = ''.join(lst)\n\n file.write(','.join(mod_words) + '\\n')\n # file.writelines([]) for line in lines])\n\n return fname, these_keys", "def __init__(self, n):\n self.rows = [0 for _ in range(n)]\n self.columns = [0 for _ in range(n)]\n # First diagonal x+y, second y-x\n self.diagonal = [0, 0]\n self.score = {1: 1, 2: n+1}\n self.win = {1: n, 2: (n+1)*n}\n self.size = n", "def generate_nums(filename, n):\n text = ''\n for i in range(n):\n num = random.randrange(0, 100)\n text += (str(num) + '\\n')\n f = open(filename, 'w')\n f.write(text)\n f.close()\n return", "def init_prey(n):\n \n prey = np.zeros(n, dtype='uint32, 2float64, uint8')\n prey.dtype.names = ('index', 'position', 'alive')\n prey['index'] = range(n)\n prey['position'] = np.random.rand(n, 2)\n prey['alive'] = np.ones(n)\n return prey", "def __init__(self, nonogram_size):\n # create random id\n self.nonogram_id = uuid.uuid4()\n self.row_numbers = [(2), (2), (2)]\n self.column_numbers = [(1, 1), (3), (1)]\n self.nonogram_size = nonogram_size\n self.grid = Nonogram.create_rand_grid(nonogram_size)\n #TODO\n self.fitness = 999", "def generate_random_states(n):\n val_memory = Memory(size=n)\n while val_memory.records_added < n:\n t = TicTacToe()\n game_transactions = []\n while t.game_over == 0:\n current_state = t.get_current_state()\n action, _ = RandomAgent.play(current_state)\n reward, game_over = t.execute_action(action)\n next_state = t.get_current_state()\n is_game_over = t.game_over\n\n state_data = 
[current_state, action, reward, next_state, is_game_over]\n game_transactions.append(state_data)\n\n # after game is over\n # assign next states and save transactions to memory\n for k, transaction in enumerate(game_transactions):\n if transaction[4] == 0: # if game is not over\n next_state = game_transactions[k+1][3].copy() # next state from opponent's move\n else:\n next_state = np.full(9, -1) # invalid data as it's never used for training\n transaction[3] = next_state\n val_memory.add_record(transaction)\n\n return val_memory", "def random_text(self, n=100):\n # choose a random prefix (not weighted by frequency)\n start = random.choice(list(self.suffix_map.keys()))\n #print(\">>DEBUG | start is\", start)\n \n for i in range(n):\n #print(\">> DEBUG | i is\", n)\n suffixes = self.suffix_map.get(start, None)\n #print(\">> DEBUG | suffixes is\", suffixes)\n if suffixes == None:\n # if the start isn't in map, we got to the end of the\n # original text, so we have to start again.\n #print(\">> DEBUG | start isn't in map\")\n random_text(n-i)\n return\n\n # choose a random suffix\n word = random.choice(suffixes)\n #print(\">> DEBUG | word is\", word)\n print(word, end=' ')\n start = self.shift(start, word)", "def __generate_test_config(m,n):\n L = np.random.rand(m)\n L /= np.sum(L)\n S = np.random.rand(m)\n S /= np.sum(S)\n A0 = np.random.randint(2, size=n)\n if(np.sum(A0) == 0):\n A0[0] = 1\n max_n0 = np.sum(A0)\n A1 = np.random.randint(2, size=n)\n if(np.sum(A1) == 0):\n A1[0] = 1\n max_n1 = np.sum(A1)\n R = np.zeros((m,n))\n for i in range(0,m):\n k = random.randint(0,max_n0-1)\n for j in range(0,n):\n if A0[j] == 1:\n if k == 0:\n R[i,j] = 1.0\n k -= 1\n Y = np.random.rand(m,n)\n for j in range(0,n):\n Y[:,j] /= np.sum(Y[:,j])\n return (L, S, A1, R, Y)", "def initialize_random_number_generator(self,question_type):\n\t\tself.generator.seed(self.generate_index(self.magic, self.level, self.problem_id, question_type))", "def __init__(self, n):\n self.rows = [0] * n\n self.cols = [0] * n\n self.diagonal1 = 0\n self.diagonal2 = 0\n self.n = n", "def _generate_raw_environments(self, num, seed):", "def gen_test_points(n=50, extent=(0,0,100,100), rand_seed=None):\n if rand_seed:\n random.seed(rand_seed)\n return [(random.randint(extent[0], extent[2]), random.randint(extent[1], extent[3]))\n for i in xrange(n)]", "def expensive_function_creates_data(n):\n my_data = {}\n for _ in range(0, n):\n my_list = sorted([randint(0, 2 * n) for _ in range(0, n)])\n my_data[my_list[0]] = my_list\n return my_data", "def create_song(self, lnmn, lnmx):\n # decide on the length of the song\n nlng = random.randint(lnmn, lnmx)\n\n # load the database\n lns = self.read_database()\n\n # randomly pick nlng lines\n rsong = []\n for i in range(nlng):\n j = random.randint(0,len(lns)-1)\n rsong.append(lns[j])\n\n return rsong", "def sample(self, n):\n idx = np.random.randint(0, len(self.memory), size=n)\n return [self.memory[i] for i in idx]", "def __init__(self, n=1):\n vertices = [Vertex(i) for i in range(n)]\n for vertex in vertices:\n self.add_vertex(vertex)\n self.populate_graph()", "def generateSDR(n, w):\n sdr = np.zeros((n, ))\n randomOrder = np.random.permutation(np.arange(n))\n activeBits = randomOrder[:w]\n sdr[activeBits] = 1\n return sdr", "def chunks(l, n):\n for i in range(0, len(l), n):\n elem = l[i:i + n]\n random.shuffle(elem)\n yield elem", "async def pornx(ctx, n: int = 1):\r\n msg = [\"{}\".format(text) for text in [random.choice(data) for _ in range(0,n)]]\r\n await bot.say('\\n'.join(msg))", "def 
generate_line_full(offsets: List[int]) -> str:\n line = \"\"\n for k in offsets:\n line += random_string(k)\n return line", "def grow_forest( n, records ):\n dataset = Dataset( records )\n record_number = dataset.size\n\n dts = []\n for i in xrange(n):\n print \"Training\", i\n # pick randomly as many records as the number in the dataset.\n picked_records = []\n for j in xrange( record_number ):\n ind_picked = randint(0, record_number-1)\n picked_records.append( dataset[ ind_picked ] )\n picked_records = Dataset( picked_records )\n # train a tree with these records and add it to the forest\n tree = train(picked_records)\n dts.append( tree )\n return dts", "def __init__(self, n, sick_init, social_dist, radius=0.01, styles=None, total_beds=10, box_length=1, recovery_time=1000):\n\n self.init_persons(n, sick_init, social_dist, radius, box_length, recovery_time, total_beds, styles)\n self.init_hospital(total_beds)", "def generate_random(self: object) -> None:\n self.random.set(Sequence.generate(length=50))", "def set_uniform(self, n_rows: int = 2, n_columns: int = 2):\n self.n_rows = n_rows\n self.n_columns = n_columns\n self.c_matrix = [BaseDistribution(n_items = n_columns) for x in range(n_columns)]\n self.prior = BaseDistribution(n_items = n_rows)\n return self", "def rand(self):\n raise NotImplementedError", "def makeRandomChains( nChains=1 ):\n\t\n\t# retrieve the binding partner specifications\n\t(maxsize,types) = getTypes()\n\t\t\n\t# array to hold finished, random chains\n\tallChains = []\n\t\n\tfull = False\t\n\twhile( not full ):\n\t\t# array to hold the currently constructed chain\n\t\tnewChain = []\n\t\t\n\t\t# start the chain with a connected TRAP+AT\n\t\taddComponent(newChain,types[0],0,0)\n\t\taddComponent(newChain,types[1],1,1)\n\t\tconnectComponents(newChain,0,randint(0,types[0]['nSites']-1),1,randint(0,types[1]['nSites']-1))\n\n\t\tnTRAPs = 1\n\t\tnATs = 1\n\t\n\t\twhile( (nTRAPs < types[0]['max']) or (nATs < types[1]['max']) ):\n\t\t\t\n\t\t\tindex = nTRAPs + nATs\n\t\t\t\n\t\t\tif( (random() < 0.5) and (nTRAPs < types[0]['max']) ):\n\t\t\t\tlist = makeSiteList(newChain,types[0],0)\n\t\t\t\tif(len(list) > 0):\n\t\t\t\t\tsite = choice(list)\n\t\t\t\t\taddComponent(newChain,types[0],index,index)\n\t\t\t\t\tconnectComponents(newChain,site[0],site[1],index,randint(0,types[0]['nSites']-1))\n\t\t\t\t\tnTRAPs += 1\n\t\t\telif(nATs < types[1]['max']):\n\t\t\t\tlist = makeSiteList(newChain,types[1],0)\n\t\t\t\tif(len(list) > 0):\n\t\t\t\t\tsite = choice(list)\n\t\t\t\t\taddComponent(newChain,types[1],index,index)\n\t\t\t\t\tconnectComponents(newChain,site[0],site[1],index,randint(0,types[1]['nSites']-1))\n\t\t\t\t\tnATs += 1\n\t\t\t\t\t\t\t\n\t\tallChains.append( Copy(newChain) )\n\t\t\n\t\tif( len(allChains) == nChains ):\n\t\t\treturn allChains\n\tpass", "def initialize_model_randomly(self, shape=(2048, 2048), tn=50):\n self.coeffs = self.generate_random_coeffs(shape, tn)", "def generate(self):\n for i in range(4):\n random_first = randomize_first_box()\n self.randomize(random_first)\n for i in range(9):\n random_pos = randomize_position()\n self.randomize(random_pos)\n self.board.solve()", "def gen_random_chars(n: int = 10) -> Text:\n if n < 1:\n raise Exception('Number of random chars to generate has to be > 0')\n\n return ''.join(choice(ascii_lowercase + '-_')\n for i in range(n))", "def random(cls, n=random.randint(5, 10), d=2, borns=[-1, 1], **kwargs):\n points = [Point.random(d=d, borns=borns) for i in range(n)]\n form = cls(points, **kwargs)\n form.makeSparse()\n return 
form", "def generate_n(k_problem, n):\n return [generate_first_random(k_problem) for i in range(n)]" ]
[ "0.70913213", "0.6437352", "0.63017213", "0.6262578", "0.6255006", "0.6248576", "0.6231163", "0.61695033", "0.61533153", "0.6150361", "0.6143328", "0.6143328", "0.61114734", "0.6106545", "0.61052126", "0.6093884", "0.60880303", "0.60650104", "0.60589135", "0.60510826", "0.60504454", "0.60492724", "0.6033823", "0.60290074", "0.60195726", "0.60148627", "0.59523785", "0.5943996", "0.59242904", "0.5920626", "0.59102887", "0.59004277", "0.58734834", "0.58694994", "0.58499074", "0.58353376", "0.5831426", "0.5809736", "0.5809506", "0.5808472", "0.5795694", "0.5777337", "0.5752237", "0.5741768", "0.57404625", "0.57359135", "0.5726859", "0.5720927", "0.5705927", "0.5679681", "0.56753266", "0.56603825", "0.5657378", "0.56545234", "0.56535476", "0.56459254", "0.56317294", "0.56247085", "0.56229717", "0.56209886", "0.56180304", "0.5607776", "0.5607407", "0.5607117", "0.5603918", "0.55916476", "0.55889404", "0.55783087", "0.55751735", "0.55734503", "0.5571055", "0.5568916", "0.55664456", "0.5550831", "0.55508304", "0.5549572", "0.5546731", "0.55379474", "0.55372304", "0.55348784", "0.55316454", "0.55316424", "0.55269015", "0.55182755", "0.5516755", "0.5515027", "0.55133367", "0.551082", "0.5506753", "0.55016637", "0.5499315", "0.54981244", "0.54923177", "0.5489443", "0.5489317", "0.54857254", "0.5484886", "0.54806453", "0.54795325", "0.54741067", "0.54740757" ]
0.0
-1
Generate next image, or if done, generate pdf.
def next(self): if self.is_done: return self.idx += 1 if self.sweep_line is not None: self.remaining_events = self.remaining_events[1:] if len(self.remaining_events) == 0: # End of everything if self.sweep_line is None: self.is_done = True return else: self.sweep_line = None return self.sweep_line, self.a_line = self.remaining_events[0] if self.sweep_line == self.lines[self.a_line][1]: # left current_n = self.lines[self.a_line][0] current_r = self.lines[self.a_line][2] for i in self.active_line_segments: n, _, r = self.lines[i] if r < current_r: self.overlap_graph.add_edge(n, current_n) # self.interval_graph.add_edge(n, current_n) self.active_line_segments.append(self.a_line) elif self.sweep_line == self.lines[self.a_line][2]: # right self.active_line_segments.remove(self.a_line)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate(self, filename):\n time_0 = time.time()\n self._per_pdf(filename)\n time_1 = time.time()\n time_total = time_1 - time_0\n time_per_image = time_total / (self.beads_num * self.images_num)\n time_min = time_total / 60\n time_sec = time_total % 60\n print(\"Total time: %i minutes %02d seconds\" % (time_min, time_sec))\n print(\"Time per-image: %0.5f\" % time_per_image)\n self.time_sec = time_per_image", "def next_image(self):\n self.image_path.next()\n\n if self.slideshow.is_active():\n self.slideshow.reset_timer()\n\n if self.sound and self.slideshow.is_active():\n self.beep()", "def next():\n\n def next_image(path_file):\n path = os.getcwd()+path_file\n dirs = os.listdir(path)\n CEToolkit.contador_wraps += 1\n if CEToolkit.contador_wraps == len(dirs):\n CEToolkit.contador_wraps = 0\n path_image = path + dirs[CEToolkit.contador_wraps]\n parent.ui.label_design_image.setPixmap(QtGui.QPixmap(path_image))\n\n if CEToolkit.band_wraps_button == 1:\n path_file = \"/media/img/design/wraps/\"\n next_image(path_file)", "def get_next_image(self):\n raise NotImplementedError", "def draw_pdf_files(self):\n def thread_extract_image(self, widget, tmp_folder):\n images = pdf_to_jpeg((widget.data['path'], tmp_folder, 0, 1, 'Cover', self.get_poppler_path()))\n if t.retrieve_setting(DB.settings.store_covers):\n t.save_image_as_blob(images[0], height=self.figure_height, md5=widget.data['md5'])\n\n def thread_set_blob_image(rv, tmp_folder):\n tmp_cover = os.path.abspath(os.path.expanduser(tmp_folder + '/cover.webp'))\n with open(tmp_cover, 'wb') as output_file:\n output_file.write(rv[DB.files.cover])\n\n if 'pdf_files' not in dir(self):\n return\n\n for path in self.pdf_files:\n if self.figure_height + self.pdf_ht > self.canvas.height():\n break\n\n if platform.system() == \"Windows\" and not self.get_poppler_path():\n continue\n\n if self.pdf_files[path]['drawn']:\n continue\n\n # this is not md5, more like a quick-budget checksum\n md5 = t.md5_hash_file(path, partial_file=True)\n md5 += str(os.path.getsize(path))\n md5 = t.md5_hash_string(md5)\n\n\n self.pdf_files[path]['drawn'] = True\n widget = PDFWidget(self.canvas, self, type='PDF')\n self.widgets['main'].append(widget)\n widget.data = self.pdf_files[path]\n widget.data['md5'] = md5\n widget.data['work'] = False\n widget.data['error'] = False\n widget.post_init()\n\n rv = sqlite.ro('select * from files where md5 = (?)', md5)\n tmp_folder = t.tmp_folder()\n\n if rv and rv[DB.files.converted]:\n widget.status_label.setText('SIMILAR FILE PROCESSED')\n widget.status_label.setStyleSheet('background-color: darkGreen ; color: white')\n\n if not rv:\n query, values = sqlite.empty_insert_query(table='files')\n values[DB.files.md5] = widget.data['md5']\n sqlite.w(query, values)\n\n if rv and rv[DB.files.cover]:\n t.start_thread(\n thread_set_blob_image, worker_arguments=(rv, tmp_folder,),\n finished_function=widget.set_pixmap, finished_arguments=(tmp_folder, True,),\n threads=4, name='refresh'\n )\n else:\n t.start_thread(\n thread_extract_image, worker_arguments=(self, widget, tmp_folder,),\n finished_function=widget.set_pixmap, finished_arguments=(tmp_folder, True,),\n threads=4, name='refresh'\n )\n\n if self.dev_mode:\n return\n\n t.start_thread(self.dummy, finished_function=self.draw_pdf_files, name='gui', priority=1)\n break", "def generate_image(self):\n pass", "def processImages(self):\n for file in os.listdir(self.config[\"tempPath\"]):\n self.logger.debug(\"Calling generateImages for the file: {0}\".format(file))\n self.generateText(file)", 
"def nextPicture(self):\n\t\tif self.currentPicture == self.totalPictures-1:\n\t\t\tself.currentPicture = 0\n\t\telse:\n\t\t\tself.currentPicture += 1\n\t\tself.loadImage(self.picPaths[self.currentPicture])", "def run(self):\n generated_gif = self.generate()\n with open(self.out_filename, 'wb') as out_fd:\n out_fd.write(generated_gif)", "def _getAllPageImages(context, size=(320, 452)):\n pdf = context.get_review_pdf()\n # import pdb; pdb.set_trace()\n if pdf:\n pdf_data = pdf[\"blob\"].open().read()\n if not pdf or not pdf_data:\n return \"%s has no pdf\" % (context.absolute_url()), None\n else:\n # Split the pdf, one file per page\n try:\n split_pdf_pages = RunSubprocess(\"pdftk\", output_params=\"burst output\")\n except SubprocessException, e:\n return e\n split_pdf_pages.create_tmp_input(suffix=\".pdf\", data=pdf_data)\n split_pdf_pages.create_tmp_output_dir()\n split_pdf_pages.output_path = os.path.join(\n split_pdf_pages.tmp_output_dir, \"%04d.pdf\"\n )\n split_pdf_pages.run()\n\n msg = tuple()\n if split_pdf_pages.errors != \"\":\n msg += (\"Message from split_pdf_pages:\" \"\\n%s\\n\" % split_pdf_pages.errors,)\n\n # Convert the pages to .gifs\n # rewritten to have one converter step per page as we have seen process\n # sizes larger than 2GB for 60 pages in a batch\n for filename in glob.glob(split_pdf_pages.tmp_output_dir + \"/*.pdf\"):\n pdf_to_image = RunSubprocess(\n \"convert\",\n input_params=\"-density 250\",\n input_path=filename,\n output_params=\"-resize %sx%s -background white -flatten\"\n % (size[0], size[1]),\n )\n outputname = \".\".join(filename.split(\"/\")[-1].split(\".\")[:-1]) + \".gif\"\n pdf_to_image.output_path = os.path.join(\n split_pdf_pages.tmp_output_dir, outputname\n )\n pdf_to_image.run()\n if pdf_to_image.errors != \"\":\n msg += (\"Message from pdfs_to_images:\" \"\\n%s\\n\" % pdf_to_image.errors,)\n\n pdf_to_image.clean_up()\n\n imgfiles = [\n gif\n for gif in os.listdir(split_pdf_pages.tmp_output_dir)\n if os.path.splitext(gif)[1] == \".gif\"\n ]\n imgfiles.sort()\n\n pages = []\n for img in imgfiles:\n img = open(os.path.join(split_pdf_pages.tmp_output_dir, img), \"r\")\n img_data = img.read()\n pages.append(img_data)\n img.close()\n\n # Remove temporary files\n split_pdf_pages.clean_up()\n\n if pages:\n imgfields = []\n for img in pages:\n IF = ImageField()\n IF.set(context, img)\n imgfields.append(IF)\n setattr(context, \"pagePictures\", imgfields)\n\n return msg or \"Successfully converted %s pages\" % len(pages)", "def build_jpeg_preview(self, file_path, cache_path, page_id: int, extension='.jpg', size=(256,256)):\n\n # try:\n # os.mkdir(cache_path.format(d_id=document_id)+'/')\n # except OSError:\n # pass\n\n\n with open(file_path, 'rb') as odt:\n\n file_name = self.get_file_hash(file_path)\n if os.path.exists(\n '{path}{file_name}.pdf'.format(\n path=cache_path,\n file_name=file_name\n )):\n result = open(\n '{path}.pdf'.format(\n path=cache_path + file_name,\n ), 'rb')\n\n else:\n if os.path.exists(cache_path + file_name + '_flag'):\n time.sleep(2)\n self.build_pdf_preview(\n file_path=file_path,\n cache_path=cache_path,\n extension=extension\n )\n else:\n result = file_converter.office_to_pdf(odt, cache_path, file_name)\n\n input_pdf = PdfFileReader(result)\n output_pdf = PdfFileWriter()\n output_pdf.addPage(input_pdf.getPage(int(page_id)))\n output_stream = BytesIO()\n output_pdf.write(output_stream)\n output_stream.seek(0, 0)\n result2 = file_converter.pdf_to_jpeg(output_stream, size)\n\n\n\n file_name = self.get_file_hash(file_path, 
size)\n\n with open(\n '{path}{file_name}_{page_id}_{extension}'.format(\n file_name=file_name,\n path=cache_path,\n page_id=page_id,\n extension=extension\n ),\n 'wb') \\\n as jpeg:\n buffer = result2.read(1024)\n while buffer:\n jpeg.write(buffer)\n buffer = result2.read(1024)", "def process_pdf(pdf):\n\n if os.path.exists(legend_images_dir):\n subprocess.call([\"rm\", \"-rf\", legend_images_dir])\n os.makedirs(legend_images_dir)\n\n if os.path.exists(plot_images_dir):\n subprocess.call([\"rm\", \"-rf\", plot_images_dir])\n os.makedirs(plot_images_dir)\n\n if os.path.exists(csv_output_dir):\n subprocess.call([\"rm\", \"-rf\", csv_output_dir])\n os.makedirs(csv_output_dir)\n\n if os.path.exists(pdf_output_dir):\n subprocess.call([\"rm\", \"-rf\", pdf_output_dir])\n os.makedirs(pdf_output_dir)\n\n genImages(pdf)", "def onNext(self, event):\n\t\tself.nextPicture()", "def generate_pdf_background(pisafile, pagesize, is_portrait, context={}):\n # don't move up, we are preventing circular import\n from xhtml2pdf.xhtml2pdf_reportlab import PmlImageReader\n output = pisaFileObject(None, \"application/pdf\") # build temporary file\n img = PmlImageReader(\n WaterMarks.get_img_with_opacity(pisafile, context)\n )\n x, y, width, height = WaterMarks.get_size_location(img, context, pagesize, is_portrait)\n\n canvas = Canvas(output.getNamedFile(), pagesize=pagesize)\n canvas.drawImage(img, x, y, width, height, mask='auto')\n\n \"\"\"\n iw, ih = img.getSize()\n pw, ph = pagesize\n\n width = pw # min(iw, pw) # max\n wfactor = float(width) / iw\n height = ph # min(ih, ph) # max\n hfactor = float(height) / ih\n factor_min = min(wfactor, hfactor)\n factor_max = max(wfactor, hfactor)\n \n if is_portrait:\n w = iw * factor_min\n h = ih * factor_min\n canvas.drawImage(img, 0, ph - h, w, h)\n else:\n h = ih * factor_max\n w = iw * factor_min\n canvas.drawImage(img, 0, 0, w, h)\n \"\"\"\n canvas.save()\n\n return output", "def animator_pdf_maker(rounds, pump_index):\n print(\"making pdf's and animations.\")\n space = ('wavelength', 'freequency', 'time')\n for sp in space:\n file_loc = 'output/output'+str(pump_index)+'/figures/'+sp+'/'\n strings_large = ['convert '+file_loc+'00.png ']\n for i in range(4):\n strings_large.append('convert ')\n for ro in range(rounds):\n for i in range(4):\n strings_large[i+1] += file_loc+str(ro)+str(i+1)+'.png '\n for w in range(1, 4):\n if i == 5:\n break\n strings_large[0] += file_loc+str(ro)+str(w)+'.png '\n for i in range(4):\n os.system(strings_large[i]+file_loc+str(i)+'.pdf')\n\n file_loca = file_loc+'portA/'\n file_locb = file_loc+'portB/'\n string_porta = 'convert '\n string_portb = 'convert '\n for i in range(rounds):\n string_porta += file_loca + str(i) + '.png '\n string_portb += file_locb + str(i) + '.png '\n\n string_porta += file_loca+'porta.pdf '\n string_portb += file_locb+'portb.pdf '\n os.system(string_porta)\n os.system(string_portb)\n\n for i in range(4):\n os.system(\n 'convert -delay 30 '+file_loc+str(i)+'.pdf '+file_loc+str(i)+'.mp4')\n os.system('convert -delay 30 ' + file_loca +\n 'porta.pdf ' + file_loca+'porta.mp4 ')\n os.system('convert -delay 30 ' + file_locb +\n 'portb.pdf ' + file_locb+'portb.mp4 ')\n\n for i in (file_loc, file_loca, file_locb):\n print('rm ' + i + '*.png')\n os.system('rm ' + i + '*.png')\n os.system('sleep 5')\n return None", "def getNextImage(self):\n self._images = self._api.updateImageNames()\n \n # Get index from local txt file. 
\n # This ensures that the image queue does not reset if the Pola restarts.\n try: \n f = open(\"memoryIndex.txt\", 'r')\n self._currentIndex = int((f.read()))\n f.close()\n except: \n self._currentIndex = -1\n \n self._currentIndex = (self._currentIndex + 1) % len(self._images)\n \n f = open(\"memoryIndex.txt\", 'w')\n f.write(str(self._currentIndex))\n f.close()\n \n \n # If there is an internet connection, go online. If not, get the \"no wifi error\"- image queue\n try:\n urllib.request.urlopen('http://torabodin.com/')\n try: \n imageName = self._api.downloadImage(self._currentIndex)\n print(1, imageName)\n self._image= self.loadImage(imageName, True)\n print (self._image)\n \n except: \n self._image = self.getNextImage()\n \n except:\n self._image = self.loadImage(None, False)\n \n \n return self._image", "def convert_pdf_to_images(self, inputpath, outputpath, widget):\n tmp_jpeg_folder = t.tmp_folder(inputpath, hash=True, delete=True)\n tmp_folder = t.tmp_folder(outputpath, hash=True, delete=True)\n\n image_list = []\n\n poppler_path = self.get_poppler_path()\n widget.status_label.setText('EXTRACTING')\n if self.pdf_threads.isChecked():\n rv = self.decide_pages_per_cpu(inputpath)\n if rv:\n image_list = convert_files_to_jpeg(\n rv, inputpath, tmp_jpeg_folder, poppler_path)\n\n if not image_list:\n image_list = pdf_to_jpeg((inputpath, tmp_jpeg_folder, None, None, None, poppler_path,))\n\n if not image_list:\n return False\n\n jobs = []\n\n for count, jpeg_image_path in enumerate(image_list):\n filename = t.zero_prefiller(count, lenght=5)\n webp_save_path = f'{tmp_folder}/{filename}.webp'\n webp_save_path = os.path.abspath(os.path.expanduser(webp_save_path))\n\n jobs.append(\n (jpeg_image_path, webp_save_path, outputpath, self.webp_slider.value(), self.check_4k.isChecked(),)\n )\n\n widget.status_label.setText('CONVERTING')\n if not self.wepb_threads.isChecked():\n for i in jobs:\n convert_files_to_webp([i])\n else:\n convert_files_to_webp(jobs)\n\n widget.status_label.setText('RECOMPRESSING')\n rv = recompress_fucntion(outputpath, tmp_folder)\n\n return dict(status=rv, tmp_webp_folder=tmp_folder, tmp_jpeg_folder=tmp_jpeg_folder, outputpath=outputpath)", "def forward():\n global my_iterator, iterable, canvas, help_window, forward_button, image\n try:\n iterable = next(my_iterator)\n pill_image = Image.open(iterable)\n image = ImageTk.PhotoImage(pill_image)\n canvas.create_image(10, 10, anchor=NW, image=image)\n help_window.mainloop()\n except StopIteration:\n forward_button.destroy()", "def next(self):\n if self.currentframe < (self.nframes - 1) and self.nframes > 1:\n return self.getframe(self.currentframe + 1)\n else:\n newobj = pixiimage()\n newobj.read(next_filename(\n self.sequencefilename))\n return newobj", "def set_next_image(self, image):\n raise NotImplementedError", "def pdftoimages(input_dir,output_dir): \n dirListing = os.listdir(input_dir)\n files = []\n imagespath = output_dir\n for item in dirListing:\n files.append(item)\n n = len(files)\n for num in range(n):\n doc = fitz.open(input_dir+\"/\"+files[num])\n for img in doc.getPageImageList(0):\n xref = img[0]\n pix = fitz.Pixmap(doc, xref)\n if pix.n < 5: # this is GRAY or RGB\n pix.writePNG(os.path.join(imagespath,\"p%s-%s.png\" % (num, xref)))\n else: # CMYK: convert to RGB first\n pix1 = fitz.Pixmap(fitz.csRGB, pix)\n pix1.writePNG(os.path.join(imagespath,\"p%s-%s.png\" % (num, xref)))\n pix1 = None \n pix=None\n break", "def create_pdf(self):\n\n my_datetime = datetime.now()\n self.pdf_name = (\n self.pdf_name + 
\"_\" + my_datetime.strftime(\"%H%M_%d%m%Y\") + \".pdf\"\n )\n fig_width = aW * self.column_ratio[0]\n\n clm_width_meta = (aW * self.column_ratio[1]) / len(self.fields)\n\n c = canvas.Canvas(os.path.join(self.pdf_folder, self.pdf_name), pagesize=A4)\n\n for qc_run_id, fig_file in sorted(self._files.items()):\n (param_values, feature_values) = get_param_values(\n qc_run_id, self.db_name, return_meta_add_on=True\n )\n\n comment = self.subject + \"<br/>\"\n # c.saveState()\n title = \"Dataset \" + qc_run_id\n\n # Prepare header\n header = Paragraph(title, title_style)\n h_w, h_h = header.wrap(aW, aH)\n\n # Prepare image\n img = ImageReader(fig_file)\n im_width, im_height = img.getSize()\n aspect = im_height / float(im_width)\n fig_height = fig_width * aspect\n\n # Prepare metadata section\n\n meta_table = Table(\n param_values,\n colWidths=[clm_width_meta] * len(self.fields),\n hAlign=\"CENTER\",\n rowHeights=0.22 * inch,\n )\n meta_table.setStyle(\n TableStyle(\n [\n (\"FONT\", (0, 0), (-1, 0), \"Helvetica-Bold\"),\n (\"FONT\", (0, 1), (-1, -1), \"Helvetica\"),\n (\"LINEBELOW\", (0, 0), (1, 0), 0.08, colors.black),\n (\"SIZE\", (0, 0), (-1, -1), 8),\n (\"VALIGN\", (0, 0), (-1, -1), \"BOTTOM\"),\n # ('ALIGN', (0, 0), (-1, 0), 'CENTER'),\n (\"ALIGN\", (0, 0), (0, -1), \"LEFT\"),\n (\"ALIGN\", (1, 1), (1, -1), \"LEFT\"),\n (\"INNERGRID\", (0, 0), (-1, -1), 0.08, colors.beige),\n # ('BOX', (0,0), (-1,-1), 0.25, colors.grey),\n ]\n )\n )\n\n meta_width, meta_height = meta_table.wrap(aW - im_width, aH / 2)\n\n # Prepare comments header\n comments_header = Paragraph(\"Comments:\", title_style)\n avail_height = aH - fig_height - v_padding\n comm_h_width, comm_h_height = comments_header.wrap(\n im_width, avail_height # aW - meta_width,\n )\n # Prepare comments\n my_datetime = datetime.now()\n ts = \"Printed on \" + my_datetime.strftime(\"%c\")\n\n try:\n data_specific_comment = self.comments[int(qc_run_id)]\n comment += data_specific_comment + \"<br/>\"\n comment += self.comments[\"general\"] + \"<br/>\"\n\n comment += self.smalltalk + \"<br/>\"\n except Exception:\n logger.warning(\n \"Unable to summarize result of \" + \"dataset {}\".format(qc_run_id)\n )\n comment_ts = comment + ts\n comment_ts = textwrap.fill(comment_ts, 70)\n comment_ts = comment_ts.replace(\"\\n\", \"<br/>\")\n\n comments_p = Paragraph(comment_ts, body_style)\n\n avail_height = aH - fig_height - v_padding - comm_h_height\n\n comm_width, comm_height = comments_p.wrap(im_width, avail_height) # aW,\n\n line_widths = comments_p.getActualLineWidths0()\n number_of_lines = len(line_widths)\n if number_of_lines > 1:\n pass\n if number_of_lines == 1:\n min(line_widths)\n comm_width, comm_height = comments_p.wrap(im_width, avail_height)\n\n # Prepare features\n feat_table = Table(\n feature_values,\n colWidths=[clm_width_meta] * len(self.fields),\n hAlign=\"CENTER\",\n rowHeights=0.22 * inch,\n )\n feat_table.setStyle(\n TableStyle(\n [\n (\"FONT\", (0, 0), (-1, 0), \"Helvetica-Bold\"),\n (\"FONT\", (0, 1), (-1, -1), \"Helvetica\"),\n (\"LINEBELOW\", (0, 0), (1, 0), 0.08, colors.black),\n (\"SIZE\", (0, 0), (-1, -1), 8),\n (\"VALIGN\", (0, 0), (-1, -1), \"BOTTOM\"),\n # ('ALIGN', (0, 0), (-1, 0), 'CENTER'),\n (\"ALIGN\", (0, 0), (0, -1), \"LEFT\"),\n (\"ALIGN\", (1, 1), (1, -1), \"LEFT\"),\n (\"INNERGRID\", (0, 0), (-1, -1), 0.08, colors.beige),\n # ('BOX', (0,0), (-1,-1), 0.25, colors.grey),\n ]\n )\n )\n avail_height = aH - meta_height # fig_height - v_padding - comm_h_height\n avail_height -= comm_height\n feat_width, feat_height = 
feat_table.wrap(aW - im_width, avail_height)\n\n # Draw everyting on canvas\n\n header.drawOn(c, left_margin, aH - top_margin)\n\n c.drawImage(\n img,\n left_margin,\n aH - top_margin - fig_height - v_padding,\n width=fig_width * 1.1,\n height=fig_height * 1.1,\n mask=\"auto\",\n )\n\n meta_table.drawOn(\n c,\n left_margin + fig_width + h_padding,\n aH - meta_height - top_margin / 2, # - v_padding\n )\n\n comments_header.drawOn(\n c,\n left_margin,\n aH\n - top_margin\n - comm_h_height\n - fig_height\n - 2 * v_padding, # - add_on_height\n )\n\n comments_p.drawOn(\n c,\n left_margin,\n aH\n - top_margin\n - comm_h_height\n - comm_height\n - fig_height\n - 2 * v_padding\n - comm_h_height, # - add_on_height\n )\n\n feat_table.drawOn(\n c,\n left_margin + fig_width + h_padding,\n aH - meta_height - top_margin / 2 - feat_height - v_padding,\n # top_margin - fig_height - 2*v_padding - feat_height\n )\n\n # new page\n c.showPage()\n c.saveState()\n\n c.save()", "def _image(self):\n print(\"imaging\")\n self.images.append(self.device_control.image())\n yield", "def generateImage(self):\n self.image = self.font.render(self.text, True, self.color)\n self.rect = self.image.get_rect()\n self.rect.center = self.xy", "def make_pdf(self):\n source = self.get_page_source()\n if not source:\n self.errors.append('no_source')\n if not self.errors:\n self.generate_pdf_file(source)", "def genImage(self, img_num=1, mode=\"stabilization\"):\n self.Gmodel.eval()\n with torch.no_grad():\n for i in range(img_num):\n latent_z = torch.randn(1, 512, 1, 1).normal_().to(self.device)\n output = self.Gmodel(latent_z, mode)\n print(\"output size: \", output.size())\n output = torch.clamp(output, min=0, max=1)\n output = output.cpu().squeeze().numpy()\n fake_img = output.transpose(1, 2, 0)\n print(\"fake image size: \", fake_img.shape)\n plt.imshow(fake_img)\n plt.show()\n save_file = os.path.join(self.save_dir, str(self.load_resl), \"%05d.jpg\" % i)\n os.makedirs(os.path.dirname(save_file), exist_ok=True)\n plt.imsave(save_file, fake_img)", "def draw_image(self):\n self.PDF.saveState()\n self.PDF.scale(1, -1)\n # self.PDF.drawImage(\n # LOGO, 490, -78, width=80, preserveAspectRatio=True, mask=\"auto\"\n # )\n self.PDF.restoreState()", "def pdf_page_to_png(src_pdf, pagenum=0, resolution=154):\n\n #check_dependencies(__optional_dependencies__['pdf'])\n # Import libraries within this function so as to avoid import-time dependence\n\n dst_pdf = PyPDF2.PdfFileWriter()\n src_pdf = w(filename=src_pdf,resolution=300)\n dst_pdf.addPage(src_pdf.getPage(pagenum))\n\n pdf_bytes = io.BytesIO()\n dst_pdf.write(pdf_bytes)\n pdf_bytes.seek(0)\n\n img = Image(file=pdf_bytes, resolution=resolution)\n \n with img.convert('png') as converted:\n converted.save(filename='converted.png')\n return img", "def myLaterPages(canvas, doc):\n\n canvas.saveState()\n canvas.setFont('Times-Roman',9)\n #url = pyqrcode.create(canvas.getPageNumber())\n #url.png('qr.png', scale=8)\n\n #canvas.drawInlineImage(\"qr.png\", 530, 0.75*inch, width=45,height=45)\n canvas.drawString(inch, 0.75 * inch, \"Page %d \" % (doc.page))\n canvas.restoreState()", "def generatePageImages(self, later=True):\n result = \"\"\n status = 1\n # make this asyncronous\n async = component.getUtility(IAsyncService)\n async_args = (self.context, (800, 1131))\n when = datetime.datetime.now(pytz.UTC) + datetime.timedelta(seconds=600)\n try:\n if later:\n async.queueJobWithDelay(None, when, _getAllPageImages, *async_args)\n else:\n apply(_getAllPageImages, async_args)\n except 
(component.ComponentLookupError, KeyError):\n logger.error(\"Could not setup async job, running synchronous\")\n apply(_getAllPageImages, async_args)\n # try:\n # result, pageimages = self._getAllPageImages((800,1131))\n # except SubprocessException, e:\n # result = \"Missing converter? -> \" + str(e)\n # pageimages = None\n if result:\n logger.warn(\"popen: %s\" % (result))\n if \"Error:\" in result:\n status = 0\n # if pageimages:\n # imgfields = []\n # for img in pageimages:\n # IF = ImageField()\n # IF.set(self.context, img)\n # imgfields.append(IF)\n # setattr(self.context, 'pagePictures', imgfields)\n return status", "def run(self):\n for filepage in self.generator:\n print (filepage)\n filepage.touch()", "def gen_flowable(self):\n flow = randint(0,randint(1,2)) ## Weighted random numbers to balance text to image ratio\n if flow == 0:\n try:\n photo = self.photos[randint(0, len(self.photos)-1)]\n img = Image(self.dict['img'][photo]['src'], self.dict['img'][photo]['dimensions'][0], self.dict['img'][photo]['dimensions'][1])\n img.wrap(A6[0],A6[1])\n img.hAlign = ['CENTER', 'LEFT', 'RIGHT'][randint(0,3)]\n return img\n except:\n return Spacer(randint(0, 100), randint(0, 100))\n elif flow == 1:\n try:\n quote = self.quotes[randint(0, len(self.quotes)-1)]\n q_sheet = randint(0, 5)\n #return Paragraph(quote, Stylesheet.stylesheet['quote'][q_sheet])\n return Paragraph(cleanhtml(quote), Stylesheet.stylesheet['title'])\n except:\n return Spacer(randint(0, 100), randint(0, 100))\n elif flow == 2:\n try:\n tainr = self.containers[randint(0, (len(self.containers)-1))]\n c_sheet = randint(0, 1)\n #return Paragraph(self.dict['txt'][tainr]['text'], Stylesheet.stylesheet['quote'][q_sheet])\n return Paragraph(cleanhtml(self.dict['txt'][tainr]['text']), Stylesheet.stylesheet['default'])\n except:\n return Spacer(randint(0, 100), randint(0, 100))\n else:\n return Spacer(randint(0, 100), randint(0, 100))", "def next_sample(self):\n if self.cur >= len(self.seq):\n raise StopIteration\n idx = self.seq[self.cur]\n self.cur += 1\n s = self.imgrec.read_idx(idx)\n header, img = recordio.unpack(s)\n img = mx.image.imdecode(img).asnumpy()\n hlabel = np.array(header.label).reshape( (self.num_classes,2) )\n if not config.label_xfirst:\n hlabel = hlabel[:,::-1] #convert to X/W first\n annot = {'scale': config.base_scale}\n\n #ul = np.array( (50000,50000), dtype=np.int32)\n #br = np.array( (0,0), dtype=np.int32)\n #for i in range(hlabel.shape[0]):\n # h = int(hlabel[i][0])\n # w = int(hlabel[i][1])\n # key = np.array((h,w))\n # ul = np.minimum(key, ul)\n # br = np.maximum(key, br)\n\n return img, hlabel, annot", "def myFirstPage(canvas, doc):\n\n canvas.saveState()\n #url = pyqrcode.create(canvas.getPageNumber())\n #url.png('qr.png', scale=8)\n\n #canvas.drawInlineImage('qr.png', 45, 760, width=60,height=60)\n canvas.rect(370,760,200,40)\n canvas.setFont('Times-Bold',16)\n\n canvas.drawCentredString(PAGE_WIDTH/2.0, PAGE_HEIGHT-130, Title)\n canvas.setFont('Times-Roman',9)\n canvas.drawString(inch, 0.75 * inch, \"Page %d\" % canvas.getPageNumber())\n canvas.restoreState()", "def appendpics(pathofimg, w_sub, h_sub, step):\n num = 0\n dirlist = []\n images = [] # images in each folder\n for root, dirs, fileswer in os.walk(pathofimg):\n if len(dirs)!= 0:\n for dir in dirs:\n dirlist.append(dir)\n for rooert, dirwerwes, files in os.walk(pathofimg+'/'+dir):\n for file in files:\n if(file.endswith('.png')):\n images.append(Image.open(pathofimg+'/'+dir+'/'+file))\n if(len(images)==81):\n break\n target = montage(images, 
w_sub, h_sub, step)\n target.save(pathofimg +'/'+ dir + '.png', quality=100)\n else:\n dir = 'Generated'\n for file in fileswer:\n if (file.endswith('.png')):\n images.append(Image.open(pathofimg +'/'+ file))\n target1 = montage(images, w_sub, h_sub, step)\n savepath = pathofimg +'/'+ 'generated'\n os.makedirs(savepath)\n target1.save(savepath +'/'+ dir + '.png', quality=100)", "def convert_pdf_to_image(source: str, dpi: int, output_folder: str) -> None:\n logger.info('Starting conversion')\n pages = convert_from_path(source, dpi)\n number = 0\n for page in pages:\n filename = os.path.join(output_folder, ''.join([str(number), '.jpg']))\n page.save(filename)\n logger.info(f'Processed {number} of {len(pages)}')\n number += 1\n logger.info('Finished conversion')", "def advance_image():\n # pylint: disable=global-statement\n global current_image\n if current_image is not None:\n current_image += 1\n if current_image is None or current_image >= len(file_list):\n current_image = 0\n load_image()", "def next(self):\n if self.currentframe < (self.nframes - 1) and self.nframes > 1:\n return self.getframe(self.currentframe + 1)\n else:\n newobj = hdf5image()\n newobj.read(next_filename(self.filename))\n return newobj", "def _generate_attachment(self):\n Attachment = self.env['ir.attachment']\n ReportXml = self.env['ir.actions.report.xml']\n Report = self.env['report']\n pages = {}\n for current_order in self:\n report = ReportXml.search([('model', '=', current_order.res_model)], limit=1)\n if current_order.attachment_id: # compute page number\n # avoid to recompute the number of page each time for the attachment\n nbr_pages = pages.get(current_order.attachment_id.id)\n if not nbr_pages:\n nbr_pages = current_order._count_pages_pdf(current_order.attachment_id.datas.decode('base64'))\n pages[current_order.attachment_id.id] = nbr_pages\n current_order.write({\n 'nbr_pages': nbr_pages\n })\n elif not current_order.attachment_id and current_order.res_model and current_order.res_id and report: # check report\n # browse object and find its pdf (binary content)\n object_to_print = self.env[current_order.res_model].browse(current_order.res_id)\n bin_pdf = Report.get_pdf(object_to_print, report.report_name)\n\n # compute the name of the new attachment\n filename = False\n if report.attachment:\n filename = safe_eval(report.attachment, {'object': object_to_print, 'time': time})\n if not filename:\n filename = '%s-%s' % (current_order.res_model.replace(\".\", \"_\"), current_order.res_id)\n\n # create the new ir_attachment\n attachment_value = {\n 'name': filename,\n 'res_name': filename,\n 'res_model': current_order.res_model,\n 'res_id': current_order.res_id,\n 'datas': base64.b64encode(bin_pdf),\n 'datas_fname': filename+'.pdf',\n }\n new_attachment = Attachment.create(attachment_value)\n\n # add the new attachment to the print order\n current_order.write({\n 'nbr_pages': self._count_pages_pdf(bin_pdf),\n 'attachment_id': new_attachment.id\n })\n elif not current_order.attachment_id and current_order.res_model and current_order.res_id and not report: # error : no ir.actions.report.xml found for res_model\n current_order.write({\n 'state': 'error',\n 'error_message': _('The document you want to print and send is not printable. 
There is no report action (ir.actions.report.xml) for the model %s.') % (current_order.res_model,)\n })\n else: # error : not attachament can be generate, no attach_id or no res_model/res_id\n current_order.write({\n 'state': 'error',\n 'error_message': _('The document has no associated PDF : you have to give select an Attachment file, or set up the Object ID and Model Name fields.')\n })", "def save_images(self):\n for q in range(self.N_itr):\n plt.clf()\n self.plot_EM_estimate(q)\n plt.savefig('img%d.png' % (100 + q))", "def generate_images(self, image_idx, is_training, batch_size=16):\n \n # arrays to store our batched data\n images, ages, races, genders = [], [], [], []\n while True:\n for idx in image_idx:\n person = self.df.iloc[idx]\n \n age = person['age']\n race = person['race_id']\n gender = person['gender_id']\n file = person['file']\n \n im = self.preprocess_image(file)\n \n ages.append(age / self.max_age)\n races.append(to_categorical(race, len(dataset_dict['race_id'])))\n genders.append(to_categorical(gender, len(dataset_dict['gender_id'])))\n images.append(im)\n \n # yielding condition\n if len(images) >= batch_size:\n yield np.array(images), [np.array(ages), np.array(races), np.array(genders)]\n images, ages, races, genders = [], [], [], []\n \n if not is_training:\n break", "def image_generator(self, some_messages):\n offset = 0\n outer = 0\n inner = 0\n\n for a_message in some_messages:\n msg_id = a_message.gmail_id\n for att in a_message.attachments():\n if att.type in ATTACHMENT_MIMES:\n att_type = att.type.split(\"/\")[1]\n an_image = Image(a_message, att)\n\n # map each image id with a corresponding message id for later parsing\n if an_image.id in self.mapping:\n self.mapping[msg_id].append(a_message)\n else:\n self.mapping[msg_id] = [a_message]\n\n self.num_attachments = self.count_attachments(self.num_attachments)\n\n yield an_image", "def gen(camera):\n frame = camera.get_frame(wait=False) # allow fast start\n if frame is not None: # send image twice... 
otherwise chrome won't display it...\n yield get_mjpeg_image(frame) + get_mjpeg_image(frame)\n\n while True:\n frame = camera.get_frame()\n yield get_mjpeg_image(frame) + get_mjpeg_image(frame)", "def buildPDF(self):\n\n # TODO: get this working\n # TODO: make this configurable via a dialog\n os.chdir(self.file_path.parent)\n proc = subprocess.Popen(\n [\"make\", \"latexpdf\"],\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT\n )\n proc.wait()\n for line in proc.stdout:\n print(\"stdout: \" + line.rstrip())", "def _compute_single_pdf(self, **kwargs):\n raise NotImplementedError", "def build(self, buffer=None) -> any:\n\n # init PDF\n if buffer:\n self.PDF = canvas.Canvas(buffer, pagesize=letter, bottomup=0)\n else:\n self.PDF = canvas.Canvas(self.buffer, pagesize=letter, bottomup=0)\n\n # run funcs\n\n self.add_title()\n self.draw_image()\n self.add_org_details()\n self.add_customer_details()\n self.draw_line(225)\n self.add_due_date()\n self.add_subscription()\n\n # save the pdf\n\n self.PDF.save()\n return self.PDF", "def generate_page_pdf(self, src, out, options=None):\n loger.info(\"start pdf creation\")\n with open(src, 'r') as f:\n print(pdfkit.from_file(f, out, options=self.__soft_cover_options if not options else\n \"Lol\"))\n loger.info(\"end pdf creation\")", "def upload_next_img(self):\n current_img = self.listImages.currentImage\n if current_img != None:\n next_img = self.get_next_img(current_img)\n self.upload_img(next_img)\n else:\n pass", "def create_pdf(codes_array, filename):\n total_gentime, total_savetime = 0, 0\n for pdf_part in range(len(codes_array) // 1000 + 1):\n upper_bound = len(codes_array)\n if upper_bound > (pdf_part + 1) * 1000:\n upper_bound = (pdf_part + 1) * 1000\n pdf = FPDF(orientation='P', unit='mm', format='A4')\n start = time.time()\n for code_index in range(pdf_part * 1000, upper_bound, 8):\n upper = code_index + 8\n if upper > upper_bound - 1:\n upper = upper_bound - 1\n create_pdf_page(codes_array, code_index, upper, pdf)\n stop = time.time()\n total_gentime += (stop - start)\n start = time.time()\n filepath = 'api/public/%s.pdf' % (filename + \"_\" + str(pdf_part))\n filepath = os.path.abspath(filepath)\n pdf.output(filepath, 'F')\n stop = time.time()\n total_savetime += (stop - start)\n print(\"Total:\\n Pdf generation time: %.2f seconds\\n Pdf save time: %.2f seconds\\n\" %\n (total_gentime, total_savetime))\n start = time.time()\n export_path = merge_pdf()\n stop = time.time()\n # print(\"Merge time: %.2f\" % (stop - start))\n return (export_path)", "def generate_data():\n for subdir, dirs, files in os.walk(legend_images_dir):\n for _file in files:\n getTables(_file)\n\n file_list = []\n for subdir, dirs, files in os.walk(pdf_output_dir):\n for _file in files:\n if _file.endswith('.pdf'):\n file_list.append(_file)\n\n print (\"Writing merged output in Output.pdf...\")\n current_dir = os.getcwd()\n mergeOutput(file_list, current_dir + \"/Output.pdf\")\n\n clean()", "def OnBuildPNGs(self, e):\n if (not defaults.use_tex):\n msg = \"LaTeX is disabled in the defaults.py file. 
To use this functionality, change the\"\n msg += \" use_tex option to True and restart the GUI.\"\n ShowMessage(msg, kind='warn')\n return\n question = \"Quantity code formulas are displayed using PNG images, which need to be generated.\"\n question += \"\\n\\n\\nImages should only be generated if they do not already exist or\"\n question += \" the quantity codes have changed, e.g., more custom outputs have been added.\"\n question += \"\\n\\n\\nThis can take ~60 sec, do you want to proceed?\"\n proceed = AskYesNo(question, title='Generate LaTeX Formula Images?')\n if (not proceed): return\n\n question = \"Choose a path where the images will be saved. The default value from defaults.py is shown.\"\n path = AskText(question, default=defaults.quantity_code_image_path, title=\"Where to store images?\")\n if (path is None): return\n defaults.quantity_code_image_path = path # user overrode this quantity, remember for later\n\n question = \"If image files already exist, do you want to overwrite them?\"\n overwrite = AskYesNo(question, title='Overwrite Existing Files?')\n\n # call render routine and display a progress bar\n Nq = len(self.mainparent.nmlpanel.output_quantities.quantities)\n offsets = list(self.mainparent.nmlpanel.output_quantities.offsets.keys())\n\n P = ProgressBar(Nq)\n P(0)\n for i,Q in enumerate(self.mainparent.nmlpanel.output_quantities.quantities):\n if (Q.name in offsets): continue\n render_tex(Q.code, Q.tex, defaults.quantity_code_image_path, overwrite=overwrite)\n\n P(i+1) # update progress bar", "def make_processing_one_image(self, abspath_image, move_img_bool):\n start_time = datetime.now().replace(microsecond=0)\n abspath_image = abspath_image.rstrip('/')\n img_name = os.path.basename(abspath_image)\n\n if Utils.check_tif_and_metadata(abspath_image, log_obj=self.__log):\n try:\n data = self.__make_processing(\n img_name, abspath_image, 'foot_1'\n )\n except Exception as ex:\n self.__log.error(\n '\\nErro ao processar a imagem {} (Inserção '\n 'em banco interrompida)\\n{}\\n'.format(abspath_image, ex)\n )\n else:\n self.__dao.insert_catalog_rapideye(data, start_time)\n\n if move_img_bool:\n Utils.move_dir(abspath_image, DESTINY_RAPIDEYE, self.__log)\n else:\n sys.exit()", "def imagePages(files, choice):\n options = [\"Byte\", \"Markov\", \"Hilbert\"]\n type = options[int(ui.prompt(\"Choose a visualization type\", options))]\n\n targets = []\n pageNames = []\n pageSize = 100\n pages = range(math.ceil(len(files)/pageSize))\n for page in pb.progressbar(pages):\n # print(\"\\nPage {}/{}\".format(page+1, len(pages)))\n gc.collect() # Garbage collect\n\n images = []\n start = page*pageSize\n if choice == \"Create\":\n images, targets = buildImages(files[start:start+pageSize], targets, type)\n elif choice == \"Load\":\n images, targets = loadImages(files[start:start+pageSize], targets)\n pageNames.append(\"./pages/images_page{}.npy\".format(page))\n np.save(pageNames[-1], images)\n return targets, pageNames", "def to_pdf(self, imageFileName, outFileName, fontname=\"Times-Roman\", fontsize=10, withVisibleOCRText=False, withVisibleImage=True, withVisibleBoundingBoxes=False):\n if self.hocr is None:\n # warn that no text will be embedded in the output PDF\n print \"Warning: No hOCR file specified. 
PDF will be image-only.\"\n \n im = Image.open(imageFileName)\n imwidthpx, imheightpx = im.size\n if 'dpi' in im.info:\n width = float(im.size[0])/im.info['dpi'][0]\n height = float(im.size[1])/im.info['dpi'][1]\n else:\n # we have to make a reasonable guess\n # set to None for now and try again using info from hOCR file\n width = height = None\n \n \n ocr_dpi = (300, 300) # a default, in case we can't find it\n \n # get dimensions of the OCR, which may not match the image\n if self.hocr is not None:\n for div in self.hocr.findall(\".//%sdiv\"%(self.xmlns)):\n if div.attrib['class'] == 'ocr_page':\n coords = self.element_coordinates(div)\n ocrwidth = coords[2]-coords[0]\n ocrheight = coords[3]-coords[1]\n if width is None:\n # no dpi info with the image\n # assume OCR was done at 300 dpi\n width = ocrwidth/300\n height = ocrheight/300\n ocr_dpi = (ocrwidth/width, ocrheight/height)\n break # there shouldn't be more than one, and if there is, we don't want it\n \n if width is None:\n # no dpi info with the image, and no help from the hOCR file either\n # this will probably end up looking awful, so issue a warning\n print \"Warning: DPI unavailable for image %s. Assuming 96 DPI.\"%(imageFileName)\n width = float(im.size[0])/96\n height = float(im.size[1])/96\n \n # create the PDF file\n pdf = Canvas(outFileName, pagesize=(width*inch, height*inch), pageCompression=1) # page size in points (1/72 in.)\n \n # put the image on the page, scaled to fill the page\n if withVisibleImage:\n pdf.drawInlineImage(im, 0, 0, width=width*inch, height=height*inch)\n \n if self.hocr is not None:\n for word in self.hocr.findall(\".//%sspan\"%(self.xmlns)):\n if word.attrib['class'] == 'ocr_word':\n coords = self.element_coordinates(word)\n content = self._get_element_text(word)\n if content.rstrip() == '':\n continue\n text = pdf.beginText()\n text.setFont(fontname, fontsize)\n if not withVisibleOCRText:\n #text.setTextRenderMode(0) # visible\n #else:\n text.setTextRenderMode(3) # invisible\n \n # set cursor to bottom left corner of line bbox (adjust for dpi)\n # Can't determine original text's baseline, but guess that ypg\n # roughly push it down by ~2/3 of line height. Correct for that.\n # PDF y coords increase going *up* the page, remember. 
Assume \"o\" is\n # round so width == line height.\n origin_y = (height*inch)-(float(coords[3])/ocr_dpi[1])*inch\n if re.search(r\"[gjpqy()]\", content):\n origin_y += pdf.stringWidth(\"o\") * 1/3\n if re.search(r\"[\\[\\]()]\", content):\n origin_y += pdf.stringWidth(\"o\") * 1/3.5\n elif re.search(r\"[,;]\", content):\n origin_y += pdf.stringWidth(\"o\") * 1/4\n text.setTextOrigin((float(coords[0])/ocr_dpi[0])*inch, origin_y)\n \n # scale the width of the text to fill the width of the line's bbox\n text.setHorizScale((((float(coords[2])/ocr_dpi[0]*inch)-(float(coords[0])/ocr_dpi[0]*inch))/pdf.stringWidth(content.rstrip(), fontname, fontsize))*100)\n \n # write the text to the page\n text.textLine(content.rstrip())\n pdf.drawText(text)\n \n # finish up the page and save it\n pdf.showPage()\n pdf.save()", "def next(self, event=None):\n im = self.model.getNextImage() # boolean\n if im is False:\n return\n self.viewUpdate()", "def generate_pdf(self):\n x = 100\n y = 100\n buffer = BytesIO()\n p = canvas.Canvas(buffer, pagesize=\"A4\")\n p.drawString(x, y, \"TO DO\")\n p.showPage()\n p.save()\n pdf = buffer.getvalue()\n buffer.close()\n return pdf", "def _extract_pdf_images(pdf_stream):\n\n process = subprocess.Popen([\"pdftoppm\", \"-jpeg\"],\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n result, errors = process.communicate(pdf_stream)\n\n if errors:\n raise IOError(\"images:Could not extract Images from pdf: {0}\".format(repr(errors)))\n\n # FFD9 is an end of image marker in jpeg images\n for image in result.split(b'\\xFF\\xD9'):\n if len(image) > 11: # we can get an end of file marker after the image and a jpeg header is 11 bytes long\n yield image", "def make_image(self, imagename, fitsname, niter=500, antenna='', phasecenter='', start=200, stop=900, del_img=True, overwrite=False): \n self.generate_image(imagename, antenna=antenna, niter=niter, phasecenter=phasecenter, start=start, stop=stop)\n self.to_fits(imagename + '.image', fitsname, overwrite=overwrite)\n if del_img:\n self.remove_image(imagename, del_img=True)", "def get_next_cid(self) -> str:\n self.position += 1\n return \"img{}\".format(self.position)", "def nextGeneration(self):\n\n # Start a timer to calculate the time the render one generation.\n startTime = int(round(time.time() * 100000))\n\n self.generation += 1\n\n self.setNeighbors()\n self.checkAmountOfNeighbors()\n\n # Ends a timer to calculate the time the render one generation.\n endTime = int(round(time.time() * 100000))\n self.timeToCalcGeneration = (endTime - startTime)", "def generating(\n self,\n prompt,\n width=512,\n height=512,\n guidance_scale=7.5,\n num_images_per_prompt=1,\n num_inference_steps=50,\n generator=None,\n **kwargs,\n ):\n pipe = self.get_pipe(\"generate\")\n images = pipe(\n prompt=prompt,\n width=width,\n height=height,\n guidance_scale=guidance_scale,\n num_images_per_prompt=num_images_per_prompt,\n num_inference_steps=num_inference_steps,\n generator=generator,\n **kwargs,\n ).images\n return images", "def __generate_image(self):\n\t\tself.img = np.ones((self.size*self.width+self.border,self.size*self.width+self.border,1), np.uint8)*255\n\t\tfor i in range(len(self.matrix)):\n\t\t\tfor j in range(len(self.matrix)):\n\t\t\t\tif self.matrix[j][i] == 1:\n\t\t\t\t\tself.img = cv2.rectangle(self.img,(i*self.width+int(self.border/2),j*self.width+int(self.border/2))\n\t\t\t\t\t\t,(i*self.width+self.width+int(self.border/2),j*self.width+self.width+int(self.border/2)),(0,0,0),-1)\n\t\tif '.' 
in self.name:\n\t\t\tcv2.imwrite(self.name,self.img)\n\t\telse:\n\t\t\tcv2.imwrite(self.name+'.jpg',self.img)\n\t\tcv2.imshow(\"Image\",self.img)\n\t\tcv2.waitKey(0)\n\t\tcv2.destroyAllWindows()", "def show_next_image(self):\r\n self.index += 1\r\n progress_string = \"%d/%d\" % (self.index+1, self.n_paths)\r\n self.progress_label.configure(text=progress_string)\r\n \r\n display_name = \"Name = %s\" % (self.file_names[self.index])\r\n self.name_label.configure(text = display_name)\r\n \r\n #### added in version 2\r\n #sorting_string = df.sorted_in_folder[self.index].split(os.sep)[-2] #shows the last folder in the filepath before the file\r\n sorting_string = self.df.sorted_in_folder[self.index].split(\"/\")[-2]\r\n self.sorting_label.configure(text=(\"In folder: %s\" % (sorting_string)))\r\n \r\n #Add Current Label\r\n print(sorting_string)\r\n for label in labels:\r\n if label not in sorting_string:\r\n cat_string = 'Unlabelled'\r\n else:\r\n cat_string = sorting_string\r\n \r\n self.cat_label.configure(text = ('Current Category : %s' %(cat_string)))\r\n \r\n ####\r\n\r\n if self.index < self.n_paths:\r\n self.set_image(self.df.sorted_in_folder[self.index])\r\n else:\r\n self.master.quit()", "def do_preprocess(pdf_files):\n\n for pdf_file in pdf_files:\n\n base, ext = os.path.splitext(pdf_file)\n \n create_intermediate_files()\n \n # 1) split a pdf file, a page a pdf\n num_pages = pdfutil.split(os.path.join(cwd, pdf_file), DIR_PAGE)\n\n for i in xrange(1, num_pages + 1):\n\n file = '%04d.pdf' % i\n page_pdf = os.path.join(DIR_PAGE, file)\n \n pdfutil.convert_srgb(page_pdf, DIR_SRGB)\n srgb_pdf = os.path.join(DIR_SRGB, file)\n \n pdfutil.convert_vti(srgb_pdf, DIR_VTI)\n vti_pdf = os.path.join(DIR_VTI, file)\n\n pdfutil.convert_tiff(vti_pdf, DIR_TIFF)\n pdfutil.convert_text(vti_pdf, DIR_TEXT)\n\n # merge background pdf files\n pdfutil.merge_to_single_pdf(DIR_TIFF, DIR_BACK, 'back')\n background_pdf = os.path.join(DIR_BACK, 'back.pdf')\n\n # merge foreground pdf files\n output_text_pdf = '%s_text' % base\n pdfutil.merge_to_single_pdf(DIR_TEXT, DIR_TEXT, output_text_pdf)\n foreground_pdf = os.path.join(DIR_TEXT, output_text_pdf + '.pdf')\n pdfutil.export_by_preview(foreground_pdf)\n\n # merge background and foreground\n merged_pdf = os.path.join(cwd, '%s_merge.pdf' % base)\n pdfutil.merge_text_and_back(foreground_pdf, background_pdf, merged_pdf)\n\n final_pdf = '%s_final' % base\n pdfutil.optimize(merged_pdf, final_pdf)\n final_pdf = os.path.join(cwd, final_pdf + '.pdf')\n\n # aggregate what we want\n for f in (foreground_pdf, final_pdf):\n shutil.move(f, DIR_FINAL)\n \n # clean up unused\n os.unlink(merged_pdf) \n cleanup_intermediate_files()", "def write_image_to_file_incrementally(image):\r\n i = 0\r\n while os.path.exists(\"sample%s.jpeg\" % i):\r\n i += 1\r\n with open(\"sample%s.jpeg\" % i, \"wb\") as f:\r\n f.write(image)", "def autographs_topdf(autographs: dict, name: str) -> None:\n img = mpimg.imread(\"./cover.png\")\n with PdfPages(f\"{name}.pdf\") as pdf:\n plt.figure(figsize=(11.69, 8.27))\n ax = plt.axes()\n ax.imshow(img)\n ax.patch.set_facecolor(\"black\")\n plt.axis(\"off\")\n pdf.savefig()\n plt.close()\n\n img = mpimg.imread(\"./auto.png\")\n for key, value in autographs.items():\n plt.figure(figsize=(11.69, 8.27))\n plt.subplot(2, 1, 1)\n plt.imshow(img)\n plt.axis(\"off\")\n\n plt.subplot(2, 1, 2)\n plt.text(\n 0.5,\n 0.5,\n value,\n horizontalalignment=\"center\",\n verticalalignment=\"center\",\n )\n plt.title(\n f\"Autograph by {key}\",\n fontdict={\n \"family\": 
\"serif\",\n \"color\": \"#f0bc81\",\n \"weight\": \"normal\",\n \"size\": 16,\n },\n )\n plt.axis(\"off\")\n\n # plt.tight_layout()\n pdf.savefig()\n plt.close()", "def work(self):\n p = self.printer\n p.setFullPage(True)\n painter = QPainter(p)\n for n, (num, page) in enumerate(self.pageList):\n if self.isInterruptionRequested():\n self.aborted = True\n return p.abort()\n self.progress.emit(num, n+1, len(self.pageList))\n if n:\n p.newPage()\n painter.save()\n # center on the page and use scale 100% (TEMP)\n r = p.pageRect()\n m = QTransform()\n m.translate(r.center().x(), r.center().y())\n m.scale(p.logicalDpiX() / page.dpi, p.logicalDpiY() / page.dpi)\n m.rotate(page.rotation * 90)\n m.scale(page.scaleX, page.scaleY)\n m.translate(page.pageWidth / -2, page.pageHeight / -2)\n painter.setTransform(m, True)\n page.print(painter)\n painter.restore()\n return painter.end()", "def gen_text (path_img):\n \n try:\n image_files = []\n os.chdir(path_img)\n for filename in os.listdir(os.getcwd()):\n if filename.endswith(\".JPG\"):\n image_files.append(path_img + filename)\n with open(\"images.txt\", \"w\") as outfile:\n for image in image_files:\n outfile.write(image)\n outfile.write(\"\\n\")\n outfile.close()\n \n except KeyboardInterrupt:\n print('Interrupted')\n try:\n os.system.exit(0)\n except SystemExit:\n os._exit(0)", "def process(self, image):", "def gen(camera):\n \n while True:\n \n \n \n frame = camera.get_frame()\n \n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n')", "def next(self):\n\n fn_frame = os.path.join(self.sequence_root, 'image_2/%06d.png' % (self.index))\n fn_velo = os.path.join(self.sequence_root, 'velodyne/%06d.bin' %(self.index))\n fn_label = os.path.join(self.sequence_root, 'labels/%06d.label' %(self.index))\n\n if not os.path.exists(fn_frame) or not os.path.exists(fn_velo):\n print('End of sequence')\n return False\n \n if not os.path.exists(fn_label):\n print('Semantic KITTI label file not found')\n return False\n\n self.frame = cv2.imread(fn_frame)\n if self.frame is None:\n print('File could not be read',fn_frame)\n \n self.points = np.fromfile(fn_velo, dtype=np.float32).reshape(-1, 4)[:,:3]\n self.n_pts = self.points.shape[0]\n label = np.fromfile(fn_label, dtype=np.uint32).reshape((-1))\n\n if label.shape[0] == self.points.shape[0]:\n self.sem_label = label & 0xFFFF # semantic label in lower half\n self.inst_label = label >> 16 # instance id in upper half\n assert((self.sem_label + (self.inst_label << 16) == label).all()) # sanity check\n else:\n print(\"Points shape: \", self.points.shape)\n print(\"Label shape: \", label.shape)\n raise ValueError(\"Scan and Label don't contain same number of points\")\n\n self.index += 1\n return True", "def page_next(self):\n if self._npos >= self._npages - 1:\n # exit if we are already at the end\n self.page_quit()\n else:\n self._npos += 1\n if self.exit_on_lastpage and self._npos >= (self._npages - 1):\n self.display(show_footer=False)\n self.page_quit(quiet=True)\n else:\n self.display()", "def process_next_image(self):\n if self.queue:\n next_queue_item = self.queue.popleft()\n if type(next_queue_item) == str:\n if next_queue_item == 'clear':\n self.signal_status_message.emit('Clearing ROI data (from request in image queue)')\n self.clear()\n return\n [image,file_id,image_num] = next_queue_item\n # print('image_num',image_num)\n # print('next image',self.next_image)\n self.signal_status_message.emit('Started processing ID {} Im {}'.format(file_id,image_num))\n image = image - 
self.emccd_bias # don't edit in place because this seemed to cause an issue with images not showing in GUI. Maybe not thread safe?\n # print('image min',np.min(image))\n # print('image max',np.max(image))\n image_num_too_big = False\n for group in self.roi_groups:\n for roi in group.rois:\n try:\n roi.counts[image_num][file_id] = image[roi.x:roi.x+roi.w,roi.y:roi.y+roi.h].sum()\n except IndexError: # image_num was not valid for the number of images that MAIA is expecting\n image_num_too_big = True\n if image_num_too_big:\n self.signal_status_message.emit('Image number {} is greater than max expected images, so this image has been ignored (most likely cause is rearrangement toggle).')\n self.signal_status_message.emit('Finished processing ID {} Im {}'.format(file_id,image_num))\n self.calculate_thresholds()", "def do_all(self):\r\n self.frame_gen.start()\r\n\r\n while True:\r\n msg = self.rec_queue.get()\r\n if msg[0] == 'sync':\r\n self.send_queue.put(('sync', time.time()))\r\n continue\r\n if msg[0] == 'finish':\r\n break\r\n if msg[0] != 'img':\r\n raise ValueError(f'strange msg: {msg}')\r\n\r\n frame_num = msg[1]\r\n time_ms = self.ms_per_frame * frame_num\r\n rawimg = self.frame_gen.generate_at(time_ms)\r\n self.img_queue.put((frame_num, rawimg))\r\n self.send_queue.put(('post', frame_num))\r\n rawimg = None\r\n\r\n self.frame_gen.finish()\r\n\r\n self.img_queue.close()\r\n self.rec_queue.close()\r\n self.send_queue.close()", "def save(self):\n\n self.image.save(\"./output/\" + self.name + \" pg\" + str(self._page) + \".png\")", "def page_next(self):\n if self._pos >= self._npages - 1:\n # exit if we are already at the end\n self.page_quit()\n else:\n self._pos += 1\n self._display()", "def generate():\n global output_frame, lock\n while True:\n with lock:\n if output_frame is None:\n continue\n (flag, encoded_image) = cv2.imencode(\".jpg\", output_frame)\n if not flag:\n continue\n yield (b'--frame\\r\\n' b'Content-Type: image/jpeg\\r\\n\\r\\n' +\n bytearray(encoded_image) + b'\\r\\n')", "def gen():\n while True:\n retval, frame = vc.read()\n\n if retval:\n #image_processing(frame)\n frame = cv2.imencode('.jpg', frame)[1].tobytes()\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n')", "def move_next_image(self):\r\n self.index += 1\r\n progress_string = \"%d/%d\" % (self.index+1, self.n_paths)\r\n self.progress_label.configure(text=progress_string)\r\n \r\n #sorting_string = df.sorted_in_folder[self.index].split(os.sep)[-2] #shows the last folder in the filepath before the file\r\n sorting_string = self.df.sorted_in_folder[self.index].split(\"/\")[-2]\r\n self.sorting_label.configure(text=(\"In folder: %s\" % (sorting_string)))\r\n \r\n # if 'OCT_V2' in sorting_string:\r\n # cat_string = 'Unlabelled'\r\n # else:\r\n # cat_string = \r\n \r\n for label in labels:\r\n if label not in sorting_string:\r\n cat_string = 'Unlabelled'\r\n else:\r\n cat_string = sorting_string\r\n \r\n self.cat_label.configure(text = ('Current Category : %s' %(cat_string)))\r\n \r\n display_name = \"Name = %s\" % (self.file_names[self.index])\r\n self.name_label.configure(text = display_name)\r\n \r\n if self.index < self.n_paths:\r\n self.set_image(self.df.sorted_in_folder[self.index])\r\n else:\r\n self.master.quit()", "def merger_page_pdf(self, input_pdf, output_pdf):\n output = PdfFileWriter()\n # Appending two pdf-pages from two different files\n _input_pdf = PdfFileReader(open(input_pdf, \"rb\"))\n for i in range(30):\n page = _input_pdf.getPage(0)\n artbox = 
page.artBox\n x = artbox[0]\n y = artbox[1]\n y = artbox[2]\n y = artbox[3]\n output.addPage(page)\n # output.addPage(_input_pdf.getPage(0))\n # output.addPage(_input_pdf.getPage(0))\n\n # Writing all the collected pages to a file\n output.write(open(output_pdf, \"wb\"))\n\n\n # Creating a routine that appends files to the output file\n\n\n # Creating an object where pdf pages are appended to", "def generate_images(self, model, test_input, step, dst_dir):\n prediction = model(test_input)\n\n plt.figure(figsize=(12, 12))\n display_list = [test_input[0], prediction[0]]\n title = ['Input Image', 'Predicted Image']\n\n for i in range(2):\n plt.subplot(1, 2, i+1)\n plt.title(title[i])\n # getting the pixel values between [0, 1] to plot it.\n plt.imshow(display_list[i] * 0.5 + 0.5)\n plt.axis('off')\n filename = os.path.join(dst_dir, 'generated_imgs_at_step_{:06d}.png'.format(step))\n plt.savefig(filename)", "def onConvertPDFToImage(self):\n import os\n\n try:\n file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'bin64', 'PDF_TO_IMAGE.exe')\n os.startfile(file_path)\n except Exception as ex:\n message = 'error occurred({}) in {}:{}'.format(ex, sys.exc_info()[-1].tb_frame.f_code.co_filename,\n sys.exc_info()[-1].tb_lineno)\n self.addMessage.emit(MessageType.Error, message)", "def save_img(self):\r\n self.extract_info_from_file()\r\n path_0 = os.path.join(self.output_path, self.field_id, self.patient_id + self.ext)\r\n path_1 = os.path.join(self.output_path, self.field_id + '_' + self.instance, self.patient_id + self.ext)\r\n if self.shot == '0': # first shot\r\n if os.path.exists(path_0) or os.path.exists(path_1):\r\n print(self.patient_id, 'already done')\r\n pass\r\n else:\r\n if not self.img_computed:\r\n self.compute_img()\r\n if self.instance == '0':\r\n self.img.save(path_0)\r\n else:\r\n self.img.save(path_1)\r\n else: # newer shot\r\n if not self.img_computed:\r\n self.compute_img()\r\n if self.instance == '0':\r\n self.img.save(path_0)\r\n else:\r\n self.img.save(path_1)", "def get_next_img(self, current_img):\n list = self.listImages.previews\n indx_next = (list.index(current_img) + 1) % len(list)\n next_img = list[indx_next]\n return next_img", "def generate(self):\n if not (os.path.exists(self.target_path.get()) and self.target_file.get()):\n messagebox.showerror(\"Invalid Arguments\",\n \"At least one path was not valid.\")\n return\n self.progress.grid(row=100, columnspan=3, sticky=\"ews\")\n self.update()\n thread = threading.Thread(\n target=folder_structure_backup.iterate_and_save,\n args=(self.target_path.get(), self.target_file.get(),\n \"big\" if not self.fastmode.get() else \"fast\"))\n thread.daemon = True\n thread.start()\n while thread.is_alive():\n self.progress.step(amount=2)\n self.progress.update()\n self.update()\n time.sleep(0.1)\n self.progress.grid_forget()\n self.finished = True\n self.destroy()\n self.quit()", "def generate_image(self, img, seednum=None):\n r = self.csettings['R']\n if self.csettings['auto_cleanup']:\n clean_old_entries(self.csettings['captchas_dir'])\n\n cs = self.csettings\n imagesize = cs['imagesize']\n fontdir = path.join(cs['captchaconf_dir'], 'fonts')\n fontnames = [path.join(fontdir, x) for x in listdir(fontdir) ]\n\n for dummy in range(self.csettings['iterations']):\n posnew = 7\n if dummy != 0:\n cs.generate_solution()\n # render characters\n for c in self.csettings['solution']:\n fgimage = Image.new('RGB', imagesize, cs['fgcolor'])\n font = ImageFont.truetype(r.choice(fontnames), r.randrange(*cs['minmaxheight']))\n 
charimage = Image.new('L', font.getsize(' %s ' % c), '#000000')\n draw = ImageDraw.Draw(charimage)\n draw.text((0,0), ' %s' % c, font=font, fill='#ffffff')\n if cs['eraser']:\n eraserline = ( 0, r.choice(range(0, charimage.size[1])), \n charimage.size[0], r.choice(range(0, charimage.size[1])))\n draw = ImageDraw.Draw(charimage)\n draw.line(eraserline, width=cs['eraser_width'] , fill='#000000')\n charimage = charimage.rotate(r.randrange(*cs['minmaxrotations']), expand=1,\n resample=Image.BILINEAR)\n charimage = charimage.crop(charimage.getbbox())\n maskimage = Image.new('L', imagesize)\n ypos = r.randrange(*cs['minmaxvpos'])\n maskimage.paste(charimage, \n (posnew, ypos, \n charimage.size[0]+posnew, \n charimage.size[1]+ypos)\n )\n img = Image.composite(fgimage, img, maskimage)\n posnew += charimage.size[0] + r.randrange(*cs['minmaxkerning'])\n\n # draw line(s)\n for dummy in range(cs.get('num_lines')):\n linex = r.choice( range(2, cs['minmaxheight'][1]) )\n minmaxliney = ( cs['minmaxvpos'][0], \n cs['minmaxvpos'][1] + cs['minmaxheight'][0])\n linepoints = [linex, r.randrange(*minmaxliney)]\n while linex < posnew:\n linex += r.randrange(*cs['minmaxheight']) * 0.8\n linepoints.append(linex)\n linepoints.append(r.randrange(*minmaxliney))\n draw = ImageDraw.Draw(img)\n draw.line(linepoints, width=cs['line_width']\n , fill=cs['fgcolor'])\n return img", "def image_generator_not_random(list_of_files, crop_size=320, scale=1):\n while True:\n text_region = []\n for jpgname in list_of_files:\n print jpgname\n # jpgname = np.random.choice(list_of_files)\n img = cv2.imread(jpgname)\n pattern = re.compile('jpg')\n txtname = pattern.sub('txt', jpgname)\n if not os.path.isfile(txtname):\n continue\n cropped_image = img\n with open(txtname, 'r') as f:\n for line in f:\n line_split = line.strip().split(',')\n print line_split\n # clockwise\n (x1, y1, x2, y2) = line_split[0:4]\n (x3, y3, x4, y4) = line_split[4:8]\n text_region.append([string.atof(x1), string.atof(y1), string.atof(x2), string.atof(y2),\n string.atof(x3), string.atof(y3), string.atof(x4), string.atof(y4)])\n if cropped_image is None or text_region is None or \\\n cropped_image.shape[0] != crop_size or cropped_image.shape[1] != crop_size:\n continue\n yield [scale * cropped_image, text_region]", "def genFrameImages((widthPixels, heightPixels), flashColourGen, flashColourGenPipTrain, numFrames, FPS, superSamplingScale=8, BG_COLOUR=(0,0,0), TEXT_COLOUR=(255,255,255), GFX_COLOUR=(255,255,255), title=\"\", TITLE_COLOUR=(255,255,255), FRAMES_AS_FIELDS=False, frameSkipChecker=None, segments=[]):\n\n # we're going to draw a larger (super sampled) image and then scale it down\n # to get smoothing (compensating for the lack of anti-aliased drawing functions\n # in PIL)\n\n width = widthPixels * superSamplingScale\n height = heightPixels * superSamplingScale\n\n flashCols = list(flashColourGen)[0:numFrames]\n flashColsPipTrain = list(flashColourGenPipTrain)[0:numFrames]\n\n # we'll pretend we're working within a rectangle (0,0) - (160,90)\n # and use a scaling function to map to out actual dimensions\n scaler = AspectPreservingCoordinateScaler((160,90),(width,height))\n\n # load a font for text\n font = loadFont(sizePt = scaler.s(4))\n smallfont = loadFont(sizePt = scaler.s(4))\n \n # work out the segment description text, then check its size and adjust the fontsize to ensure it fits within bounding area\n if segments:\n segment_description_text = \"\\n\".join(map(lambda seg : seg[\"description\"], segments))\n tmpimg = Image.new(\"RGB\", (width, 
height), color=BG_COLOUR)\n tmpdraw = ImageDraw.Draw(tmpimg)\n w,h = tmpdraw.multiline_textsize(segment_description_text, font=smallfont)\n max_w, max_h = scaler.xy((140,13))\n \n shrink_factor = min(float(max_w) / w, float(max_h) / h, 1)\n smallfont = loadFont(sizePt = scaler.s(4*shrink_factor))\n \n poy = 0 # pie Y offset\n dfy = 65 # duration and FPS labels Y offset\n if segments:\n poy = -10\n dfy = 19\n\n\n\n WHITE=(255,255,255)\n BLACK=(0,0,0)\n\n if FRAMES_AS_FIELDS:\n imageName = \"field\"\n labelFps = FPS / 2\n else:\n imageName = \"frame\"\n labelFps = FPS\n\n\n for frameNum in range(0,numFrames):\n if frameSkipChecker is not None:\n shouldSkip=frameSkipChecker(frameNum)\n if shouldSkip:\n yield None\n continue\n\n timecode = frameNumToTimecode(frameNum, FPS, framesAreFields=FRAMES_AS_FIELDS)\n timeSecs = float(frameNum) / FPS\n nextTimeSecs = float(frameNum+1) / FPS # time of next frame after this\n durationTimecode = frameNumToTimecode(numFrames, FPS)\n\n # create black image and an object to let us draw on it\n img = Image.new(\"RGB\", (width, height), color=BG_COLOUR)\n draw = ImageDraw.Draw(img)\n\n # draw a flashing rectangular box on the left side\n flashColour = flashCols[frameNum]\n topLeft = scaler.xy((10, 30))\n bottomRight = scaler.xy((40, 60))\n draw.rectangle(topLeft + bottomRight, outline=None, fill=GFX_COLOUR)\n topLeft = scaler.xy((11, 31))\n bottomRight = scaler.xy((39, 59))\n draw.rectangle(topLeft + bottomRight, outline=None, fill=flashColour)\n\n # draw text label explaining to attach light sensor to the flashing box\n topLeft = scaler.xy((41, 37))\n draw.text(topLeft, \"Use light detector\", font=font, fill=TEXT_COLOUR)\n topLeft = scaler.xy((41, 41))\n draw.text(topLeft, \"on centre of\", font=font, fill=TEXT_COLOUR)\n topLeft = scaler.xy((41, 45))\n draw.text(topLeft, \"this box\", font=font, fill=TEXT_COLOUR)\n\n # draw text labels giving frame number, timecode and seconds covered by this frame\n topLeft = scaler.xy((10, 4))\n draw.text(topLeft, timecode, font=font, fill=TEXT_COLOUR)\n topLeft = scaler.xy((10, 9))\n draw.text(topLeft, \"%06d of %d %ss\" % (frameNum, numFrames, imageName), font=font, fill=TEXT_COLOUR)\n topLeft = scaler.xy((10, 14))\n draw.text(topLeft, u\"%08.3f \\u2264 t < %08.3f secs\" % (timeSecs, nextTimeSecs), font=font, fill=TEXT_COLOUR)\n\n topLeft = scaler.xy((10,dfy))\n draw.text(topLeft, \"Duration: \" + durationTimecode, font=font, fill=TEXT_COLOUR)\n topLeft = scaler.xy((10,dfy+5))\n draw.text(topLeft, \"%d fps\" % labelFps, font=font, fill=TEXT_COLOUR)\n\n # and more text labels, but this time right justified\n text = title\n w,h = font.getsize(text)\n topLeft = scaler.xy((150,4))\n topLeft = topLeft[0] - w, topLeft[1]\n draw.text(topLeft, text, font=font, fill=TITLE_COLOUR)\n\n # draw an outer ring segment indicating the time period covered by the current frame\n topLeft = scaler.xy((105, 20+poy))\n bottomRight = scaler.xy((155, 70+poy))\n angle1 = 360 * (frameNum % FPS) / FPS\n angle2 = 360 * ((frameNum % FPS) + 1) / FPS\n draw.pieslice(topLeft + bottomRight, start=270+angle1, end=270+angle2, outline=None, fill=GFX_COLOUR)\n\n # hollow it out to make the circle into a ring\n topLeft = scaler.xy((108, 23+poy))\n bottomRight = scaler.xy((152, 67+poy))\n draw.ellipse(topLeft + bottomRight, outline=None, fill=BG_COLOUR)\n\n\n # draw frame num ring\n topLeft = scaler.xy((110, 25+poy))\n bottomRight = scaler.xy((150, 65+poy))\n angle = 360 * (frameNum % FPS) / FPS\n if (frameNum / FPS) % 2 == 0: # if this is an even second 
(0-0.9, 2-2.9, 4-4.9 etc)\n draw.pieslice(topLeft + bottomRight, start=270, end=270+angle, outline=None, fill=GFX_COLOUR)\n else:\n draw.pieslice(topLeft + bottomRight, start=270+angle, end=270+360, outline=None, fill=GFX_COLOUR)\n\n # hollow it out to make the circle into a ring\n topLeft = scaler.xy((113, 28+poy))\n bottomRight = scaler.xy((147, 62+poy))\n draw.ellipse(topLeft + bottomRight, outline=None, fill=BG_COLOUR)\n \n # draw outer for segments\n if segments:\n topLeft = scaler.xy((115-0.25, 30+poy-0.25))\n bottomRight = scaler.xy((145+0.25, 60+poy+0.25))\n draw.ellipse(topLeft + bottomRight, fill=WHITE, outline=None)\n topLeft = scaler.xy((115, 30+poy))\n bottomRight = scaler.xy((145, 60+poy))\n draw.ellipse(topLeft + bottomRight, fill=BLACK, outline=None)\n\n # draw progress pie\n topLeft = scaler.xy((115, 30+poy))\n bottomRight = scaler.xy((145, 60+poy))\n angle = 360.0*frameNum/numFrames\n precise_filled_pieslice(draw, topLeft + bottomRight, start=270, end=270+angle, outline=None, fill=GFX_COLOUR)\n\n # draw segments over the pieslice\n if segments:\n for i in range(0, len(segments)):\n angle = math.radians(270 + 360.0*segments[i][\"startSecs\"]/numFrames*FPS)\n centre = scaler.xy((130,45+poy))\n armEnd = scaler.xy((130 + 15*math.cos(angle), 45+poy + 15*math.sin(angle)))\n draw.line([centre, armEnd], fill=WHITE, width=int(scaler.s(0.25)))\n \n segStartFrame = segments[i][\"startSecs\"] * FPS\n nextStartFrame = segments[(i+1) % len(segments)][\"startSecs\"] * FPS\n if nextStartFrame <= segStartFrame:\n nextStartFrame += numFrames\n midAngle = math.radians(270 + 360.0* (segStartFrame+nextStartFrame)/2/numFrames)\n w,h = font.getsize(segments[i][\"label\"])\n centre = scaler.xy((130 + 15*math.cos(midAngle)*0.7, 45+poy + 15*math.sin(midAngle)*0.7))\n topLeft = centre[0] - w/2, centre[1] - h/2\n draw.text(topLeft, segments[i][\"label\"], fill=WHITE, font=font)\n\n # draw segment long labels\n topLeft = scaler.xy((10,61))\n draw.multiline_text(topLeft, segment_description_text, fill=WHITE, font=smallfont)\n \n # draw pulse train at the bottom\n LIM=FPS\n NUM_BLOBS = 2*LIM + 1\n blobSpacing = 150.0/NUM_BLOBS\n\n for offset in range(-LIM, +LIM+1):\n left = 80+blobSpacing*(offset-0.5)\n right = 80+blobSpacing*(offset+0.5)\n\n topLeft = scaler.xy(( left, 80 ))\n bottomRight = scaler.xy(( right, 85 ))\n\n seqIndex = offset + frameNum\n if seqIndex >= 0 and seqIndex < numFrames:\n colour = flashColsPipTrain[seqIndex]\n draw.rectangle(topLeft + bottomRight, outline=None, fill = colour)\n\n if offset == 0:\n # draw blob above\n topLeft = scaler.xy(( left, 75 ))\n bottomRight = scaler.xy(( right, 80 ))\n draw.rectangle(topLeft + bottomRight, outline=None, fill = GFX_COLOUR)\n\n # and below\n topLeft = scaler.xy(( left, 85 ))\n bottomRight = scaler.xy(( right, 90 ))\n draw.rectangle(topLeft + bottomRight, outline=None, fill = GFX_COLOUR)\n\n # shrink the image using high quality downsampling\n try:\n scalingMode = Image.LANCZOS\n except AttributeError:\n scalingMode = Image.BICUBIC\n\n rescaledImage = img.resize((widthPixels,heightPixels), scalingMode)\n\n yield rescaledImage", "def getNextImage(self):\n # Get the next image\n frame = self.checkForImage()\n if frame is None:\n return False\n\n # ----------------------- #\n # Background correction\n background = cv2.imread(\"./Utilities/correct.png\")\n frame = cv2.add(frame, background)\n # ----------------------- #\n\n\n # --- FIRST TIME --- #\n if self.Stitching == False:\n self.Stitching = True\n # Display current frame\n self.current = 
frame\n self.capture = self.convertNumpy2Image(cv2.resize(self.current, (408,320)))\n\n # --- ADD TO CANVAS --- #\n\n # Center of canvas\n center = findCenterStart(self.canvas, self.rows, self.cols)\n\n # Find positions to insert image\n rowStart, rowEnd, colStart, colEnd = computeStartPos(center, 0, 0,\n self.rows*self.mul, self.cols*self.mul)\n\n # Add first image to canvas\n self.canvas[rowStart:rowEnd, colStart:colEnd, :] = frame\n\n # Set previous starting position to center\n self.prevStart = center\n\n\n # MAX POSITIONS\n self.N = rowStart\n self.S = self.N + (self.rows*self.mul)\n self.W = colStart\n self.E = self.W + (self.cols*self.mul)\n\n temp = self.canvas[self.N:self.S,self.W:self.E]\n # BOX\n pt1 = (colStart, rowStart)\n pt2 = (colStart+self.cols, rowStart+self.rows)\n cv2.rectangle(temp, pt1, pt2, (255,0,0))\n # Update stich for display\n self.stitch = self.convertNumpy2Image(cv2.resize(temp, (850,750)))\n\n #Update distance\n self.dist = \"Distances from Edge: N: {N}, E {E}, S: {S}, W:{W}\".format(N=self.N,S=self.S, E=self.E, W=self.W)\n\n return True\n\n # --- EVERY OTHER TIME --- #\n else:\n self.previous = self.current\n self.current = frame\n\n cols = self.cols\n rows = self.rows\n mul = self.mul\n\n # Grab next image pair\n imageA = self.previous\n imageB = self.current\n imageA_small = cv2.resize(self.previous, (cols, rows))\n imageB_small = cv2.resize(self.current, (cols, rows))\n\n # Find difference between two images\n rowOff, colOff = findOffset(imageA_small, imageB_small,alg=\"SIFT\")\n\n # Increase offset\n rowOff *= mul; colOff *= mul\n\n # Find splice coordinates\n rowStart, rowEnd, colStart, colEnd = computeStartPos(self.prevStart, rowOff,\n colOff, rows*mul, cols*mul)\n\n # Add image to canvas\n self.canvas[rowStart:rowEnd, colStart:colEnd, :] = maskOverlap(self.canvas[rowStart:rowEnd, colStart:colEnd, :], imageB)\n\n # Update previous StartPosition\n self.prevStart = (rowStart, colStart)\n\n # Update Max Positions\n if rowStart < self.N:\n self.N = rowStart\n if rowEnd > self.S:\n self.S = rowEnd\n if colStart < self.W:\n self.W = colStart\n if colEnd > self.E:\n self.E = colEnd\n\n # Crop for display\n temp = self.canvas[self.N:self.S,self.W:self.E].copy()\n # --- BOX --- #\n # Calculate placement of box\n distFromNorth = abs(self.N - rowStart)\n distFromWest = abs(self.W - colStart)\n # add bounding box\n pt1 = (distFromWest, distFromNorth)\n pt2 = (distFromWest+self.cols*self.mul, distFromNorth+self.rows*self.mul)\n cv2.rectangle(temp, pt1, pt2, (0,255,0), 20)\n # Update stich for display\n self.stitch = self.convertNumpy2Image(cv2.resize(temp, (850,750)))\n\n # --- ALL THE TIME --- #\n # Update capture everytime!\n self.capture = self.convertNumpy2Image(cv2.resize(self.current, (408,320)))\n\n # Add distance to help figure out where edge of canvas is!\n self.dist = \"Distances from Edge: N: {N}, E {E}, S: {S}, W:{W}\".format(N=self.N,S=self.S, E=self.E, W=self.W)\n return True", "def _upload_page(self, local_pdf_path, page_number, jpeg_prefix, bucket,\n webhook_url, webhook_data):\n local_jpeg_prefix = jpeg_prefix.replace('/', '-')\n local_large_jpeg_path = '%s/%s-large.jpeg' % (self.working_dir,\n local_jpeg_prefix)\n local_small_jpeg_path = '%s/%s-small.jpeg' % (self.working_dir,\n local_jpeg_prefix)\n local_jpeg_path = '%s/%s.jpeg' % (self.working_dir, local_jpeg_prefix)\n\n # subprocess.check_call(['convert', '-density', '300', '%s[%d]' %\n # (local_pdf_path, page_number), local_large_jpeg_path])\n # gs is one indexed\n gs_page_number = page_number 
+ 1\n subprocess.check_call(['gs', '-dNOPAUSE', '-sDEVICE=jpeg', '-dFirstPage=%d' % gs_page_number,\n '-dLastPage=%d' % gs_page_number, '-sOutputFile=%s' % local_large_jpeg_path,\n '-dJPEGQ=90', '-r300', '-q', local_pdf_path, '-c', 'quit'])\n subprocess.check_call(['convert', '-resize', '800x800',\n local_large_jpeg_path, local_jpeg_path])\n subprocess.check_call(['convert', '-resize', '300x300',\n local_large_jpeg_path, local_small_jpeg_path])\n self._log('Finished converting page %d' % page_number)\n\n # store converted pages in S3\n large_jpeg_key = s3.Key(bucket)\n jpeg_key = s3.Key(bucket)\n small_jpeg_key = s3.Key(bucket)\n\n large_jpeg_key.key = '%s-large.jpeg' % (jpeg_prefix)\n jpeg_key.key = '%s.jpeg' % (jpeg_prefix)\n small_jpeg_key.key = '%s-small.jpeg' % (jpeg_prefix)\n\n large_jpeg_key.set_contents_from_filename(local_large_jpeg_path)\n jpeg_key.set_contents_from_filename(local_jpeg_path)\n small_jpeg_key.set_contents_from_filename(local_small_jpeg_path)\n\n large_jpeg_key.set_acl('public-read')\n jpeg_key.set_acl('public-read')\n small_jpeg_key.set_acl('public-read')\n\n self._log('Uploaded page %d' % page_number)\n self._call_webhook(webhook_url, webhook_data, local_jpeg_path, page_number)", "def forward_partial(self, drawing):\n assert not self.is_cuda\n drawing = drawings_to_cpu(drawing)\n ns = len(drawing)\n pimgs = torch.zeros(ns+1, *self.imsize)\n canvas = torch.zeros(*self.imsize)\n for i, stk in enumerate(drawing):\n canvas, _ = self.add_stroke(canvas, stk)\n pimgs[i+1] = canvas\n\n return pimgs", "def pdf_split(directory, correct_rotation, even_pages, merge_config, min_image_data_density):\n\n log.info('Working on PDF files in %s', directory)\n\n output_filenames = []\n\n # Open the PDF files\n all_pdf_files = [os.path.join(directory, filename) for filename in all_pdf_files_in_directory(directory)]\n log.info('Found the following PDF files\\n %s', '\\n '.join(all_pdf_files))\n opened_files = [open(path, 'rb') for path in all_pdf_files]\n\n # Take all the pages in all the PDF files into a generator\n all_pages = concat_pdf_pages(opened_files)\n\n # First split pages into chunks when a page in landscape orientation is detected\n page_chunks1 = split_on(all_pages, predicate=is_landscape)\n # Next merge adjacent chunks that meets certain condition with a merger function\n # this is used to handle situation where the scan is double sided\n page_chunks2 = merge_with_next(page_chunks1, predicate=merge_config[0], merger=merge_config[1])\n\n # For all pages that belongs to the same document ID\n for idx, pages_to_write in enumerate(page_chunks2, start=1):\n # Create a PDF writer instance\n pdf_writer = PdfFileWriter()\n\n # Correct the rotation of the first page in file\n first_page = pages_to_write[0]\n # If this is the first page of the first PDF file, it might not be in landscape view\n # so we check for that\n if is_landscape(first_page):\n log.debug('Correction rotation on the first page=%s', repr(first_page))\n # Correct the rotation the way the user specifies\n correct_rotation(first_page)\n\n # Put those pages into a writer\n detect_blank_page_under_threshold = partial(detect_blank_page, min_density=min_image_data_density)\n non_blank_pages_to_write = ifilter(detect_blank_page_under_threshold, pages_to_write)\n add_pages(pdf_writer, non_blank_pages_to_write)\n # Conditionally make the output PDF file have an even number of pages, which makes printing the PDF file easier\n if even_pages:\n make_pagenum_even(pdf_writer)\n\n output_filename = '{0:05}.pdf'.format(idx)\n 
output_filenames.append(output_filename)\n # And write those pages to a single PDF file\n log.info('Writing PDF pages to %s', output_filename)\n write_pdf_file(output_filename, pdf_writer)\n\n # Make sure to close all the files that were opened\n log.debug('Closing all opened files')\n for opened_file in opened_files:\n opened_file.close()\n\n return output_filenames", "def gen(camera):\n\tcamera.start_camera_thread()\n\twhile True:\n\t\tframe = camera.get_frame()\n\t\tyield (b'--frame\\r\\n'\n\t\t\t b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n')", "def render_png(self, target=None, split_pages=False, resolution=300):\n if target is not None and split_pages:\n # get destination filename and extension\n filename, ext = os.path.splitext(target)\n for page_num, page in enumerate(self._document.pages):\n page_name = filename + f\"_pg_{page_num}\" + ext\n self._document.copy([page]).write_png(\n target=page_name, resolution=resolution\n )\n return None\n elif target is None:\n # return image bytes string if no target is specified\n png_bytes, png_width, png_height = self._document.write_png(\n target=target, resolution=resolution\n )\n return png_bytes\n else:\n return self._document.write_png(target=target, resolution=resolution)", "def run_observation(self):\n\n self._generate_direct_image() # to calibrate x_ref and y_ref\n\n num_frames = len(self.exp_start_times)\n progress = Progress(num_frames)\n self.progess = progress\n\n progress_line = 'Generating frames 0/{} done'.format(num_frames)\n progress.print_status_line(progress_line)\n progress.progress_line = progress_line\n\n for i, start_time in enumerate(self.exp_start_times):\n filenum = i + 1\n self._generate_exposure(start_time, filenum)\n\n progress.increment()\n progress_line = 'Generating frames {}/{} done'.format(filenum,\n num_frames)\n progress.print_status_line(progress_line)\n\n # so it can be retreived by exposure_generator\n progress.progress_line = progress_line", "def create_next_button(self, img_next, but_pos):\n tk.Button(self.top, height=50, width=50, image=img_next, \n command=lambda: self.retrieve_input()).grid(row=but_pos[0], \n column=but_pos[1])", "def generate():", "def main():\n mip = parametros()\n mir = Reporte(CURRENT_PATH, mip.debug, mip.overwrite)\n pdfs = mir.obtener()\n if pdfs:\n print(\"Obteniendo nuevos pdf:\")\n for pdf in pdfs:\n print(f\"* {pdf}\")\n\n for file in glob.glob(f\"{CURRENT_PATH}/resources/pdf/*.pdf\"):\n data = mir.parser(file)\n mir.escribir(data)", "def convert_pdf2image(pdf_path_obj):\n pdf = convert_from_path(str(pdf_path_obj))\n\n for index, page in enumerate(pdf):\n image_path = image_dir / Path(\n pdf_path_obj.stem + \"_\" + \"{:0>4}\".format(index) + \".png\"\n )\n page.save(str(image_path), \"png\")", "def generate_pdf(list,id):\n\n doc = SimpleDocTemplate(settings.STATIC_ROOT+\"/tests/\"+str(id)+\"/\"+str(id)+\".pdf\")\n\n Story = [Spacer(1,2*inch)]\n styles = stylesheet()\n global Title\n\n # Add 10 questions with boxes below\n for i in list:\n if not i[0] in \"skills-scan\" and not i[0] in \"csrfmiddlewaretoken\" and not i[0] in \"titre\" and not i[0] in \"custom\":\n tmp = int(i[0])+1\n bogustext = (str(tmp)+\". 
%s\" % i[1])\n p = Paragraph(bogustext, styles['default'])\n # Write the paragraph\n\n draw = Drawing()\n # rect(x1,y1,width,height)\n rec = Rect(0, 100, 450, 150)\n rec.fillColor = colors.white\n # draw the rect under each paragraph\n draw.add(rec)\n p.keepWithNext = True\n Story.append(p)\n Story.append(draw)\n Story.append(Spacer(1,-0.9 * inch))\n elif i[0] in \"titre\":\n Title = i[1]\n # build the document by inserting the whole story\n doc.build(Story, onFirstPage=myFirstPage, onLaterPages=myLaterPages)\n return str(id)+\".pdf\"", "def advance_image_count(self,file_id=None,image_num=None):\n # self.next_image = (self.next_image+1) % self.num_images\n if file_id is not None:\n self.file_id = file_id\n if image_num is None:\n self.next_image += 1\n else:\n self.next_image = image_num + 1\n if self.next_image >= self.num_images:\n self.next_image = 0\n self.file_id += 1\n self.signal_next_image_num.emit(self.next_image)\n self.signal_file_id.emit(self.file_id)" ]
[ "0.6618433", "0.63336223", "0.62817127", "0.6245009", "0.6133024", "0.60717624", "0.60625297", "0.5926207", "0.58587873", "0.58525664", "0.584871", "0.58015543", "0.5758933", "0.57559025", "0.5751829", "0.5703416", "0.5676019", "0.5664894", "0.563774", "0.56377035", "0.5630192", "0.56093603", "0.5591889", "0.55361825", "0.5491271", "0.5488991", "0.54884547", "0.5481395", "0.5476489", "0.5475486", "0.54588646", "0.5443004", "0.54318136", "0.54258347", "0.54123", "0.5407354", "0.5405202", "0.53947115", "0.5392463", "0.5385272", "0.53786945", "0.53681475", "0.53649837", "0.5363759", "0.53373414", "0.5335946", "0.5317602", "0.5314791", "0.530591", "0.5293994", "0.529048", "0.5285999", "0.5281593", "0.5269489", "0.5269182", "0.523494", "0.52344567", "0.5233613", "0.5232073", "0.5222494", "0.5221644", "0.52150875", "0.5210715", "0.52066964", "0.52028686", "0.51837736", "0.5178833", "0.5167006", "0.5164105", "0.515552", "0.51544106", "0.515255", "0.51514244", "0.51443994", "0.51428086", "0.51409453", "0.51269466", "0.51223534", "0.51100206", "0.5109934", "0.510463", "0.5097061", "0.5092329", "0.5091597", "0.50873846", "0.50857013", "0.5085016", "0.50847834", "0.5079706", "0.50711304", "0.50691056", "0.5068491", "0.5066832", "0.5066038", "0.5058115", "0.50579464", "0.5051163", "0.50501066", "0.50497365", "0.50470936", "0.50425696" ]
0.0
-1
When sources override `get_database_names`, they will need to set up multiple inspectors. They can use this function.
def set_inspector(self, database_name: str) -> None: logger.info(f"Ingesting from database: {database_name}") new_service_connection = deepcopy(self.service_connection) new_service_connection.database = database_name self.engine = get_connection(new_service_connection) self.inspector = inspect(self.engine) self._connection = None # Lazy init as well
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_database_names(self) -> Iterable[str]:\n custom_database_name = self.service_connection.__dict__.get(\"databaseName\")\n\n database_name = self.service_connection.__dict__.get(\n \"database\", custom_database_name or \"default\"\n )\n # By default, set the inspector on the created engine\n self.inspector = inspect(self.engine)\n yield database_name", "def get_databases(self):\n pass", "def getDatabaseName(self):\n raise NotImplementedError", "def _get_db_names(self, dbs, strict=True):\n dbs = utils.coerce_to_list(dbs)\n db_names = [utils.get_name(db) for db in dbs]\n if strict:\n good_dbs = self.instance.list_databases()\n good_names = [utils.get_name(good_db) for good_db in good_dbs]\n bad_names = [db_name for db_name in db_names\n if db_name not in good_names]\n if bad_names:\n bad = \", \".join(bad_names)\n raise exc.NoSuchDatabase(\"The following database(s) were not \"\n \"found: %s\" % bad)\n return db_names", "def get_databases ():\n return _dbobjects[:]", "def get_db_name(self):\n\t\treturn conf.db_name", "def databases(database_container):\n database_container.setupall()\n return database_container", "def setup(self):\n return self.setupDatabases()", "def getTables(self):\n\treturn self.dbNames", "def get_sqla_makers():\n registry = dict()\n for schema in omix_schemas.get_schemas().values():\n sqla_maker = TablelikeSqlaMaker(schema)\n tbl_name = sqla_maker.get_table_name() \n registry[tbl_name] = sqla_maker\n \n users_nm = nest_users.COLLECTION_NAME\n users_sqlam = core_db.get_nest_users_sqla_maker()\n registry[users_nm] = users_sqlam\n\n return registry", "def getDatabases(self):\n query = 'SELECT name FROM sys.databases'\n df = pd.read_sql(query, self.conn)\n return df", "def setupDatabases(con, options, dbList):\n currentDatabases = dbGetFirstColumnAsMap(con, \"select datname from pg_database where datistemplate = false\")\n currentRolenames = dbGetFirstColumnAsMap(con, \"select rolname from pg_roles\")\n trace(\"currentDatabases = \" + str(currentDatabases))\n for dbName in dbList:\n trace(\"dbName='%s'\" % str(dbName))\n setupDatabase(con, options, currentDatabases, currentRolenames, dbName, dbList[dbName])", "def __set_database_info(self):\n if self.service == \"sigs\":\n self.database_name = \"sigs_local\"\n # dict of dump files and the tables associated\n self.dump_files = {\n \"01_auth.sql\": [\"auth_group\", \"auth_group_permissions\", \"auth_permission\", \"auth_user\",\n \"auth_user_groups\", \"auth_user_user_permissions\"],\n \"02_django.sql\": [\"django_content_type\", \"django_migrations\", \"django_admin_log\", \"django_session\"],\n \"03_daily_irradience.sql\": [\"solar_models_dailyglobalirradiance\", \"solar_models_hourlyglobalirradiance\"]\n }", "def meta_db_tables(self) -> list:\r\n def _passer(**kwargs):\r\n data = self.engine.execute(\"\"\"\r\n SELECT * FROM sqlite_master WHERE type='table';\r\n \"\"\").fetchall()\r\n table_names = [i[1] for i in data]\r\n return table_names\r\n return self._connectionController(_passer)", "def database_name(self) -> str:\n return pulumi.get(self, \"database_name\")", "def makeDatabaseNamesList(n, ):", "def databases(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"databases\")", "def databases(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"databases\")", "def getTableNames(self):\n\tif not self.dbNames:\n\t # get db table names from DB\n\t if self.dbType==\"sqlite\":\n\t query=\"SELECT name FROM sqlite_master WHERE 
type='table';\"\n\t elif self.dbType==\"mysql\":\n\t query=\"SHOW TABLES\"\n\t self.startTxn(\"SQLUtil.__init__\")\n\t tup=self.fetchAll(query)\n\t self.endTxn(\"SQLUtil.__init__\")\n\t for item in tup:\n\t self.dbNames.append(item[0])\n\treturn self.dbNames", "def get_name(self) -> str:\n return self.dbname", "def databases(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"databases\")", "def _get_requested_databases(self):\r\n requested_databases = []\r\n if ((self._requested_namespaces is not None) and\r\n (self._requested_namespaces != [])):\r\n for requested_namespace in self._requested_namespaces:\r\n if requested_namespace[0] is '*':\r\n return []\r\n elif requested_namespace[0] not in IGNORE_DBS:\r\n requested_databases.append(requested_namespace[0])\r\n return requested_databases", "def mysql_database_name():\n return 'test'", "def databases(self):\n return self._databases", "def db_for_read(self, model, **hints):\n state_db = self._db_name(model)\n if state_db in settings.DATABASES:\n name = state_db\n else:\n name = 'default'\n logger.debug('db_for_read({}): {}'.format(state_db, name))\n return name", "def uses_database(self, dbname):\n used = False\n if any([dbname.upper() in y for y in [x.upper() for x in self._dbnames]]):\n used = True\n return used", "def getDatabaseName(self):\n return self._base.getDatabaseName()", "def get_database_name(self, data: dict) -> str: # pylint: disable=arguments-differ\n if not data[\"database_name\"] and self.service_connection.database:\n return self.service_connection.database\n return data[\"database_name\"]", "def conf_master_datasource():\n\n if DB_TYPE == \"mysql\":\n file_path = '../data/dbconnectors/mysql/master-datasources.xml'\n url = ['jdbc:mysql://%s:%d/%s?useSSL=false' % (HOST, PORT, REG_DB),\n 'jdbc:mysql://%s:%d/%s?useSSL=false' % (HOST, PORT, USER_DB),\n 'jdbc:mysql://%s:%d/%s?useSSL=false' % (HOST, PORT, AM_DB)]\n try:\n master_datasource_conf(file_path, 'url', url)\n master_datasource_conf(file_path, 'username', USER_NAME)\n master_datasource_conf(file_path, 'password', PWD)\n print(\"Successfully configured master-datasource.xml file for MySQL database!\")\n except:\n print(\"ERROR: configuring master datasource for MySQL database!!!\")\n elif DB_TYPE == \"oracle\":\n file_path = '../data/dbconnectors/oracle/master-datasources.xml'\n url = 'jdbc:oracle:thin:%s@%s:%d/%s' % (USER_NAME, HOST, PORT, SID)\n try:\n master_datasource_conf(file_path, 'url', url)\n master_datasource_conf(file_path, 'username', USER_NAME)\n master_datasource_conf(file_path, 'password', PWD)\n print(\"Successfully configured master-datasource.xml file for Oracle database!\")\n except:\n print(\"ERROR: configuring master datasource for Oracle database!!!\")\n elif DB_TYPE == \"mssql\":\n file_path = '../data/dbconnectors/mssql/master-datasources.xml'\n url = ['jdbc:sqlserver://%s:%d;databaseName=%s;SendStringParametersAsUnicode=false' % (HOST, PORT, REG_DB),\n 'jdbc:sqlserver://%s:%d;databaseName=%s;SendStringParametersAsUnicode=false' % (HOST, PORT, USER_DB),\n 'jdbc:sqlserver://%s:%d;databaseName=%s;SendStringParametersAsUnicode=false' % (HOST, PORT, AM_DB)]\n try:\n master_datasource_conf(file_path, 'url', url)\n master_datasource_conf(file_path, 'username', USER_NAME)\n master_datasource_conf(file_path, 'password', PWD)\n print(\"Successfully configured master-datasource.xml file for MSSQL database!\")\n except:\n print(\"ERROR: configuring master datasource for MSSQL database!!!\")\n elif DB_TYPE == \"postgresql\":\n 
file_path = '../data/dbconnectors/postgresql/master-datasources.xml'\n url = ['jdbc:postgresql://%s:%d/%s' % (HOST, PORT, REG_DB),\n 'jdbc:postgresql://%s:%d/%s' % (HOST, PORT, USER_DB),\n 'jdbc:postgresql://%s:%d/%s' % (HOST, PORT, AM_DB)]\n try:\n master_datasource_conf(file_path, 'url', url)\n master_datasource_conf(file_path, 'username', USER_NAME)\n master_datasource_conf(file_path, 'password', PWD)\n print(\"Successfully configured master-datasource.xml file for PostgreSQL database!\")\n except:\n print(\"ERROR: configuring master datasource for PostgreSQL database!!!\")\n else:\n print(\"Database type is invalid!!!\")", "def database_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"database_name\")", "def get_test_db():\n defaults = get_defaults()\n test_defaults = {k: v for k, v in defaults.items() if 'test' in k}\n key_list = list(test_defaults.keys())\n key_list.sort()\n db = None\n for k in key_list:\n test_name = test_defaults[k]\n m = re.match('(\\w+)://.*?/([\\w.]+)', test_name)\n if m is None:\n logger.warning(\"Poorly formed db name: %s\" % test_name)\n continue\n sqltype = m.groups()[0]\n try:\n db = DatabaseManager(test_name, sqltype=sqltype, label=k)\n db.grab_session()\n except Exception as e:\n logger.error(\"%s didn't work\" % test_name)\n logger.exception(e)\n continue # Clearly this test database won't work.\n logger.info(\"Using test database %s.\" % k)\n break\n if db is None:\n logger.error(\"Could not find any test database names.\")\n return db", "def database_name(self):\n try:\n return self._database_name\n except:\n pass\n\n if 'X-UnitTest' in self.request.headers:\n if self.request.headers['X-UnitTest'] == 'True':\n self._database_name = TEST_DATABASE\n return TEST_DATABASE\n default_database = self.application.databases['default']['NAME']\n self._database_name = default_database\n return default_database", "def get_db_name(account=None, species=None, db_type=None, release=None,\n division=None, DEBUG=False):\n if account is None:\n account = get_ensembl_account(release=release)\n \n if DEBUG:\n print \"Connection To:\", account\n print \"Selecting For:\", species, db_type, release\n \n server = DbConnection(account, db_name='PARENT')\n cursor = server.cursor()\n show = \"SHOW DATABASES\"\n if species or db_type or release:\n pattern = make_db_name_pattern(species, db_type, release)\n show = \"%s LIKE %s\" % (show, pattern)\n if DEBUG:\n print show\n cursor.execute(show)\n rows = cursor.fetchall()\n dbs = []\n for row in rows:\n try:\n if division is not None and division not in row[0]:\n continue\n name = EnsemblDbName(row[0])\n if (release is None or name.Release == str(release)) and\\\n (db_type is None or name.Type == db_type):\n dbs.append(name)\n except (IndexError, RuntimeError):\n if DEBUG:\n print \"FAIL:\", row[0]\n continue\n return dbs", "def check_name_db ():\n db_checks = [DB_FIRST_MALE, DB_FIRST_FEMALE,\n DB_LAST_SIMPLE, DB_LAST_NAMESON,\n DB_LAST_GAELIC1, DB_LAST_GAELIC2,\n DB_LAST_COMBO1, DB_LAST_COMBO2,\n DB_LAST_UPPER1, DB_LAST_UPPER2]\n\n db_exists = db.database_exists\n for db_name in db_checks:\n if not db_exists(db_name):\n raise DatabaseException, db_name", "def get_tables(self, db_name):\n pass", "def _get_database_name(database):\n # make sure the return is only one data type\n filenames = []\n if database is not None:\n if not isinstance(database, list):\n database = [database]\n for db in database:\n filenames += glob.glob(db)\n\n return filenames", "def db_name(self):\n return self._db_name", "def 
list_databases():\n config = load_config()\n\n databases = [x for x in config.keys() if \"schemas\" in config[x]]\n return databases", "def produce_all_database(is_debug):\n\tproduce_database([\"apnea-ecg\", \"train\"], is_debug)\n\tproduce_database([\"apnea-ecg\", \"test\"], is_debug)", "def dbname(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"dbname\")", "def db_for_read(self, model, **hints):\n\n return self.db_name", "async def prepare_databases(self):", "def mysql_database(run_services, mysql_database_getter, mysql_database_name):\n if run_services:\n return mysql_database_getter(mysql_database_name)", "def db_for_read(self, model, **hints):\n if hasattr(model, 'db_name'):\n return model.db_name\n return 'default'", "def databases(self, databases):\n\n self._databases = databases", "def __init__(self):\n self.databases = []", "def _sync_databases(self):\n host, port = self._src.client().address\n log.info('sync databases from %s:%d' % (host, port))\n for dbname in self._src.client().database_names():\n if dbname in self._ignore_dbs:\n log.info(\"skip database '%s'\" % dbname)\n continue\n if not self._conf.data_filter.valid_db(dbname):\n log.info(\"skip database '%s'\" % dbname)\n continue\n self._sync_database(dbname)\n log.info('all databases done')", "def get_single_db_name():\n expected_db_name = os.environ.get(\"MONGO_DB\")\n if not expected_db_name and is_testing():\n expected_db_name = f\"Test-{time.time_ns() // 1000000}\"\n\n return expected_db_name", "def database_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"database_name\")", "def getDatabaseName(self):\n return f\"n{self.name.capitalize()}\"", "def summary(self):\n\t\tprint \"Summary--------------------------------------:\"\n\t\tprint \"Available data sources are:\"\n\t\tfor path in self.available_databases:\n\t\t\tprint path", "def get_databases(self):\n query = mssqlqueries.get_databases()\n logger.info(u'Databases query: %s', query)\n for tabular_result in self.execute_query(query):\n return [x[0] for x in tabular_result[0]]", "def get_default_database(self):\n attr_name = mangle_delegate_name(self.__class__, '__default_database_name')\n default_db_name = getattr(self.delegate, attr_name)\n if default_db_name is None:\n raise ConfigurationError('No default database defined')\n\n return self[default_db_name]", "def db_for_read(self, model, **hints):\n if model._meta.app_label == self.app_label:\n return self.db_name\n return None", "def __init__(self, logger, dbi, owner):\n DBFormatter.__init__(self, logger, dbi)\n self.owner = \"%s.\" % owner if not owner in (\"\", \"__MYSQL__\") else \"\"\n self.sql = \\\n\"\"\"\nSELECT S.SITE_ID, S.SITE_NAME\nFROM %sSITES S \n\"\"\" % (self.owner)", "def luks_data_sources(broker):\n datasources = []\n\n commands = []\n if LocalSpecs.cryptsetup_luks_dump_token_commands in broker:\n commands.extend(broker[LocalSpecs.cryptsetup_luks_dump_token_commands])\n if LocalSpecs.cryptsetup_luks_dump_commands in broker:\n commands.extend(broker[LocalSpecs.cryptsetup_luks_dump_commands])\n\n for command in commands:\n lines_without_tokens = filter_token_lines(command.content)\n\n regex = re.compile(r'[\\t ]*(MK digest:|MK salt:|Salt:|Digest:)(\\s*([a-z0-9][a-z0-9] ){16}\\n)*(\\s*([a-z0-9][a-z0-9] )+\\n)?', flags=re.IGNORECASE)\n filtered_content = regex.sub(\"\", \"\\n\".join(lines_without_tokens) + \"\\n\")\n\n datasources.append(\n DatasourceProvider(content=filtered_content, relative_path=\"insights_commands/\" + command.cmd.replace(\"/\", \".\").replace(\" \", 
\"_\"))\n )\n\n if datasources:\n return datasources\n\n raise SkipComponent", "def database_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"database_name\")", "def do_list(self, line):\n\t\tx = [i for i in self.client.list_databases() if i['name'] not in ['admin','config','line','local','mongoengine_test','pymongo_test']]\n\t\tfor db in x:\n\t\t\tprint(db['name'])", "def setupDatabases(self):\n param = self.getDefaultDatabaseConnectionParameter()\n db = DatabaseFactory.getDatabase(self.defaultDriver(), {})\n db.createDatabase(param)\n db.connect(param)\n if db.isConnected():\n self.settingsDb = db\n db.createObservations()\n db.createSensors()\n else:\n return False\n # replace by settings validation method later\n return self.check()", "def set_test_database(self):\n db_manager.db = SqliteDatabase(settings.UNITTEST_DATABASE_NAME)\n db_manager.Employee._meta.database = db_manager.db\n db_manager.LogEntry._meta.database = db_manager.db", "def _database(self):\n ...", "def __init__(self, \n logger_name='SetupDB', \n ini_filename='db.ini', \n table_names=['person', 'login_data']):\n self.logger = setup_logger(logger_name)\n self.ini_filename = ini_filename\n self.table_names = table_names\n self.conn_info = {}\n\n self.sql_query_table_person = sql_query_table_person\n self.sql_query_table_login_data = sql_query_table_login_data", "def fetchall(self, databaseName):\n pass", "def get_database(self, instance, name):\n return instance.get_database(name)", "def db_for_read(self, model, **hints):\r\n if model._meta.app_label == self.APP_LABEL:\r\n return self.DB_NAME\r\n return None", "def check_db(self):\n if self.db == 'user':\n db = USERS_LIST\n return db\n elif self.db == 'questions':\n db = QUESTIONS_LIST\n return db\n elif self.db == 'meetups':\n db = MEETUPS_LIST\n return db\n\n elif self.db == 'rsvp':\n db = RSVP_LIST\n return db", "def get_available_databases():\n return map(\n lambda (key, value): (key, value[\"description\"]),\n DumpConverter.DATABASES.items())", "def multi_database(database_factories):\n databases = {}\n result = []\n for factory in database_factories:\n name = factory.name or ''\n if name in databases:\n raise ValueError(\"Duplicate database name: %r\" % name)\n db = factory.open()\n db.databases = databases\n db.database_name = name\n databases[name] = db\n # Grrr bug in ZODB. 
Database doesn't declare that it implements\n # IDatabase.\n if not ZODB.interfaces.IDatabase.providedBy(db):\n zope.interface.directlyProvides(db, ZODB.interfaces.IDatabase)\n zope.component.provideUtility(db, ZODB.interfaces.IDatabase, name)\n db.setActivityMonitor(ZODB.ActivityMonitor.ActivityMonitor())\n result.append(db)\n\n return result, databases", "def db_for_write(self, model, **hints):\n state_db = self._db_name(model)\n if state_db in settings.DATABASES:\n name = state_db\n else:\n name = 'default'\n logger.debug('db_for_write({}): {}'.format(state_db, name))\n return name", "def change_db(cls, dbname):\n setattr(cls, 'db', staticmethod(lambda: Db(dbname)))", "def db_proxy_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"db_proxy_name\")", "def create_all_database_extensions(self) -> str:\n unique_databases = set(self._get_all_databases())\n for database in unique_databases:\n # load any pg extensions that are required\n db_conn = self.get_connection(database_override=database)\n for ext in self.extensions:\n statement = f'create extension if not exists \\\"{ext}\\\"'\n try:\n db_conn.execute(statement)\n except sqlalchemy.exc.IntegrityError as error:\n logger.error(\n 'Duplicate extension creation of %s caused an error:\\n%s', ext, error)", "def get_database_dsn():\n return getattr(config, f\"POSTGRES_DSN_{config.SERVER_MODE}\")", "def db_lookup(client):\n dblist_dict= client.get_list_database()\n # print(\"def db_lookup 010:\", dblist_dict)\n # print(\"def db_lookup 020:\", dblist_dict[3]['name'])\n # for element in dblist_dict:\n # print(\"db_lookup 3:\", element['name'])\n return dblist_dict", "async def _init_database(self):\n await self.database.executescript(\n f\"\"\"\n CREATE TABLE IF NOT EXISTS \"{DBUser.table_name}\" (\n \"user_id\" INTEGER PRIMARY KEY AUTOINCREMENT,\n \"name\" TEXT NOT NULL UNIQUE,\n \"created_at\" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,\n \"creation_flags\" INTEGER NOT NULL DEFAULT 0,\n \"creation_metadata\" TEXT,\n \"comment\" TEXT\n );\n CREATE TABLE IF NOT EXISTS \"{DBUserAlias.table_name}\" (\n \"user_id\" INTEGER NOT NULL,\n \"alias\" TEXT NOT NULL,\n \"case_sensitive\" BOOLEAN NOT NULL DEFAULT 0 CHECK(case_sensitive IN (0,1)),\n \"created_at\" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,\n \"creation_flags\" INTEGER NOT NULL DEFAULT 0,\n \"creation_metadata\" TEXT,\n \"comment\" TEXT,\n PRIMARY KEY (\"user_id\", \"alias\"),\n FOREIGN KEY (\"user_id\") REFERENCES \"{DBUser.table_name}\" (\"user_id\")\n ON DELETE CASCADE\n );\n CREATE TABLE IF NOT EXISTS \"{Participant.table_name}\" (\n \"participant_id\" INTEGER NOT NULL,\n \"name\" TEXT NOT NULL UNIQUE,\n \"user_id\" INTEGER,\n PRIMARY KEY (\"participant_id\"),\n FOREIGN KEY (\"user_id\") REFERENCES \"{DBUser.table_name}\" (\"user_id\")\n ON DELETE SET NULL\n );\n CREATE TABLE IF NOT EXISTS \"protocols\" (\n \"identifier\" TEXT NOT NULL,\n \"name\" TEXT NOT NULL,\n PRIMARY KEY (\"identifier\")\n ) WITHOUT ROWID;\n CREATE TABLE IF NOT EXISTS \"{Source.table_name}\" (\n \"source_id\" INTEGER NOT NULL,\n \"protocol\" TEXT NOT NULL,\n \"server\" TEXT,\n \"channel\" TEXT,\n PRIMARY KEY (\"source_id\"),\n FOREIGN KEY (\"protocol\") REFERENCES \"protocols\" (\"identifier\")\n ON UPDATE CASCADE\n );\n\n CREATE VIEW IF NOT EXISTS users_all_names (\n user_id, name, case_sensitive\n ) AS\n SELECT user_id, name, 1 FROM \"{DBUser.table_name}\"\n UNION\n SELECT user_id, alias, case_sensitive FROM \"{DBUserAlias.table_name}\";\n\n CREATE VIEW IF NOT EXISTS participants_all_names (\n 
participant_id, user_id, name, case_sensitive\n ) AS\n SELECT participant_id, user_id, name, 1 FROM \"{Participant.table_name}\"\n UNION\n SELECT participant_id, user_id, alias, case_sensitive FROM \"{DBUserAlias.table_name}\"\n JOIN \"{Participant.table_name}\" USING(user_id);\n\n CREATE UNIQUE INDEX IF NOT EXISTS \"idx_participants_user_id\"\n ON \"{Participant.table_name}\" (\"user_id\");\n\n CREATE TRIGGER IF NOT EXISTS tg_update_participant_name_from_user\n AFTER UPDATE OF name ON \"{DBUser.table_name}\"\n BEGIN\n UPDATE \"{Participant.table_name}\"\n SET name = new.name\n WHERE user_id = new.user_id;\n END;\n\n CREATE TRIGGER IF NOT EXISTS tg_update_user_name_from_participant\n AFTER UPDATE OF name ON \"{Participant.table_name}\"\n BEGIN\n UPDATE \"{DBUser.table_name}\"\n SET name = new.name\n WHERE user_id = new.user_id;\n END;\n\n CREATE TRIGGER IF NOT EXISTS tg_new_user_upsert_participants\n AFTER INSERT ON \"{DBUser.table_name}\"\n BEGIN\n INSERT INTO \"{Participant.table_name}\" (name, user_id)\n VALUES (new.name, new.user_id)\n ON CONFLICT (name) DO UPDATE\n SET user_id = new.user_id;\n END;\n\n CREATE TRIGGER IF NOT EXISTS tg_prevent_linked_participant_delete\n BEFORE DELETE ON \"{Participant.table_name}\"\n BEGIN\n SELECT RAISE(FAIL, 'Can''t delete participant that is linked to a user')\n FROM \"{Participant.table_name}\"\n WHERE participant_id = old.participant_id\n AND user_id IS NOT NULL;\n END;\n \"\"\"\n )", "def register_sqla_bindings(sqla_metadata):\n for sqla_maker in get_sqla_makers().values():\n sqla_maker.get_sqla_table(sqla_metadata)\n return", "def getDefaultDBName() -> str:\n return f\"dbname={getpass.getuser()}\"", "def main():\n discovered_path = AskFile(0, '*.db', 'Select the discovered database')\n if discovered_path is None:\n return\n\n with open(discovered_path, 'rb') as f:\n functions = pickle.load(f)\n\n rename_functions(functions)", "def getDatabaseName( self ):\n return self.mDbname", "def __get_available_databases(self, root):\n\t\tfor i in walk_tree(root):\n\t\t\tif '.sqlite3' in i:\n\t\t\t\tyield os.path.abspath(i)", "def populate_names(apps, schema_editor):\n Distillery = apps.get_model('distilleries', 'Distillery')\n for distillery in Distillery.objects.filter(name__isnull=True):\n collection = distillery.collection\n warehouse = collection.warehouse\n distillery.name = '%s.%s.%s' % (warehouse.backend, warehouse.name,\n collection.name)\n distillery.save()", "def db_for_write(self, model, **hints):\n if model._meta.app_label == self.app_label:\n return self.db_name\n return None", "def dbName(self, code) -> str:\n return f'{code}{self.name}'", "def show_dbs(*dbs):\n if dbs:\n log.debug(\"get dbs from pillar: %s\", dbs)\n result = {}\n for db in dbs:\n result[db] = __salt__[\"pillar.get\"](\"oracle:dbs:\" + db)\n return result\n else:\n pillar_dbs = __salt__[\"pillar.get\"](\"oracle:dbs\")\n log.debug(\"get all (%s) dbs from pillar\", len(pillar_dbs))\n return pillar_dbs", "def db_for_write(self, model, **hints):\n if hasattr(model, 'db_name'):\n return model.db_name\n return 'default'", "def database(self) -> str:\n\t\treturn os.getenv('APP_DATABASE', 'memory').lower()", "def replica_set_name(self):\n ...", "def database():\n return conf().database", "def test_backend_name_reporting(self):\n for volume_id in self.volume_id_list_without_prefix:\n self._test_backend_name_reporting_by_volume_id(volume_id)", "def db_for_read(self, model, **hints):\n if is_recon_model(model):\n return settings.RECON_NG_DATABASE_NAME\n\n return None", "def 
_db_uri_parts():\n return app.config['SQLALCHEMY_DATABASE_URI'].split('/')", "def setUp(self):\n\n fq_dataset_name = self.fq_table_names[0].split('.')\n self.fq_dataset_name = '.'.join(fq_dataset_name[:-1])\n\n fq_sandbox_name = self.fq_sandbox_table_names[0].split('.')\n self.fq_sandbox_name = '.'.join(fq_sandbox_name[:-1])\n\n super().setUp()", "def fulldbname(self):\n return 'myfls_'+self.user.username+'_'+self.dbname", "def parse_databases(default_dbname=\"cal_manager.db\"):\n db_list = []\n calconf = get_calconf()\n if not calconf:\n return db_list\n upload_cookie = calconf.get(\"upload_cookie\")\n # Allow old-format file to be read\n try:\n databases = calconf[\"databases\"]\n except KeyError:\n databases = calconf.get(\"database_dir\")\n if not databases:\n return db_list\n with warnings.catch_warnings():\n warnings.simplefilter(\"always\", DeprecationWarning)\n warnings.warn(\"Use 'databases' instead of 'database_dir' in \"\n \"config file.\",\n DeprecationWarning\n )\n for line in databases.splitlines():\n if not line: # handle blank lines\n continue\n db, *flags = shlex.split(line)\n # \"get\" is default if there are no flags, but if any flags are\n # specified, then \"get\" must be there explicitly\n kwargs = {\"get_cal\": not bool(flags),\n \"store_cal\": False}\n for flag in flags:\n kwarg = f\"{flag}_cal\"\n if kwarg in kwargs:\n kwargs[kwarg] = True\n else:\n raise ValueError(\"{}: Unknown flag {!r}\".format(db, flag))\n\n expanded_db = path.expanduser(db)\n if path.isdir(expanded_db):\n db = path.join(db, default_dbname)\n cls = LocalDB\n elif path.isfile(expanded_db):\n cls = LocalDB\n elif \"/\" in expanded_db and \"//\" not in expanded_db:\n cls = LocalDB\n else: # does not check\n cls = RemoteDB\n kwargs[\"upload_cookie\"] = upload_cookie\n db_list.append((cls, db, kwargs))\n return db_list", "def get_database_directory(self):\n pass", "def monkeypatch_connections(self):\n \n def create_test_db(self, verbosity=1, autoclobber=False):\n \"\"\"\n Creates a test database, prompting the user for confirmation if the\n database already exists. Returns the name of the test database created.\n \"\"\"\n # Don't import django.core.management if it isn't needed.\n test_database_name = self._get_test_db_name()\n \n if self.connection.settings_dict.get('ENGINE', '').endswith('.sqlite3')\\\n and test_database_name != ':memory:':\n if os.access(test_database_name, os.F_OK):\n print \"sqlite test database found !\"\n \n #self._create_test_db(verbosity, autoclobber)\n \n self.connection.close()\n self.connection.settings_dict[\"NAME\"] = test_database_name\n \n # Confirm the feature set of the test database\n self.connection.features.confirm()\n \n # Get a cursor (even though we don't need one yet). 
This has\n # the side effect of initializing the test database.\n self.connection.cursor()\n\n return test_database_name\n \n def destroy_test_db(self, old_database_name, verbosity=1):\n \"\"\"\n Destroy a test database, prompting the user for confirmation if the\n database already exists.\n \"\"\"\n self.connection.close()\n test_database_name = self.connection.settings_dict['NAME']\n if verbosity >= 1:\n test_db_repr = ''\n if verbosity >= 2:\n test_db_repr = \" ('%s')\" % test_database_name\n print \"Ignore the test database for alias '%s'%s...\" % (\n self.connection.alias, test_db_repr)\n \n # Temporarily use a new connection and a copy of the settings dict.\n # This prevents the production database from being exposed to potential\n # child threads while (or after) the test database is destroyed.\n # Refs #10868 and #17786.\n settings_dict = self.connection.settings_dict.copy()\n settings_dict['NAME'] = old_database_name \n \n def _destroy_test_db(self, test_database_name, verbosity):\n print \"Keep the test database !\" #%test_database_name\n self.connection.close()\n \n \n for alias in connections:\n \"\"\"\n django.test.simple.DjangoTestSuiteRunner\n django.db.backends.creation\n django.db.backends.mysql.base\n \"\"\"\n connection = connections[alias]\n #if connection.settings_dict.get('ENGINE', '').endswith('.sqlite3'):\n \n if not self.options['setupdbs']:\n f1 = types.MethodType(create_test_db, connection.creation, DatabaseCreation)\n connection.creation.create_test_db = f1\n \n if not self.options['teardowndbs']:\n f2 = types.MethodType(destroy_test_db, connection.creation, DatabaseCreation)\n connection.creation.destroy_test_db = f2", "def __init_db(self, db_name):\n\t\tclient = pymongo.MongoClient(self.__db_url)\n\t\treturn client[db_name]", "def db_for_write(self, model, **hints):\n\n return self.db_name", "def db_proxy_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"db_proxy_name\")", "def get_owned_databases(cursor: Cursor, owner: Owner) -> List[str]:\n try:\n role = pgsql.get_role(cursor, owner_name(owner))\n except KeyError:\n return []\n else:\n return pgsql.get_role_databases(cursor, role)" ]
[ "0.7197303", "0.6543269", "0.6351389", "0.61743826", "0.58797634", "0.5850529", "0.5847243", "0.58301836", "0.5747333", "0.5737547", "0.5716585", "0.5708333", "0.56262535", "0.5581609", "0.5548496", "0.5545168", "0.5539052", "0.5539052", "0.5537416", "0.5527633", "0.5509341", "0.5498357", "0.54821086", "0.5468678", "0.5440239", "0.5432933", "0.54319775", "0.5428956", "0.53975326", "0.53838503", "0.5372532", "0.5370059", "0.53496665", "0.53226036", "0.5319347", "0.5319177", "0.53106415", "0.5307808", "0.5302745", "0.53007776", "0.52778673", "0.5257952", "0.5244305", "0.524109", "0.5238873", "0.5237684", "0.5236228", "0.5224055", "0.5215691", "0.5212502", "0.5210269", "0.5190939", "0.51802295", "0.5179333", "0.5173831", "0.516505", "0.5160693", "0.5157462", "0.51404446", "0.51207244", "0.51130426", "0.5107883", "0.51024836", "0.51011676", "0.50952214", "0.509284", "0.50827837", "0.5079807", "0.50756985", "0.5075451", "0.50638103", "0.5050962", "0.5050901", "0.504431", "0.501748", "0.501185", "0.50049084", "0.49971294", "0.49817863", "0.49815366", "0.49615166", "0.49544442", "0.49509147", "0.49459136", "0.49354756", "0.49329168", "0.49278784", "0.49268222", "0.49254733", "0.49150705", "0.4907564", "0.4906776", "0.4902331", "0.49013263", "0.489723", "0.48956054", "0.48875955", "0.48871005", "0.48849696", "0.48808083" ]
0.53393203
33
Default case with a single database. The database name might or might not be informed by the source. Sources with multiple databases should override this method and apply the necessary filters.
def get_database_names(self) -> Iterable[str]:
    custom_database_name = self.service_connection.__dict__.get("databaseName")

    database_name = self.service_connection.__dict__.get(
        "database", custom_database_name or "default"
    )
    # By default, set the inspector on the created engine
    self.inspector = inspect(self.engine)
    yield database_name
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def db_for_read(self, model, **hints):\n if model._meta.app_label == 'delivery':\n return 'db1'\n return None", "def get_default_database(self):\n attr_name = mangle_delegate_name(self.__class__, '__default_database_name')\n default_db_name = getattr(self.delegate, attr_name)\n if default_db_name is None:\n raise ConfigurationError('No default database defined')\n\n return self[default_db_name]", "def db_for_write(self, model, **hints):\n if model._meta.app_label == 'delivery':\n return 'db1'\n return None", "def db_for_read(self, model, **hints):\n if hasattr(model, 'db_name'):\n return model.db_name\n return 'default'", "def reset_database_to_default(self):\n _src = os.path.abspath(self.default_db)\n _dst = os.path.abspath(self.db_path)\n copyfile(_src, _dst)", "def db_for_read (self, model, **hints):\n return 'default'", "def default_DataSource(self, data_source):\n\n self._default_DataSource = self._get_obj_from_str(data_source)\n print(\"Setting default DataSource to {} version {}\".format(\n data_source.name,\n getattr(data_source, 'version', 'not specified')))", "def database_name(self):\n try:\n return self._database_name\n except:\n pass\n\n if 'X-UnitTest' in self.request.headers:\n if self.request.headers['X-UnitTest'] == 'True':\n self._database_name = TEST_DATABASE\n return TEST_DATABASE\n default_database = self.application.databases['default']['NAME']\n self._database_name = default_database\n return default_database", "def get (self, database, default=None):\n if hasattr(self, database):\n return getattr(self, database)\n else:\n return default", "def db_for_read(self, model, **hints):\n if model._meta.app_label == 'researcherquery':\n return 'safedb'\n return None", "def db_for_read(self, model, **hints):\n state_db = self._db_name(model)\n if state_db in settings.DATABASES:\n name = state_db\n else:\n name = 'default'\n logger.debug('db_for_read({}): {}'.format(state_db, name))\n return name", "def db_for_write(self, model, **hints):\n if hasattr(model, 'db_name'):\n return model.db_name\n return 'default'", "def db_for_read(self, model, **hints):\r\n if model._meta.app_label == self.APP_LABEL:\r\n return self.DB_NAME\r\n return None", "def db_for_read(self, model, **hints):\n if model._meta.app_label == 'compras':\n return 'db2'\n return None", "def default_schema_name_switch(self):\n return exclusions.closed()", "def db_for_read(self, model, **hints):\n if model._meta.app_label == self.app_label:\n return self.db_name\n return None", "def database(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"database\")", "def database(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"database\")", "def _insert_default_fallback(self):\n db.add_destination_with_aliases(self.dbm,\n \"https://duckduckgo.com?q={}\",\n \"DuckDuckGo\",\n [\"ddg\"],\n True,\n True)", "def db_for_read(self, model, **hints):\n if is_recon_model(model):\n return settings.RECON_NG_DATABASE_NAME\n\n return None", "def db_for_write(self, model, **hints):\n if model._meta.app_label == 'researcherquery':\n return 'safedb'\n return None", "def test_gen_default_fallback_is_destination(self):\n # Start transaction and add default fallback to database\n # NOTE: Must begin a nested transaction, as autocommit=False by default\n # which automatically starts a transaction when Session is created.\n # See: https://docs.sqlalchemy.org/en/13/orm/session_api.html#sqlalchemy.orm.session.SessionTransaction\n self.session.begin_nested()\n self._insert_default_fallback()\n\n destination = 
db.gen_default_fallback(self.dbm)\n self.assertEqual(db.Destination, type(destination))\n self.assertEqual(\"https://duckduckgo.com?q={}\", destination.url)\n\n self.session.rollback()", "def check_db(self):\n if self.db == 'user':\n db = USERS_LIST\n return db\n elif self.db == 'questions':\n db = QUESTIONS_LIST\n return db\n elif self.db == 'meetups':\n db = MEETUPS_LIST\n return db\n\n elif self.db == 'rsvp':\n db = RSVP_LIST\n return db", "def db_for_write (self, model, **hints):\n return 'default'", "def get_database_name(self, data: dict) -> str: # pylint: disable=arguments-differ\n if not data[\"database_name\"] and self.service_connection.database:\n return self.service_connection.database\n return data[\"database_name\"]", "def db_for_write(self, model, **hints):\r\n if model._meta.app_label == self.APP_LABEL:\r\n return self.DB_NAME\r\n return None", "def db_for_write(self, model, **hints):\n if model._meta.app_label == 'compras':\n return 'db2'\n return None", "def test_gen_default_fallback_without_default_fallback(self):\n self.assertRaises(ValueError, db.gen_default_fallback, self.dbm)", "def switch(self, database, collection=None):\n self.connection.switchDatabase(database, collection if collection else self.coll_name)", "def gtfsdb_main(ctx, database):\n ctx.obj = dict()\n if not database and os.path.exists(DEFAULT_CONFIG_FILE):\n conf = json.load(open(DEFAULT_CONFIG_FILE, 'r'))\n database = conf['database']\n ctx.obj.update(dict(conf=conf))\n else:\n click.echo(\"No database selected!!\")\n sys.exit(1)\n ctx.obj.update(dict(database=Database(url=database), db_url=database))", "def allow_migrate(self, db, app_label, model_name=None, **hints):\n if app_label == 'delivery':\n return db == 'db1'\n return None", "def change_db(self):\n self.db = self.database.get()\n return self.db", "def db_for_write(self, model, **hints):\n state_db = self._db_name(model)\n if state_db in settings.DATABASES:\n name = state_db\n else:\n name = 'default'\n logger.debug('db_for_write({}): {}'.format(state_db, name))\n return name", "def database_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"database_name\")", "def db_for_write(self, model, **hints):\n if model._meta.app_label == self.app_label:\n return self.db_name\n return None", "def db_for_read(self, model, **hints):\n\n return self.db_name", "def db_for_read(self, model, **hints):\n if model._meta.app_label == 'eotrts_student':\n return 'eotrts_db'\n elif model._meta.app_label == 'essential_english_words_1':\n return 'essential_english_db'\n return None", "def getDefaultDBName() -> str:\n return f\"dbname={getpass.getuser()}\"", "def db_for_read(self, model, **hints):\n if model._meta.app_label == 'data_collection':\n return 'guam_groundwater'\n return None", "def change(cls, db):\n cls.configs['db'] = db\n\n if cls.conn and cls.conn.open:\n cls.conn.select_db(db)", "def apply_defaults(self, db, dest, kvargs, lines):\n table = db.get_table(kvargs['table'])\n default_text = kvargs['default_text']\n table.find_default_from_allowable_range_descriptions(default_text)\n # Log the defaults\n logging.info(\"Defaults for table: {}\".format(table.name))\n for var in table.vars():\n if var.default:\n logging.info(\" {}: {}\".format(var.name,var.default))\n return True", "def db_for_read(self, model, **hints):\n if model._meta.app_label == 'test':\n return 'test'\n return None", "def getDatabaseName(self):\n raise NotImplementedError", "def database(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"database\")", "def 
db_for_write(self, model, **hints):\n if is_recon_model(model):\n return settings.RECON_NG_DATABASE_NAME\n\n return None", "def uses_database(self, dbname):\n used = False\n if any([dbname.upper() in y for y in [x.upper() for x in self._dbnames]]):\n used = True\n return used", "def db_for_read(self, model, **hints):\n if model == FilterRecordingTracking:\n return 'db_rest_api'\n return None", "def dbname(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"dbname\")", "def _database(self):\n ...", "def allow_migrate(self, db, app_label, model_name=None, **hints):\n if app_label == 'researcherquery':\n return db == 'safedb'\n return None", "def run_dbname(self, expanded, unexpanded) :\n\t\tif expanded :\n\t\t\treturn self.errormessage(\"Doesn't need any argument\")\n\t\tself.htmlmessage(self.__context.Control_Panel.db_name(), printable=1)", "def test_default_backend_used_when_not_specified():\n money_rates_settings.DEFAULT_BACKEND = CustomBackend\n call_command(\"update_rates\")\n\n assert 1 == RateSource.objects.filter(name=\"custom-backend\").count()\n assert 2 == Rate.objects.filter(source__name=\"custom-backend\").count()", "def allow_migrate(self, db, app_label, model_name=None, **hints):\n if app_label in ['awesome_users', 'awesome_rooms']:\n return db == 'common'\n else:\n return db == 'default'", "def database_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"database_name\")", "def db_for_write(self, model, **hints):\n if model._meta.app_label == 'test':\n return 'test'\n return None", "def db_for_write(self, model, **hints):\n if model._meta.app_label == 'eotrts_student':\n return 'eotrts_db'\n elif model._meta.app_label == 'essential_english_words_1':\n return 'essential_english_db'\n return None", "def db_for_read(self, model, **hints):\n\t\tif model._meta.app_label == 'product':\n\t\t\treturn 'product_dbs'\n\t\treturn None", "def database(self, database):\n self._database = database", "def db_for_write(self, model, **hints):\n if model._meta.app_label == 'data_collection':\n return 'guam_groundwater'\n return None", "def _convert_rosetta_db_to_basic_db(self):\n pass", "def test_dummydb_basic(self):\n db = DummyDB()", "def allow_migrate(self, db, app_label, model=None, **hints):\n if app_label == 'data_collection':\n return db == 'guam_groundwater'\n return None", "def allow_migrate(self, db, app_label, model_name=None, **hints):\n if db == 'default':\n return True\n else:\n return False", "def db_for_write(self, model, **hints):\n return 'master'", "def db_for_read(self, model, **hints):\n if self.isAdminApp(model):\n return 'auth_db'\n return None", "def database(self):\n try:\n return self._database\n except:\n database = self.application.connection[self.database_name]\n self._database = database\n return database", "def default_connection():\n\n QtSql.QSqlDatabase.database('qt_sql_default_connection').close()\n QtSql.QSqlDatabase.removeDatabase('qt_sql_default_connection')\n db = QtSql.QSqlDatabase.addDatabase(\"QMYSQL\")\n host, database = read_settings(\"default\")\n db.setUserName('fab')\n db.setPassword('doylefab')\n db.setHostName(host)\n db.setDatabaseName(database)\n if db.open():\n return True\n else:\n db_err(db)\n return False", "def db(self):\n if self._for_write:\n return self._db or router.db_for_write(self.model, **self._hints)\n return self._db or router.db_for_read(self.model, **self._hints)", "def get_db_name(self):\n\t\treturn conf.db_name", "def default():", "def preprocess_database(database: str):\n # processed -> db, label_true, 
data_frame\n if database == \"breast\":\n processed = preprocess_breast()\n elif database == \"cmc\":\n processed = preprocess_cmc()\n elif database == \"adult\":\n processed = preprocess_adult()\n else:\n raise ValueError('database not found')\n\n return processed", "def change_db(cls, dbname):\n setattr(cls, 'db', staticmethod(lambda: Db(dbname)))", "def db_for_write(self, model, **hints):\n if model == FilterRecordingTracking:\n return 'db_rest_api'\n return None", "def database(self, database):\n if database is not None and len(database) > 256:\n raise ValueError(\"Invalid value for `database`, length must be less than or equal to `256`\")\n if database is not None and len(database) < 0:\n raise ValueError(\"Invalid value for `database`, length must be greater than or equal to `0`\")\n if database is not None and not re.search('[\\\\w\\\\_]{0,}', database):\n raise ValueError(\"Invalid value for `database`, must be a follow pattern or equal to `/[\\\\w\\\\_]{0,}/`\")\n\n self._database = database", "def set_db(db):\n global db_run # Imports the DB from the simulator\n db_run=db", "def db_for_write(self, model, **hints):\n\n return self.db_name", "def default_dataset(self):\n return self.get_by_index(self._default_index)", "def choose_db():\n\n def instantiate_db(db):\n if db == 1:\n db = Database(\"data_tvseries\")\n return db\n elif db == 2:\n db = Database(\"data_movies\")\n return db\n\n try:\n chosen_db = int(input(\"Which database would you like to browse?\\n\"\n \"1 - TV series database.\\n\"\n \"2 - movie database.\\n\"))\n options = [1, 2]\n assert 0 < chosen_db < options[-1] + 1\n chosen_db = instantiate_db(chosen_db)\n return chosen_db\n except Exception:\n print(\"Wrong value entered. Please choose again.\\n\")\n return choose_db()", "def run_new_sql(self):\n\n pass", "def database(self):\n return self.snowflake_options.database", "def __set_database_info(self):\n if self.service == \"sigs\":\n self.database_name = \"sigs_local\"\n # dict of dump files and the tables associated\n self.dump_files = {\n \"01_auth.sql\": [\"auth_group\", \"auth_group_permissions\", \"auth_permission\", \"auth_user\",\n \"auth_user_groups\", \"auth_user_user_permissions\"],\n \"02_django.sql\": [\"django_content_type\", \"django_migrations\", \"django_admin_log\", \"django_session\"],\n \"03_daily_irradience.sql\": [\"solar_models_dailyglobalirradiance\", \"solar_models_hourlyglobalirradiance\"]\n }", "def set_output_database (self, file_name):\n try:\n self.object_database=file_name\n self.filepath_output_database = os.path.join(self.filepath, self.output_database)\n print(\"set output_database filename to\", file_name)\n except:\n print(\"setting object database failed\")\n self.output_database=\"Output_database.db\"\n self.filepath_output_database = os.path.join(self.filepath, self.output_database)\n print(\"set output database name to default:\", self.object_database)\n return", "def acceptDB(self, db):\n self._db = db", "def reset_default_dd(tbl=None, add_checks=False):\n debug = False\n dd = mg.DATADETS_OBJ\n if debug:\n print(f'Resetting connection to default db. 
Add_checks: {add_checks}')\n try:\n dd.cur.close()\n dd.con.close()\n dd.set_dbe(dbe=mg.DBE_SQLITE, db=mg.SOFA_DB, tbl=tbl, \n add_checks=add_checks) ## Must reset entire dbe to change checks\n if debug: ## check the connection is still working\n obj_qtr = getdata.get_obj_quoter_func(mg.DBE_SQLITE)\n dd.cur.execute(f'SELECT * FROM {obj_qtr(dd.tbls[0])}')\n print(dd.cur.fetchone())\n except Exception as e:\n raise Exception(f'Problem resetting dd with tbl {tbl}.'\n f'\\nCaused by error: {b.ue(e)}')", "def rec_default(self):\n pass", "def allow_migrate(self, db, app_label, model_name=None, **hints):\n if 'target_db' in hints:\n return hints['target_db'] == self.db_entry\n if app_label in self.route_app_labels:\n return db == self.db_entry\n return None", "def getDatabaseName( self ):\n return self.mDbname", "def get_database(self):\n return self.database", "def db_for_write(self, model, **hints):\n\t\tif model._meta.app_label == 'product':\n\t\t\treturn 'product_dbs'\n\t\treturn None", "def database_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"database_name\")", "def getDefaultDatabaseConnectionParameter(self):\n return { 'path' : os.path.join(self.configPath(), 'databases', 'settings.db') }", "def database():\n return conf().database", "def default(self, default=None):\n\n def default_value_list(sources: List[str] = None):\n \"\"\"\n Infores default method for a list of input knowledge source names.\n\n Parameters\n ----------\n sources: List[str]\n List of Knowledge source name strings being processed.\n\n Returns\n -------\n List[str]\n Infores identifiers mapped to input source strings.\n\n \"\"\"\n if not default:\n return list()\n if not sources:\n return [default]\n else:\n return sources\n\n def default_value_scalar(source=None):\n \"\"\"\n Infores default method for single input knowledge source name.\n\n Parameters\n ----------\n source: str\n Knowledge source name string being processed.\n\n Returns\n -------\n str\n Infores identifier mapped to the input source string.\n\n \"\"\"\n if not default:\n return None\n if not source:\n return default\n else:\n return source\n\n if self.ksf in column_types and column_types[self.ksf] == list:\n return default_value_list\n else:\n # not sure how safe an assumption for non-list column_types, but...\n return default_value_scalar", "def get_single_db_name():\n expected_db_name = os.environ.get(\"MONGO_DB\")\n if not expected_db_name and is_testing():\n expected_db_name = f\"Test-{time.time_ns() // 1000000}\"\n\n return expected_db_name", "def setdb_params(self, mongouri=None, dbname=\"testrecommender\"):\n if mongouri is not None:\n self._mongouri = mongouri\n if dbname is not None:\n self._dbname = dbname\n self._outputlogfile = self._outputlogfile + dbname + \"_outputlog.txt\"", "def test_make_default_ach_user(self):\n\n user = self.client.users.create({})\n\n FundingSources.get_user_ach_funding_source(user)\n source = FundingSources.get_user_ach_funding_source(user)\n\n default = self.client.funding_sources(source.token).make_default()\n\n verify_payment_card_response_model(\n self, default, {'is_default_account': True})", "def test_set_databases(self):\n Config.set_databases({\n 'default': {\n 'url': 'bolt://cypher-db:7687',\n 'username': 'neo4j',\n 'password': 'cypher',\n },\n })\n\n default_database = Config.databases.get('default', None)\n self.assertIsNotNone(default_database)", "def _get_database(self, options):\n database_key = options.get('database')\n if not database_key:\n if len(settings.DATABASES) >= 2:\n 
errmsg = \"Because this project contains more than one database, you\"\n errmsg += \" must specify the --database option.\"\n raise CommandError(errmsg)\n database_key = settings.DATABASES.keys()[0]\n return settings.DATABASES[database_key]", "def default_global_location(database):\n\n for dataset in get_many(database, *[equals(\"location\", None)]):\n dataset[\"location\"] = \"GLO\"\n return database", "def _single_data_source(self) -> DataSource:\n data_source = None\n for meta_column in self._meta_columns:\n if data_source is None:\n data_source = meta_column.data_source\n elif data_source is not meta_column.data_source:\n raise SomeError('Mixed data sources are not supported')\n if data_source is None:\n raise SomeError('The column list provides no data source')\n return data_source", "def test_make_default_ach_business(self):\n\n business = self.client.businesses.create({})\n\n FundingSources.get_business_ach_funding_source(business)\n source = FundingSources.get_business_ach_funding_source(business)\n\n default = self.client.funding_sources(source.token).make_default()\n\n verify_payment_card_response_model(\n self, default, {'is_default_account': True})" ]
[ "0.6282125", "0.6158884", "0.6089975", "0.59566677", "0.5930733", "0.591902", "0.59030586", "0.57954264", "0.57474667", "0.5674948", "0.5663659", "0.56381756", "0.5626141", "0.5607359", "0.5583429", "0.5558164", "0.5541641", "0.5541641", "0.5531401", "0.54835534", "0.5477847", "0.5477542", "0.54468143", "0.5444289", "0.54436624", "0.5416509", "0.5390688", "0.5377644", "0.53609526", "0.5355416", "0.535006", "0.5340727", "0.534047", "0.5338766", "0.5333054", "0.53120637", "0.53110045", "0.5310991", "0.53015214", "0.52799904", "0.52778584", "0.5269706", "0.52514213", "0.52465546", "0.5241932", "0.521512", "0.52139956", "0.51722413", "0.51655674", "0.5163235", "0.51481944", "0.5145656", "0.51299244", "0.512208", "0.51169986", "0.5106475", "0.5105869", "0.5086826", "0.5084696", "0.50764155", "0.50759184", "0.5070485", "0.5061759", "0.50551605", "0.5049879", "0.5034012", "0.50339544", "0.50285435", "0.5025038", "0.5014046", "0.5007534", "0.50073344", "0.50048757", "0.49945915", "0.4990879", "0.49873286", "0.4972055", "0.49628022", "0.4961893", "0.4961122", "0.49568352", "0.495456", "0.49502623", "0.49495307", "0.49385914", "0.49067387", "0.48998913", "0.4892986", "0.48862883", "0.4884958", "0.48800772", "0.48759046", "0.4871357", "0.48704314", "0.4866428", "0.48655942", "0.48609984", "0.4858786", "0.48584843", "0.48500258", "0.48477697" ]
0.0
-1
From topology. Prepare a database request and pass it to the sink.
def yield_database(self, database_name: str) -> Iterable[CreateDatabaseRequest]:
    yield CreateDatabaseRequest(
        name=database_name,
        service=EntityReference(
            id=self.context.database_service.id,
            type="databaseService",
        ),
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def before_request():\n\tg.db = sql.connect(host=cfg.dbhost, port=cfg.dbport, user=cfg.user,\\\n\t\tpasswd=cfg.password, db=cfg.database,\\\n\t\tcharset=cfg.charset)", "def __init__(self, dbname, host, writeport):\n self.dbname = dbname\n self.host = host\n self.writeport = writeport\n self.readouts = {} # cache of readout information\n self.filters = {} # data reduction filters per channel\n self.readsets = [] # pre-defined sets of readouts for negotiating bulk transfers", "async def query_request_handler(db_pool, processed_request, request):\n # First we parse the query to prepare it to be used in the SQL function\n # We create the list of the parameters that the SQL function needs\n correct_parameters = [\n\t\"variantType\",\n\t\"start\",\n\t\"startMin\",\n\t\"startMax\",\n\t\"end\",\n\t\"endMin\",\n\t\"endMax\",\n\t\"referenceName\",\n\t\"referenceBases\",\n\t\"alternateBases\",\n\t\"assemblyId\",\n\t\"datasetIds\",\n \"filters\"]\n \n int_params = ['start', 'end', 'endMax', 'endMin', 'startMax', 'startMin']\n\n query_parameters = []\n\n # Iterate correct_parameters to create the query_parameters list from the processed_request \n # in the requiered order and with the right types\n for param in correct_parameters:\n query_param = processed_request.get(param)\n if query_param:\n if param in int_params:\n query_parameters.append(int(query_param))\n else:\n query_parameters.append(str(query_param))\n else:\n if param in int_params:\n query_parameters.append(None)\n else:\n query_parameters.append(\"null\")\n\n\n # At this point we have a list with the needed parameters called query_parameters, the only thing \n # laking is to update the datasetsIds (it can be \"null\" or processed_request.get(\"datasetIds\"))\n # then we have to take into account the access permissions\n\n LOG.debug(f\"Correct param: {correct_parameters}\")\n LOG.debug(f\"Query param: {query_parameters}\")\n LOG.debug(f\"Query param types: {[type(x) for x in query_parameters]}\")\n\n # We want to get a list of the datasets available in the database separated in three lists\n # depending on the access level (we check all of them if the user hasn't specified anything, if some\n # there were given, those are the only ones that are checked)\n public_datasets, registered_datasets, controlled_datasets = await fetch_datasets_access(db_pool, query_parameters[-2])\n\n ##### TEST CODE TO USE WHEN AAI is integrated\n # access_type, accessible_datasets = access_resolution(request, request['token'], request.host, public_datasets,\n # registered_datasets, controlled_datasets)\n # LOG.info(f\"The user has this types of acces: {access_type}\")\n # query_parameters[-2] = \",\".join([str(id) for id in accessible_datasets])\n ##### END TEST\n\n # NOTE that rigth now we will just focus on the PUBLIC ones to easen the process, so we get all their \n # ids and add them to the query\n query_parameters[-2] = \",\".join([str(id) for id in public_datasets])\n\n # We adapt the filters parameter to be able to use it in the SQL function (e.g. 
'(technology)::jsonb ?& array[''Illumina Genome Analyzer II'', ''Illumina HiSeq 2000'']')\n if query_parameters[-1] != \"null\":\n processed_filters_param, _ = await prepare_filter_parameter(db_pool, query_parameters[-1])\n query_parameters[-1] = processed_filters_param\n\n # We will output the datasets depending on the includeDatasetResponses parameter\n include_dataset = \"\"\n if processed_request.get(\"includeDatasetResponses\"):\n include_dataset = processed_request.get(\"includeDatasetResponses\")\n else:\n include_dataset = \"ALL\"\n\n LOG.info(f\"Query FINAL param: {query_parameters}\")\n LOG.info('Connecting to the DB to make the query.')\n\n datasets = await get_datasets(db_pool, query_parameters, include_dataset)\n\n LOG.info('Query done.')\n\n # We create the final dictionary with all the info we want to return\n beacon_response = { 'beaconId': __id__,\n 'apiVersion': __apiVersion__,\n 'exists': any([x['exists'] for x in datasets]),\n 'info': None,\n 'alleleRequest': processed_request,\n 'datasetAlleleResponses': filter_exists(include_dataset, datasets)}\n \n # Before returning the response we need to filter it depending on the access levels\n beacon_response = {\"beaconAlleleResponse\": beacon_response}\n\n # NOTE we hardcode accessible_datasets and user_levels it because authentication is not implemented yet\n accessible_datasets = public_datasets\n user_levels = [\"PUBLIC\"] \n filtered_response = filter_response(beacon_response, ACCESS_LEVELS_DICT, accessible_datasets, user_levels, query2access)\n\n return filtered_response[\"beaconAlleleResponse\"]", "def _jdbc_producer_destination(self, pipeline_builder):\n self.destination_system = self.environments['database'].engine.dialect.name\n self.destination_format = None\n\n table_name = get_random_string().lower()\n self._create_table_if_not_exists(table_name)\n jdbc_producer = pipeline_builder.add_stage('JDBC Producer', type='destination')\n jdbc_producer.set_attributes(default_operation=\"INSERT\",\n field_to_column_mapping=[],\n enclose_object_names=True,\n use_multi_row_operation=True,\n statement_parameter_limit=32768,\n table_name=table_name)\n query = f'TRUNCATE TABLE {table_name}'\n stop_stage = pipeline_builder.add_stop_event_stage('JDBC Query')\n if Version(self.sdc_builder.version) < Version('3.14.0'):\n stop_stage.set_attributes(sql_query=query)\n else:\n stop_stage.set_attributes(sql_queries=[query])\n return jdbc_producer, pipeline_builder", "async def prepared(self, *args, **kwargs):\n pass", "def before_request():\n g.db = connect_db()", "def before_request():\n g.db = connect_db()", "def before_request():\n g.db = connect_db()", "def before_request():\n g.db = connect_db()", "def __init__(self):\n engine = db_connect()\n create_table(engine)\n self.Session = sessionmaker(bind=engine)\n logging.info(\"****SaveRestaurantsPipeline: database connected****\")", "def __init__(self):\n engine = db_connect()\n create_table(engine)\n self.Session = sessionmaker(bind=engine)\n logging.info(\"****DuplicatesPipeline: database connected****\")", "def _jdbc_query_origin(self):\n self.origin_system = self.environments['database'].engine.dialect.name\n self._setup_origin_table()\n pipeline_builder = self.sdc_builder.get_pipeline_builder()\n jdbc_query_consumer = pipeline_builder.add_stage('JDBC Query Consumer', type='origin')\n jdbc_query_consumer.set_attributes(incremental_mode=False,\n sql_query=f'SELECT * FROM {self.dataset}')\n return jdbc_query_consumer, pipeline_builder", "def _setup_origin_table(self):\n if 
self._create_table_if_not_exists(self.dataset):\n return\n\n directory, pipeline_builder = self._directory_origin(MAX_CONCURRENCY)\n jdbc_producer = pipeline_builder.add_stage('JDBC Producer', type='destination')\n jdbc_producer.set_attributes(default_operation=\"INSERT\",\n field_to_column_mapping=[],\n enclose_object_names=True,\n use_multi_row_operation=True,\n statement_parameter_limit=32768,\n table_name=self.dataset)\n\n directory >> jdbc_producer\n\n pipeline = pipeline_builder.build().configure_for_environment(self.environments['database'])\n self.sdc_executor.add_pipeline(pipeline)\n self.sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(self.record_count, timeout_sec=LOAD_TIMEOUT)\n self.sdc_executor.stop_pipeline(pipeline)\n self.sdc_executor.remove_pipeline(pipeline)", "def build_input_db():\n build_input_database()", "def prepare(self, db):\n raise NotImplementedError('GenericEngine.prepare is an abstract method.')", "def __init__(self, dbname, host, readport):\n self.host = host\n self.readport = readport\n self.dbname = dbname\n self.readsets = [] # pre-defined sets of readouts for negotiating bulk transfers", "def __init__(self, database, server_architecture):\n # connection\n self.connection = sqlite3.connect(database)\n self.cursor = self.connection.cursor()\n self.outputs = Outputs()\n self.subsections = self.init_subsections(server_architecture)\n self.prep_database(database)", "def db(request):\n connection = request.param\n return connection", "def _database(self):\n ...", "def _prepare_raw_data(kwargs):\n path = kwargs.get(\"path\", None)\n output_path = kwargs.get(\"output_path\", None)\n data_source = DataSource.best_available_data_source()\n for job in data_source.jobs(\n source=\"raw\", path=path, data_path=output_path, stateful=False):\n data_source.write_job(data=job, path=output_path)\n for traffic in data_source.traffics(\n source=\"raw\", path=path, data_path=output_path, stateful=False):\n data_source.write_traffic(data=traffic, path=output_path)", "def dbinit( *args, **kwargs ):", "def initdata(): # pylint: disable=too-many-statements\n\n # auth test data\n db.session.add(User(username='user1', active=True, roles=['user', 'operator', 'admin']))\n\n # scheduler test data\n db.session.add(Excl(family=ExclFamily.network, value='127.66.66.0/26', comment='blacklist 1'))\n db.session.add(Excl(family=ExclFamily.regex, value=r'^tcp://.*:22$', comment='avoid ssh'))\n\n queue = Queue(\n name='dev dummy',\n config=yaml_dump({'module': 'dummy', 'args': '--dummyparam 1'}),\n group_size=2,\n priority=10,\n active=True\n )\n db.session.add(queue)\n for target in range(3):\n db.session.add(Target(target=target, queue=queue))\n\n db.session.add(Queue(\n name='pentest full syn scan',\n config=yaml_dump({\n 'module': 'nmap',\n 'args': '-sS -A -p1-65535 -Pn --max-retries 3 --script-timeout 10m --min-hostgroup 20 --min-rate 900 --max-rate 1500'\n }),\n group_size=20,\n priority=10,\n ))\n\n db.session.add(Queue(\n name='sner_disco ack scan top10000',\n config=yaml_dump({'module': 'nmap', 'args': '-sA --top-ports 10000 -Pn', 'timing_perhost': 8}),\n group_size=1000,\n priority=10,\n ))\n\n db.session.add(Queue(\n name='sner_data version scan basic',\n config=yaml_dump({'module': 'manymap', 'args': '-sV --version-intensity 4 -Pn', 'delay': 10}),\n group_size=50,\n priority=15,\n ))\n\n db.session.add(Queue(\n name='sner_data version scan intense',\n config=yaml_dump({'module': 'manymap', 'args': '-sV --version-intensity 8 -Pn', 'delay': 10}),\n 
group_size=50,\n priority=15,\n ))\n\n db.session.add(Queue(\n name='sner_disco ipv6 dns discover',\n config=yaml_dump({'module': 'six_dns_discover', 'delay': 1}),\n group_size=1000,\n priority=10,\n ))\n\n db.session.add(Queue(\n name='sner_disco ipv6 enum discover',\n config=yaml_dump({'module': 'six_enum_discover', 'rate': 100}),\n group_size=5,\n priority=10,\n ))\n\n db.session.add(Queue(\n name='sner_data script scan basic',\n config=yaml_dump({\n 'module': 'manymap',\n 'args': '-sS --script default,http-headers,ldap-rootdse,ssl-cert,ssl-enum-ciphers,ssh-auth-methods --script-timeout 10m -Pn',\n 'delay': 10\n }),\n group_size=50,\n priority=15,\n ))\n\n db.session.add(Queue(\n name='sner_sweep ack scan portA',\n config=yaml_dump({'module': 'nmap', 'args': '-sA -p1099 -Pn', 'timing_perhost': 1}),\n group_size=4000,\n priority=50,\n ))\n\n db.session.add(Queue(\n name='sner_sweep version scan basic',\n config=yaml_dump({'module': 'manymap', 'args': '-sV --version-intensity 4 -Pn', 'delay': 10}),\n group_size=50,\n priority=55,\n ))\n\n # storage test data host1\n aggregable_vuln = {'name': 'aggregable vuln', 'xtype': 'x.agg', 'severity': SeverityEnum.medium}\n\n host = Host(\n address='127.4.4.4',\n hostname='testhost.testdomain.test<script>alert(1);</script>',\n os='Test Linux 1',\n comment='a some unknown service server'\n )\n db.session.add(host)\n\n db.session.add(Service(\n host=host,\n proto='tcp',\n port=12345,\n state='open:testreason',\n name='svcx',\n info='testservice banner',\n comment='manual testservice comment'\n ))\n\n db.session.add(Vuln(host=host, **aggregable_vuln))\n\n # storage test data host2\n host = Host(\n address='127.3.3.3',\n hostname='testhost1.testdomain.test',\n os='Test Linux 2',\n comment='another server'\n )\n db.session.add(host)\n\n db.session.add(Service(\n host=host,\n proto='tcp',\n port=12345,\n state='closed:testreason',\n name='svcx'\n ))\n\n db.session.add(Vuln(\n host=host,\n name='test vulnerability',\n xtype='testxtype.123',\n severity=SeverityEnum.critical,\n comment='a test vulnerability comment',\n refs=['ref1', 'ref2'],\n tags=['tag1', 'tag2']\n ))\n\n db.session.add(Vuln(\n host=host,\n name='another test vulnerability',\n xtype='testxtype.124',\n severity=SeverityEnum.high,\n comment='another vulnerability comment',\n tags=None\n ))\n\n db.session.add(Vuln(\n host=host,\n name='vulnerability1',\n xtype='testxtype.124',\n severity=SeverityEnum.medium,\n tags=['info']\n ))\n\n db.session.add(Vuln(\n host=host,\n name='vulnerability2',\n xtype='testxtype.124',\n severity=SeverityEnum.low,\n tags=['report']\n ))\n\n db.session.add(Vuln(\n host=host,\n name='vulnerability2',\n xtype='testxtype.124',\n severity=SeverityEnum.info,\n tags=['info']\n ))\n\n db.session.add(Vuln(\n host=host,\n service=Service.query.first(),\n name='vulnerability3',\n xtype='testxtype.124',\n severity=SeverityEnum.unknown,\n tags=['report']\n ))\n\n db.session.add(Vuln(host=host, **aggregable_vuln))\n\n db.session.add(Note(\n host=host,\n xtype='sner.testnote',\n data='testnote data',\n comment='test note comment'\n ))\n\n db.session.commit()", "def make_query(self):", "def __init__(__self__, *,\n database_name: str,\n machine_name: str,\n server_name: str,\n source: str,\n source_computer_id: str,\n vmuuid: str,\n workspace_id: str):\n pulumi.set(__self__, \"database_name\", database_name)\n pulumi.set(__self__, \"machine_name\", machine_name)\n pulumi.set(__self__, \"server_name\", server_name)\n pulumi.set(__self__, \"source\", 'OnPremiseSql')\n 
pulumi.set(__self__, \"source_computer_id\", source_computer_id)\n pulumi.set(__self__, \"vmuuid\", vmuuid)\n pulumi.set(__self__, \"workspace_id\", workspace_id)", "def __init__(__self__, *,\n database: pulumi.Input[str],\n host: pulumi.Input[str],\n port: pulumi.Input[float]):\n pulumi.set(__self__, \"database\", database)\n pulumi.set(__self__, \"host\", host)\n pulumi.set(__self__, \"port\", port)", "def handle(self):\n try:\n conn = sqlite.connect(\"temp.db\")\n while True:\n data = self.request.recv(48)\n if not data:\n break\n parts = struct.unpack(\"dddddd\", data)\n print_datapoint(parts)\n store_datapoint(conn, parts)\n except KeyboardInterrupt:\n pass\n finally:\n conn.close()", "def prepare(self, request):\n pass", "def handler(event, context):\n\n with conn.cursor() as cur:\n dbtool = DBTool(conn, cur)\n resp = getattr(dbtool, event[\"fn\"])(*event[\"args\"], **event[\"kwargs\"])\n\n logger.info(resp)\n\n return resp", "def make_request(dbname='default'):\n num_beams = get_num_to_request()\n if not num_beams:\n # Request size is 0\n return\n dlm_cout.outs(\"Requesting data\\nIssuing a request of size %d\" % num_beams)\n\n # Ask to restore num_beams\n db = database.Database(dbname)\n QUERY = \"SELECT f.obs_id FROM full_processing as f LEFT JOIN processing AS p ON f.obs_id = p.obs_id WHERE f.status='available' AND p.details is NULL LIMIT %d\"%num_beams\n db.cursor.execute(QUERY)\n obs_ids = [row[0] for row in db.cursor.fetchall()]\n\n # Ask for an uuid\n QUERY = \"SELECT UUID();\"\n db.cursor.execute(QUERY)\n guid = db.cursor.fetchone()[0]\n\n if not obs_ids:\n print \"There are no files to be restored.\"\n return\n\n # Mark the beams for restorations\n for obs_id in obs_ids:\n QUERY = \"UPDATE full_processing SET status='requested', guid='%s', updated_at=NOW() WHERE obs_id=%s\"%(guid, obs_id)\n db.cursor.execute(QUERY)\n db.conn.close()\n\n #if guid == \"fail\":\n # raise pipeline_utils.PipelineError(\"Request for restore returned 'fail'.\")\n\n requests = jobtracker.query(\"SELECT * FROM requests WHERE guid='%s'\" % guid)\n\n if requests:\n # Entries in the requests table exist with this GUID!?\n raise pipeline_utils.PipelineError(\"There are %d requests in the \" \\\n \"job-tracker DB with this GUID %s\" % \\\n (len(requests), guid))\n\n jobtracker.query(\"INSERT INTO requests ( \" \\\n \"numbits, \" \\\n \"numrequested, \" \\\n \"file_type, \" \\\n \"guid, \" \\\n \"created_at, \" \\\n \"updated_at, \" \\\n \"status, \" \\\n \"details) \" \\\n \"VALUES (%d, %d, '%s', '%s', '%s', '%s', '%s', '%s')\" % \\\n (config.download.request_numbits, num_beams, \\\n config.download.request_datatype, guid, \\\n jobtracker.nowstr(), jobtracker.nowstr(), 'waiting', \\\n 'Newly created request'))", "def connect(self, dbapi_connection, connection_record):", "def before_request():\n db.connect()", "def db_handler():\n\n pass", "def __init__(self, *args):\n\n super(Recorder, self).__init__('RECORDER', *args)\n\n self.client = InfluxDBClient(\n host=self.configprops.influx_host,\n port=self.configprops.influx_port\n )\n\n databases_raw = self.client.get_list_database()\n\n databases = list((i['name'] for i in databases_raw))\n\n if self.configprops.influx_database not in databases:\n self.client.create_database(self.configprops.influx_database)\n\n self.client.switch_database(self.configprops.influx_database)\n\n self.current_position = {'lat': 0, 'lon': 0}", "def empty_graph_db(request) -> graph_tuple_database.Database:\n yield from testing_databases.YieldDatabase(\n 
graph_tuple_database.Database, request.param\n )", "def __init__(\n self,\n db,\n collection,\n data_source_name=None,\n identifier_mapping=None,\n http_get=None,\n metadata_client=None,\n content_modifier=None,\n map_from_collection=None,\n mirrors=None,\n ):\n super(OPDS2Importer, self).__init__(\n db,\n collection,\n data_source_name,\n identifier_mapping,\n http_get,\n metadata_client,\n content_modifier,\n map_from_collection,\n mirrors,\n )\n\n self._logger = logging.getLogger(__name__)", "def _create_jdbc_producer_pipeline(pipeline_builder, pipeline_title, raw_data, table_name, operation):\n dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')\n dev_raw_data_source.set_attributes(data_format='JSON', raw_data=raw_data)\n\n record_deduplicator = pipeline_builder.add_stage('Record Deduplicator')\n\n FIELD_MAPPINGS = [dict(field='/id', columnName='id'),\n dict(field='/name', columnName='name')]\n jdbc_producer = pipeline_builder.add_stage('JDBC Producer')\n jdbc_producer.set_attributes(default_operation=operation,\n table_name=table_name,\n field_to_column_mapping=FIELD_MAPPINGS,\n stage_on_record_error='STOP_PIPELINE')\n\n trash = pipeline_builder.add_stage('Trash')\n dev_raw_data_source >> record_deduplicator >> jdbc_producer\n record_deduplicator >> trash\n\n return pipeline_builder.build(title=pipeline_title)", "def __init__(self, connection=None, url=None,\r\n table=None, statement=None, schema=None, autoinit = True,\r\n **options):\r\n\r\n super(SQLDataSource, self).__init__()\r\n\r\n if not table and not statement:\r\n raise AttributeError(\"Either table or statement should be \" \\\r\n \"provided for SQL data source\")\r\n\r\n if statement:\r\n raise NotImplementedError(\"SQL source stream based on statement \" \\\r\n \"is not yet implemented\")\r\n\r\n if not options:\r\n options = {}\r\n\r\n self.url = url\r\n self.connection = connection\r\n\r\n self.table_name = table\r\n self.statement = statement\r\n self.schema = schema\r\n self.options = options\r\n\r\n self.context = None\r\n self.table = None\r\n self.fields = None\r\n\r\n if autoinit:\r\n self.initialize()", "def execute(self, context):\n #### Postgres SELECT query block\n try:\n # Init Airflow Postgres Hook\n pg = PostgresHook(postgres_conn_id=self.postgres_conn_id)\n # Get records via an SQL query\n # with open(self.sql) as sql_file: sql = sql_file.read()\n records = pg.get_records(sql=self.sql)\n # Raise block exception \n except: raise\n #### Transformations block\n try:\n # Apply transformation function\n results = self.transform_function(records)\n # Raise block exception \n except: raise \n #### JSON export block\n try:\n # Get file absolute path\n self.abs_file_path = get_absolute_path(self.file_path, context)\n # Export as JSON file\n with open(self.abs_file_path, \"w\") as json_file:\n json.dump(results, json_file, indent=4)\n # Raise block exception \n except: raise", "def __post_init__(self):\n self.dbase = databases.Database(\n self.dsn,\n min_size=self.min_size,\n max_size=self.max_size\n )\n self.engine, self.meta = self.get_engine_metadata()", "def stream(ctx, db_addr):\n ctx.ensure_object(Context)\n ctx.obj.db = None\n if db_addr:\n addr_list = db_addr.split(\":\")\n if len(addr_list) != 2:\n raise ValueError(\"Address must be of the form <host>:<port>\")\n logging.info(\"Connecting to database. 
Host: {!s} Port: {!s}\".format(\n addr_list[0], addr_list[1]))\n ctx.obj.db = aceclient.AceDB(host=addr_list[0], port=addr_list[1])", "def __init__(self, args):\n self._source_host = args.source_host\n self._target_host = args.target_host\n self._replica_set = args.replica_set\n self._user = args.user\n self._password = args.password\n self._poll_interval = args.interval\n self._lag_key = args.region + '_' + args.replica_set + '_lag'\n # We assume a local collectd installation\n self._stat_client = StatsClient()", "def __init__(self):\n self._connection = get_db_connection()", "def prepare(self):", "def handle_request_payload(self, input_args):\n\n if self.resource['operation'] == PyMongoEvent.INSERT_MANY:\n add_data_if_needed(self.resource['metadata'], 'Items',\n input_args[0])\n\n elif self.resource['operation'] == PyMongoEvent.INSERT_ONE:\n add_data_if_needed(self.resource['metadata'], 'Item',\n input_args[0])\n\n elif self.resource['operation'] in PyMongoEvent.FILTER_OPERATIONS:\n add_data_if_needed(self.resource['metadata'], 'Filter',\n input_args[0])\n\n if self.resource['operation'] == 'update_one':\n add_data_if_needed(self.resource['metadata'], 'New Values',\n input_args[1])", "def post(self):\n blob_key = self.request.get(\"blobkey\")\n\n database_creation.run(blob_key)", "def create_gt_database_template(self):\n pass\n with self.connection as cursor:\n fn = os.path.join(os.path.dirname(__file__), 'gtlog.sql')\n self.cursor.execute(open(fn, \"r\").read())", "def __init__(self, database):\n self.database = database", "def _build_db_data(self):\n self.logger.debug('Bulding task db document.')\n db_data = {}\n db_data.update(self.query)\n db_data['condition'] = False\n db_data['records'] = []\n self.db_collection.insert_one(db_data)\n return db_data", "def __init__(self):\n\t\tself.obtainDatabaseConnection()", "def exportDB(self):\n sourcesession=svc.connect(self.__source,accessMode=coral.access_Update)\n destsession=svc.connect(self.__dest,accessMode = coral.access_Update)\n try:\n dbcp=DBCopy(sourcesession,destsession,1024)\n if self.__all:\n dbcp.copyDB()\n elif self.__inv:\n dbcp.copyInventory()\n elif len(self.__tree) != 0:\n dbcp.copyTrees([self.__tree])\n del sourcesession\n del destsession\n except Exception, e:\n print str(e)\n del sourcesession\n del destsession", "def ingest():\n db.delete_dataset_records(DATASET_ID)\n\n db.insert_dataset({\n 'dataset_id': DATASET_ID,\n 'title': 'North American Breeding Bird Survey (BBS)',\n 'version': '2016.0',\n 'url': 'https://www.pwrc.usgs.gov/bbs/'})\n\n to_taxon_id = insert_taxa()\n to_place_id = insert_places()\n to_event_id = insert_events(to_place_id)\n insert_counts(to_event_id, to_taxon_id)", "def __init__(self):\r\n self.conn = create_connection(DATABASE_PATH)", "def before_request():\n g.db = models.DB\n g.db.connect()", "def before_request():\n g.db = models.DATABASE\n g.db.connect()", "def before_request():\n g.db = models.DATABASE\n g.db.connect()", "def before_request():\n g.db = models.DATABASE\n g.db.connect()", "def before_request():\n g.db = models.DATABASE\n g.db.connect()", "def before_request():\n g.db = models.DATABASE\n g.db.connect()", "def before_request():\n try:\n g.conn = engine.connect()\n except:\n print (\"uh oh, problem connecting to database\")\n import traceback; traceback.print_exc()\n g.conn = None", "def __init__(__self__,\n resource_name: str,\n args: DatabaseArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def gtfsdb_main(ctx, database):\n ctx.obj = dict()\n if not database and 
os.path.exists(DEFAULT_CONFIG_FILE):\n conf = json.load(open(DEFAULT_CONFIG_FILE, 'r'))\n database = conf['database']\n ctx.obj.update(dict(conf=conf))\n else:\n click.echo(\"No database selected!!\")\n sys.exit(1)\n ctx.obj.update(dict(database=Database(url=database), db_url=database))", "def before_request():\n engine = cache['engine']\n try:\n g.conn = engine.connect()\n except:\n print \"error creating temporary connection to the db\"\n import traceback; traceback.print_exc()\n g.conn = None", "def before_request():\r\n try:\r\n g.conn = engine.connect()\r\n except:\r\n print(\"uh oh, problem connecting to database\")\r\n import traceback; traceback.print_exc()\r\n g.conn = None", "def before_request():\r\n try:\r\n g.conn = engine.connect()\r\n except:\r\n print(\"uh oh, problem connecting to database\")\r\n import traceback; traceback.print_exc()\r\n g.conn = None", "def before_request():\r\n try:\r\n g.conn = engine.connect()\r\n except:\r\n print(\"uh oh, problem connecting to database\")\r\n import traceback; traceback.print_exc()\r\n g.conn = None", "def before_request():\n try:\n g.conn = engine.connect()\n except:\n print \"uh oh, problem connecting to database\"\n import traceback; traceback.print_exc()\n g.conn = None", "def before_request():\n try:\n g.conn = engine.connect()\n except:\n print \"uh oh, problem connecting to database\"\n import traceback; traceback.print_exc()\n g.conn = None", "def before_request():\n try:\n g.conn = engine.connect()\n except:\n print \"uh oh, problem connecting to database\"\n import traceback; traceback.print_exc()\n g.conn = None", "def before_request():\n try:\n g.conn = engine.connect()\n except:\n print \"uh oh, problem connecting to database\"\n import traceback; traceback.print_exc()\n g.conn = None", "def query(self, query):", "def __init__(self, addr: str, db: str, create_db=True, numeric_type=str, **kwargs):\n super().__init__(addr, **kwargs)\n self.addr = f\"{addr}/write?db={db}\"\n self.session = None\n self.numeric_type = numeric_type\n\n if create_db:\n r = requests.post(f'{addr}/query', data={'q': f'CREATE DATABASE {db}'})\n r.raise_for_status()", "def calculate(context):\n context.obj['db'] = ChanjoDB(uri=context.obj['database'])", "def _run_query(self):", "def initialize(self) -> None:\n # First, establish a connection to the specified database\n try:\n self._connect_to_db()\n except psycopg2.OperationalError: # specified database does not exist\n with psycopg2.connect(database=DATABASE_ENV[\"POSTGRES_DB\"],\n user=self.dbuser, password=self.dbpassword,\n host=self.dbhost, port=str(self.dbport)) as con:\n with con.cursor() as cur:\n con.autocommit = True # cannot create db inside a transaction\n cur.execute(f'CREATE DATABASE \"{self.dbname}\"')\n con.autocommit = False\n self._connect_to_db() # try again\n\n # Second, create the necessary database table, only if required\n with self._connection.cursor() as cur:\n cur.execute(f\"\"\"\n CREATE TABLE IF NOT EXISTS \"{self.MESSAGE_TABLE_NAME}\" (\n id SERIAL PRIMARY KEY,\n key CHAR(4) NOT NULL,\n value REAL NOT NULL,\n ts TIMESTAMP NOT NULL,\n tz TEXT NOT NULL\n );\n \"\"\")\n self._connection.commit()", "def db(ctx, index, tstep, param, model, experiment, prod, action, verbose):\n \n # ensure that ctx.obj exists and is a dict (in case `cli()` is called\n # by means other than the `if` block below)\n ctx.ensure_object(dict)\n ctx.obj['log'] = cdslog\n ctx.obj['dsargs'] = define_args(index, tstep)\n if not prod:\n prod = ctx.obj['dsargs']['product_type']\n prod = 
expand_prod(prod)\n if action == 'update':\n for pr in prod:\n update_db(cfg, index, tstep, pr, list(experiment), list(model))\n elif action == 'delete': \n if len(prod) > 1:\n cdslog.info(f\"{len(prod)} products were passed as argument, pass only one\")\n sys.exit()\n delete_record(cfg, index, prod[0], tstep, list(experiment), list(model))\n elif action == 'list': \n #varlist = [] \n models_stats(ctx, cfg, index, tstep, prod, list(param), list(model), verbose)\n elif action == 'intake': \n #varlist = [] \n create_intake(cfg)", "def __init__(self, connection):\n self.con = connection\n self.recordset = None\n self.recordset_df = None", "def __init__(self, connection_url, echo=False):\n if not connection_url:\n raise ValueError('No database connection URL provided.')\n engine = create_engine(connection_url, echo=echo)\n PipelineRun.metadata.create_all(engine)\n self.session_factory = sessionmaker(bind=engine)", "def __init__(self):\n #self.app_process = sqlite3.connect('app_process.db', check_same_thread=False)\n self.mq_first = 0 #stores timestamp\n self.mq_last = 0 #stores timestamp\n #self.data = sqlite3.connect('data.db')\n#with conn: allows to skip commit and close\n #self.app_process_cursor = self.app_process.cursor() #creates cursor to run sql commands\n #self.data_cursor = self.data.cursor()\n self.create_nodes_table()\n self.create_message_queue_table()\n self.create_data_table()\n self.create_blockchain_table()", "def __init__(self, host, user, password, database, **kwargs):\n\n # Parse kwargs\n self.auto_flush = kwargs.pop(\"auto_flush\", True)\n self.read_only = kwargs.pop(\"read_only\", False)\n self.package_namespace = kwargs.pop(\"package_namespace\", None)\n connect_args = kwargs.pop(\"connect_args\", {})\n kwargs.pop(\"node_validator\", None)\n kwargs.pop(\"edge_validator\", None)\n self.set_flush_timestamps = kwargs.pop(\"set_flush_timestamps\", True)\n if \"isolation_level\" not in kwargs:\n kwargs[\"isolation_level\"] = \"REPEATABLE_READ\"\n if \"application_name\" in kwargs:\n connect_args[\"application_name\"] = kwargs.pop(\"application_name\")\n else:\n connect_args[\"application_name\"] = socket.gethostname()\n\n # Construct connection string\n host = \"\" if host is None else host\n conn_str = \"postgresql://{user}:{password}@{host}/{database}\".format(\n user=user, password=password, host=host, database=database\n )\n if kwargs[\"isolation_level\"] not in self.acceptable_isolation_levels:\n logging.warning(\n (\n \"Using an isolation level '{}' that is not in the list of \"\n \"acceptable isolation levels {} is not safe and should be \"\n \"avoided. 
Doing this can result in one session overwriting \"\n \"the commit of a concurrent session and losing data!\"\n ).format(kwargs[\"isolation_level\"], self.acceptable_isolation_levels)\n )\n\n # Create driver engine\n self.engine = create_engine(\n conn_str, encoding=\"latin1\", connect_args=connect_args, **kwargs\n )\n\n # Create context for xlocal sessions\n self.context = xlocal()", "def __init__(\n self,\n name: Optional[str] = None,\n database: Optional[str] = None,\n schema: Optional[str] = None,\n table: Optional[str] = None,\n query: Optional[str] = None,\n event_timestamp_column: Optional[str] = \"\",\n created_timestamp_column: Optional[str] = \"\",\n field_mapping: Optional[Dict[str, str]] = None,\n date_partition_column: Optional[str] = \"\",\n ):\n if table is None and query is None:\n raise ValueError('No \"table\" argument provided.')\n\n # If no name, use the table as the default name\n _name = name\n if not _name:\n if table:\n _name = table\n else:\n raise DataSourceNoNameException()\n\n super().__init__(\n _name,\n event_timestamp_column,\n created_timestamp_column,\n field_mapping,\n date_partition_column,\n )\n\n # The default Snowflake schema is named \"PUBLIC\".\n _schema = \"PUBLIC\" if (database and table and not schema) else schema\n\n self.snowflake_options = SnowflakeOptions(\n database=database, schema=_schema, table=table, query=query\n )", "def __init__(self, host, port, user, password, db_name, **kwargs):\n super().__init__()\n self.host = host\n self.port = port\n self.user = user\n self.password = password\n self.db_name = db_name\n self.schema = kwargs.get('schema', None)\n\n self.kwargs = {}\n\n for key in kwargs.keys():\n if key != 'schema':\n self.kwargs[key] = kwargs.get(key)", "def create_request_db(request, nome_request):\r\n login = request.form['login']\r\n email = request.form['email']\r\n\r\n db_dirpath = os.path.abspath(os.path.join(os.path.dirname(__file__), \"..\"))\r\n with sql.connect(db_dirpath + '/pais_alice_requests.db') as conn:\r\n cur = conn.cursor()\r\n info_row = [(str(time.strftime(\"%d/%m/%Y %H:%M:%S\")), login, nome_request, email, 0)]\r\n cur.executemany(\r\n '''insert into requests_queue_statuses(user_req_creation_time,aliceweb_user_name,req_file_name,user_email,req_status) values(?, ?, ?, ?, ?);''',\r\n info_row)\r\n conn.commit()", "def __init__(self, database_name):\n self.conn = sqlite3.connect(\"output/%s.db\" % database_name)", "def make_sql_call(self):\n c_data = {'db_host': self.server,\n 'db_user': self.user,\n 'db_password': self.password,\n 'db_database': self.database}\n db_conn = self.SH.sql.helper.sql_conn_obj(c_data)\n result, detail = db_conn.connect()\n self.print_to_log(detail)\n result, detail = db_conn.execute(self.sql)\n db_conn.shutdown()\n self.print_to_log(detail)", "def __init__(self, table, **kwargs):\n super(PostgisIO, self).__init__(**kwargs)\n self.table = table\n self.host = kwargs.get('host') or gaia.config['gaia_postgis']['host']\n self.dbname = kwargs.get(\n 'dbname') or gaia.config['gaia_postgis']['dbname']\n self.user = kwargs.get('user') or gaia.config['gaia_postgis']['user']\n self.password = kwargs.get(\n 'password') or gaia.config['gaia_postgis']['password']\n self.engine = self.get_engine(self.get_connection_string())\n self.get_table_info()\n self.verify()", "def create_db(self):", "def query(self, **kwargs):", "def _ingest_db(c, location):\n db_name = local_db['default'].get('NAME')\n user = local_db['default'].get('USER', '')\n if user:\n user = '-u {}'.format(user)\n\n password = 
local_db['default'].get('PASSWORD', '')\n if password:\n password = ' -p{}'.format(password)\n\n cmd_kwargs = {\n 'db_name': db_name,\n 'user': user,\n 'location': location,\n 'password': password\n }\n\n with hide('running'):\n local(\n (\n 'echo Ingesting {location} into '\n '{db_name} database...'\n ).format(\n **cmd_kwargs\n )\n )\n local(\n 'mysql {user}{password} {db_name} < {location}'.format(\n **cmd_kwargs\n )\n )\n local(\n (\n 'echo Successfully ingested {location} '\n 'into {db_name} database!'\n ).format(\n **cmd_kwargs\n )\n )", "def mutate(self, info, input):\n # Convert input to dictionary\n data = api_utils.input_to_dictionary(input)\n data_source = Operation('ModelDataSource').create(**data)\n return CreateDataSource(data_source=data_source)", "def run(self, event, db):\n pass", "def prepare(self):\n pass", "def prepare(self):\n pass", "def prepare(self):\n pass", "def main(argv):\n logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)\n args = parse_args(argv[1:])\n database = parse_database(args.dict_file)\n\n server_socket = socket.socket(type=socket.SOCK_DGRAM)\n server_socket.bind(args.servent_address)\n\n query_creator = utils.QueryCreator()\n queries_seen = set()\n while True:\n message_data, message_origin = server_socket.recvfrom(\n utils.MAX_SERVER_MESSAGE_SIZE)\n message_type = utils.extract_message_type(message_data)\n\n if message_type == utils.MessageType.CLIREQ:\n key = utils.unpack_clireq(message_data)\n query = query_creator.new_query(key, message_origin)\n logging.info('Received clireq, new query: %s', repr(query))\n elif message_type == utils.MessageType.QUERY:\n query = utils.unpack_query(message_data)\n logging.info('Received query: %s', repr(query))\n else:\n logging.error('Server received RESPONSE message from %s',\n message_origin)\n continue\n\n if query.content in queries_seen:\n logging.info('Query already seen: %s', repr(query))\n continue\n queries_seen.add(query.content)\n\n if query.ttl > 0 and args.neighbor_addresses:\n packed_query = utils.pack_query(query)\n for neighbor_address in args.neighbor_addresses:\n if neighbor_address != message_origin:\n logging.info('Forwarding query to %s', neighbor_address)\n client_socket = socket.socket(type=socket.SOCK_DGRAM)\n client_socket.sendto(packed_query, neighbor_address)\n\n if query.content.key in database:\n response = utils.pack_response(query.content.key,\n database[query.content.key])\n logging.info('Sending response to %s', query.content.address)\n client_socket = socket.socket(type=socket.SOCK_DGRAM)\n client_socket.sendto(response, query.content.address)", "def create_datagrabber(data_path, template_path, template_args,\n field_template=None,\n infields=['subject_id', 'session_id'],\n outfields=['raw_file']):\n\n datasource = pe.Node(interface=nio.DataGrabber(infields=infields,\n outfields=outfields),\n name='datasource')\n\n datasource.inputs.base_directory = data_path\n datasource.inputs.template = template_path\n\n if field_template:\n datasource.inputs.field_template = field_template\n if type(template_args) == list:\n datasource.inputs.template_args = dict(raw_file=template_args)\n elif type(template_args) == dict:\n datasource.inputs.template_args = template_args\n\n datasource.inputs.sort_filelist = True\n\n return datasource", "def __init__(self, db):\n self.table_name = \"query_latent_space\"\n self.db = db", "def __init__(self, conn, *args, **kwargs):\n self.conn = conn\n self.db = None\n self.in_tx = False\n super(MySQLConnection.Worker, self).__init__(*args, **kwargs)", 
"def __init__(self, *args, **kwargs):\n self.database = args[0] if len(args) else kwargs.get('database', 'jping.db')\n is_new = not os.path.exists(self.database)\n self._connection = sqlite3.connect(self.database)\n self._connection.row_factory = sqlite3.Row\n if is_new:\n self.create_schema()", "def _get_db(self):\n gt_db = ...\n return gt_db", "def initialize(self):\r\n if not self.context:\r\n self.context = SQLContext(self.url, self.connection, self.schema)\r\n if self.table is None:\r\n self.table = self.context.table(self.table_name)\r\n if not self.fields:\r\n self.read_fields()\r\n self.field_names = self.fields.names()", "def _execute(self, db):\n raise NotImplementedError" ]
[ "0.57374924", "0.5632629", "0.5480132", "0.54712677", "0.5464478", "0.54219073", "0.54219073", "0.54219073", "0.54219073", "0.53944474", "0.5393474", "0.5370461", "0.53525424", "0.5305423", "0.5280601", "0.5280414", "0.52712566", "0.5233362", "0.5216953", "0.5216711", "0.52011853", "0.5158803", "0.515425", "0.5119249", "0.50974756", "0.5094681", "0.5090835", "0.50791377", "0.5070862", "0.5061346", "0.5059921", "0.503527", "0.50282466", "0.5023842", "0.5015775", "0.50049126", "0.4994274", "0.49819252", "0.49803948", "0.49792922", "0.49685153", "0.49574023", "0.4956942", "0.4956648", "0.49552944", "0.49537137", "0.49523973", "0.49408153", "0.49369115", "0.49327695", "0.49252295", "0.49220407", "0.4905879", "0.49034497", "0.49034497", "0.49034497", "0.49034497", "0.49034497", "0.49027315", "0.48990548", "0.48953688", "0.4892683", "0.48895493", "0.48895493", "0.48895493", "0.48893088", "0.48893088", "0.48893088", "0.48893088", "0.48887885", "0.4884041", "0.48793375", "0.48675144", "0.48640203", "0.4863279", "0.486295", "0.4862532", "0.48505798", "0.4839961", "0.4834734", "0.48342296", "0.4828526", "0.48221165", "0.48119682", "0.48103645", "0.47990024", "0.47939065", "0.47929126", "0.47863823", "0.47863674", "0.4779098", "0.4779098", "0.4779098", "0.47738448", "0.477143", "0.4764457", "0.47632825", "0.4741263", "0.4741063", "0.4734525", "0.47313055" ]
0.0
-1
From topology. Prepare a database schema request and pass it to the sink
def yield_database_schema( self, schema_name: str ) -> Iterable[CreateDatabaseSchemaRequest]: yield CreateDatabaseSchemaRequest( name=schema_name, database=EntityReference(id=self.context.database.id, type="database"), )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup_schema(command, conf, vars):", "def __init__(self, schema ):\n self.schema = schema", "def _prepare_schema(self):\n schema = DaskSchema(self.schema_name)\n\n if not self.tables:\n logger.warning(\"No tables are registered.\")\n\n for name, dc in self.tables.items():\n table = DaskTable(name)\n df = dc.df\n logger.debug(\n f\"Adding table '{name}' to schema with columns: {list(df.columns)}\"\n )\n for column in df.columns:\n data_type = df[column].dtype\n sql_data_type = python_to_sql_type(data_type)\n\n table.addColumn(column, sql_data_type)\n\n schema.addTable(table)\n\n if not self.functions:\n logger.debug(\"No custom functions defined.\")\n\n for function_description in self.function_list:\n name = function_description.name\n sql_return_type = python_to_sql_type(function_description.return_type)\n if function_description.aggregation:\n logger.debug(f\"Adding function '{name}' to schema as aggregation.\")\n dask_function = DaskAggregateFunction(name, sql_return_type)\n else:\n logger.debug(f\"Adding function '{name}' to schema as scalar function.\")\n dask_function = DaskScalarFunction(name, sql_return_type)\n\n dask_function = self._add_parameters_from_description(\n function_description, dask_function\n )\n\n schema.addFunction(dask_function)\n\n return schema", "def _create_schema(self):\n self._conn.executescript(self._db_schema)", "def __init__(self, schema=None):\n self.schema = schema or {}", "def create_schema(self, schema: str):\n return", "def schema(self):", "def schema() -> None:\n pass", "def main():\n cur, conn = connect('dwh.cfg')\n \n set_schema = schema_queries[1]\n cur.execute(set_schema)\n \n print('Loading Staging Tables.')\n load_staging_tables(cur, conn)\n \n print('Inserting Rows.')\n insert_tables(cur, conn)\n\n \n conn.close()", "def main(db_path, schema_json):\n create_db(db_path, schema_json)", "def schema(self):\n pass", "def create_staging_schema(cursor,table_schema):\n create_schema = \"CREATE SCHEMA IF NOT EXISTS \" + table_schema + \";\"\n cursor.execute(create_schema)", "def __init__(\n self,\n server_name,\n schema,\n database,\n staging_bucket_name,\n storage_integration_name,\n create_disposition,\n write_disposition,\n table_schema,\n user_data_mapper,\n username=None,\n password=None,\n private_key_path=None,\n raw_private_key=None,\n private_key_passphrase=None,\n o_auth_token=None,\n table=None,\n query=None,\n role=None,\n warehouse=None,\n expansion_service=None,\n ):\n # pylint: disable=line-too-long\n verify_credentials(\n username=username,\n password=password,\n private_key_path=private_key_path,\n raw_private_key=raw_private_key,\n o_auth_token=o_auth_token,\n )\n WriteDisposition.VerifyParam(write_disposition)\n CreateDisposition.VerifyParam(create_disposition)\n\n self.params = WriteToSnowflakeSchema(\n server_name=server_name,\n schema=schema,\n database=database,\n staging_bucket_name=staging_bucket_name,\n storage_integration_name=storage_integration_name,\n create_disposition=create_disposition,\n write_disposition=write_disposition,\n table_schema=table_schema,\n username=username,\n password=password,\n private_key_path=private_key_path,\n raw_private_key=raw_private_key,\n private_key_passphrase=private_key_passphrase,\n o_auth_token=o_auth_token,\n table=table,\n query=query,\n role=role,\n warehouse=warehouse,\n )\n self.user_data_mapper = user_data_mapper\n self.expansion_service = expansion_service or default_io_expansion_service()", "def create_schema(self, schema, *, debug=False):\n c = self.conn.cursor()\n for 
line in schema.split(\";\"):\n line = line.strip()\n if len(line)>0:\n if self.debug or debug:\n print(f\"{line};\", file=sys.stderr)\n try:\n c.execute(line)\n except (sqlite3.Error, pymysql.MySQLError) as e:\n print(\"SQL:\", line, file=sys.stderr)\n print(\"Error:\", e, file=sys.stderr)\n exit(1)", "def _setup_origin_table(self):\n if self._create_table_if_not_exists(self.dataset):\n return\n\n directory, pipeline_builder = self._directory_origin(MAX_CONCURRENCY)\n jdbc_producer = pipeline_builder.add_stage('JDBC Producer', type='destination')\n jdbc_producer.set_attributes(default_operation=\"INSERT\",\n field_to_column_mapping=[],\n enclose_object_names=True,\n use_multi_row_operation=True,\n statement_parameter_limit=32768,\n table_name=self.dataset)\n\n directory >> jdbc_producer\n\n pipeline = pipeline_builder.build().configure_for_environment(self.environments['database'])\n self.sdc_executor.add_pipeline(pipeline)\n self.sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(self.record_count, timeout_sec=LOAD_TIMEOUT)\n self.sdc_executor.stop_pipeline(pipeline)\n self.sdc_executor.remove_pipeline(pipeline)", "def setup_schema(command, conf, vars):\n import ming\n import allura\n\n # turbogears has its own special magic wired up for its globals, can't use a regular Registry\n tgl = RequestLocals()\n tgl.tmpl_context = EmptyClass()\n tgl.app_globals = config['tg.app_globals']\n tg.request_local.context._push_object(tgl)\n\n REGISTRY.prepare()\n REGISTRY.register(allura.credentials, allura.lib.security.Credentials())\n\n configure_ming(conf)\n if asbool(conf.get('activitystream.recording.enabled', False)):\n activitystream.configure(**h.convert_bools(conf, prefix='activitystream.'))\n # Nothing to do\n log.info('setup_schema called')", "def __init__(self, host, port, user, password, db_name, **kwargs):\n super().__init__()\n self.host = host\n self.port = port\n self.user = user\n self.password = password\n self.db_name = db_name\n self.schema = kwargs.get('schema', None)\n\n self.kwargs = {}\n\n for key in kwargs.keys():\n if key != 'schema':\n self.kwargs[key] = kwargs.get(key)", "def schema(self, schema):\n self._schema = schema", "def destination_schema(self) -> pulumi.Input['ApplicationApplicationConfigurationSqlApplicationConfigurationOutputDestinationSchemaArgs']:\n return pulumi.get(self, \"destination_schema\")", "def do_export_schema(self):\n export_schema = self.get_arg_value(\"export_schema\")\n\n if export_schema:\n row = {\"schemas\": self.final_schemas}\n self.write_rows(rows=row)\n del row", "def schema_generators():\n return {\n \"trips\": trips_schema,\n \"status_changes\": status_changes_schema,\n \"events\": events_schema,\n \"vehicles\": vehicles_schema,\n \"stops\": stops_schema\n }", "def __init__(self, schema, input_files, output_path):\n self.schema = schema\n self.input_files = input_files\n self.output_path = output_path", "def initialise_schema(db_name: str, password: str):\n conn = psycopg2.connect(host='localhost', dbname=db_name, user='postgres', password=password)\n cursor = conn.cursor()\n cursor.execute(_query)\n conn.commit()\n conn.close()\n\n print('Database schema was created successfully!\\n')", "def _jdbc_producer_destination(self, pipeline_builder):\n self.destination_system = self.environments['database'].engine.dialect.name\n self.destination_format = None\n\n table_name = get_random_string().lower()\n self._create_table_if_not_exists(table_name)\n jdbc_producer = pipeline_builder.add_stage('JDBC Producer', 
type='destination')\n jdbc_producer.set_attributes(default_operation=\"INSERT\",\n field_to_column_mapping=[],\n enclose_object_names=True,\n use_multi_row_operation=True,\n statement_parameter_limit=32768,\n table_name=table_name)\n query = f'TRUNCATE TABLE {table_name}'\n stop_stage = pipeline_builder.add_stop_event_stage('JDBC Query')\n if Version(self.sdc_builder.version) < Version('3.14.0'):\n stop_stage.set_attributes(sql_query=query)\n else:\n stop_stage.set_attributes(sql_queries=[query])\n return jdbc_producer, pipeline_builder", "def get_schema() -> Dict[str, type]:\n schema: Dict[str, type] = {}\n\n # Add all columns from pipeline configs\n for pipeline in get_pipelines():\n schema.update(pipeline.schema)\n\n # Add new columns from adapter\n for col_old, col_new in OUTPUT_COLUMN_ADAPTER.items():\n if col_old in schema and col_new is not None:\n schema[col_new] = schema[col_old]\n\n return schema", "def _jdbc_query_origin(self):\n self.origin_system = self.environments['database'].engine.dialect.name\n self._setup_origin_table()\n pipeline_builder = self.sdc_builder.get_pipeline_builder()\n jdbc_query_consumer = pipeline_builder.add_stage('JDBC Query Consumer', type='origin')\n jdbc_query_consumer.set_attributes(incremental_mode=False,\n sql_query=f'SELECT * FROM {self.dataset}')\n return jdbc_query_consumer, pipeline_builder", "def schema(self):\n raise NotImplementedError", "def initialize_schema(self, dry_run=False):\n if not dry_run:\n self.flush()", "def build_input_db():\n build_input_database()", "def __init__(\n self,\n name: Optional[str] = None,\n database: Optional[str] = None,\n schema: Optional[str] = None,\n table: Optional[str] = None,\n query: Optional[str] = None,\n event_timestamp_column: Optional[str] = \"\",\n created_timestamp_column: Optional[str] = \"\",\n field_mapping: Optional[Dict[str, str]] = None,\n date_partition_column: Optional[str] = \"\",\n ):\n if table is None and query is None:\n raise ValueError('No \"table\" argument provided.')\n\n # If no name, use the table as the default name\n _name = name\n if not _name:\n if table:\n _name = table\n else:\n raise DataSourceNoNameException()\n\n super().__init__(\n _name,\n event_timestamp_column,\n created_timestamp_column,\n field_mapping,\n date_partition_column,\n )\n\n # The default Snowflake schema is named \"PUBLIC\".\n _schema = \"PUBLIC\" if (database and table and not schema) else schema\n\n self.snowflake_options = SnowflakeOptions(\n database=database, schema=_schema, table=table, query=query\n )", "def __init__(self, source, schema, show_all=False):\n self.source = source\n self.schema = schema\n self.show_all = show_all\n self._saved_columns = None", "def set_schema(self, schema):\r\n self.__schema = schema", "def schema(self, schema):\n\n self._schema = schema", "def schema(self, schema):\n\n self._schema = schema", "def schema(self, schema):\n\n self._schema = schema", "def init_db():\n with LoggerApi.app_context():\n db = get_db()\n with LoggerApi.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()", "def createSchema(schema):\n return \"CREATE SCHEMA \\\"{name}\\\";\\n\".format(name = schema.name)", "def create_schema(conn, schemapath):\n with open(schemapath, 'r') as f:\n sql = f.read()\n with conn.cursor() as curs:\n curs.execute(sql)", "def __init__(self, *args):\n _snap.Schema_swiginit(self, _snap.new_Schema(*args))", "def get_schema(self) -> dict:", "async def upgradeSchema(self) -> None:", "async def get_schema(request: Request, 
namespace: str, project: str):\n # endpoint to schema.databio.org/...\n # like pipelines/ProseqPEP.yaml\n\n try:\n schema = eido.read_schema(\n f\"https://schema.databio.org/{namespace}/{project}.yaml\"\n )[0]\n except IndexError:\n raise HTTPException(status_code=404, detail=\"Schema not found\")\n\n return schema", "def main(input_file, output):\n path = pathlib.Path(input_file)\n click.echo(\n click.style(f\"Read a datapackage: \", fg=\"green\")\n + click.style(f\"{path}\", fg=\"green\", bold=True)\n )\n package = datapackage.Package(str(path))\n header = jinja2.Template(TEMPLATE_SQL_HEADER).render(\n now=datetime.datetime.now(), tables=package.resource_names\n )\n output.write(header)\n template = jinja2.Template(TEMPLATE_SQL_CREATE)\n for r in package.resources:\n s = r.schema\n click.echo(\n click.style(f\"Resource \", fg=\"blue\")\n + click.style(f\"{r.name}\", fg=\"blue\", bold=True)\n + click.style(f\" has \", fg=\"blue\")\n + click.style(f\"{len(s.fields)}\", fg=\"blue\", bold=True)\n + click.style(f\" fields\", fg=\"blue\")\n )\n path = None\n if r.local:\n path = r.source\n output.write(\n template.render(\n name=r.name, title=r.descriptor.get(\"title\"), fields=s.fields, path=path\n )\n )\n output.write(\"\\n\")", "def __init__(self, db):\n assert db.schema.id == self.schema, \\\n _('invalid schema: %r instead of %r' % (\n db.schema.id, self.schema))", "def create(self):\n c = self.cursor()\n byte_schema = pkgutil.get_data(__package__, 'schema.sql')\n c.executescript(byte_schema.decode('utf-8'))\n self.commit()", "def create_schema(self, schema):\n sql = f'set role {self.write_role}; ' \\\n + f'CREATE SCHEMA IF NOT EXISTS {schema};'\n return sql", "def setup_schema(self):\n models.Base.metadata.create_all(self.session.bind)", "def __init__(self, connection=None, url=None,\r\n table=None, statement=None, schema=None, autoinit = True,\r\n **options):\r\n\r\n super(SQLDataSource, self).__init__()\r\n\r\n if not table and not statement:\r\n raise AttributeError(\"Either table or statement should be \" \\\r\n \"provided for SQL data source\")\r\n\r\n if statement:\r\n raise NotImplementedError(\"SQL source stream based on statement \" \\\r\n \"is not yet implemented\")\r\n\r\n if not options:\r\n options = {}\r\n\r\n self.url = url\r\n self.connection = connection\r\n\r\n self.table_name = table\r\n self.statement = statement\r\n self.schema = schema\r\n self.options = options\r\n\r\n self.context = None\r\n self.table = None\r\n self.fields = None\r\n\r\n if autoinit:\r\n self.initialize()", "def init_test_schema(db_parameters) -> Generator[None, None, None]:\n ret = db_parameters\n with snowflake.connector.connect(\n user=ret[\"user\"],\n password=ret[\"password\"],\n host=ret[\"host\"],\n port=ret[\"port\"],\n database=ret[\"database\"],\n account=ret[\"account\"],\n protocol=ret[\"protocol\"],\n ) as con:\n con.cursor().execute(f\"CREATE SCHEMA IF NOT EXISTS {TEST_SCHEMA}\")\n yield\n con.cursor().execute(f\"DROP SCHEMA IF EXISTS {TEST_SCHEMA}\")", "def create_schema(schema): \n\n query = \"CREATE SCHEMA IF NOT EXISTS {}\".format(schema)\n qdb.execute(query)", "def _create_input_data(self):\n SCHEMA = parse_table_schema_from_json(\n '{\"fields\": [{\"name\": \"data\", \"type\": \"BYTES\"}]}')\n\n def format_record(record):\n # Since Synthetic Source returns data as a dictionary, we should skip one\n # of the part\n import base64\n return {'data': base64.b64encode(record[1])}\n\n with TestPipeline() as p:\n ( # pylint: disable=expression-not-assigned\n p\n | 'Produce rows' >> 
Read(\n SyntheticSource(self.parse_synthetic_source_options()))\n | 'Format' >> Map(format_record)\n | 'Write to BigQuery' >> WriteToBigQuery(\n dataset=self.input_dataset,\n table=self.input_table,\n schema=SCHEMA,\n create_disposition=BigQueryDisposition.CREATE_IF_NEEDED,\n write_disposition=BigQueryDisposition.WRITE_EMPTY))", "def load_schema_dataset(self, dataset_raw):\r\n\r\n self._dataset_raw = dataset_raw\r\n return self\r\n # self._parse_schemas_raw()\r\n # print(schemas)\r", "def _get_schema(self):\n\n schema = ProtocolSchema()\n\n schema.id = self.id\n schema.type = type(self).__name__\n\n for input_path in self.required_inputs:\n\n if not (input_path.start_protocol is None or (input_path.start_protocol == self.id and\n input_path.start_protocol == input_path.last_protocol)):\n\n continue\n\n # Always make sure to only pass a copy of the input. Changing the schema\n # should NOT change the protocol.\n schema.inputs[input_path.full_path] = copy.deepcopy(self.get_value(input_path))\n\n return schema", "def input_schema(self) -> pulumi.Input['ApplicationApplicationConfigurationSqlApplicationConfigurationInputInputSchemaArgs']:\n return pulumi.get(self, \"input_schema\")", "def _generate_schema(self):\n\n response = self._request('GET', CosmoSim.SCHEMA_URL,\n auth=(self.username, self.password),\n headers={'Accept': 'application/json'},\n cache=False)\n data = response.json()\n self.db_dict = {}\n for i in range(len(data['databases'])):\n self.db_dict[str(data['databases'][i]['name'])] = {}\n\n sstr = str(data['databases'][i]['name'])\n sid = str(data['databases'][i]['id'])\n self.db_dict[sstr]['id'] = sid\n sdesc = str(data['databases'][i]['description'])\n self.db_dict[sstr]['description'] = sdesc\n self.db_dict[sstr]['tables'] = {}\n for j in range(len(data['databases'][i]['tables'])):\n sstr2 = str(data['databases'][i]['tables'][j]['name'])\n self.db_dict[sstr]['tables'][sstr2] = {}\n sdata = data['databases'][i]['tables'][j]['id']\n self.db_dict[sstr]['tables'][sstr2]['id'] = sdata\n sdesc2 = data['databases'][i]['tables'][j]['description']\n self.db_dict[sstr]['tables'][sstr2]['description'] = sdesc2\n self.db_dict[sstr]['tables'][sstr2]['columns'] = {}\n tmpval = len(data['databases'][i]['tables'][j]['columns'])\n for k in range(tmpval):\n sdata2 = data['databases'][i]['tables'][j]['columns'][k]\n sdata2_id = sdata2['id']\n sstr3 = str(sdata2['name'])\n\n sdesc3 = sdata2['description']\n self.db_dict[sstr]['tables'][sstr2]['columns'][sstr3] = {\n 'id': sdata2_id,\n 'description': sdesc3}\n return response", "def __init__(self):\n engine = db_connect()\n create_table(engine)\n self.Session = sessionmaker(bind=engine)\n logging.info(\"****DuplicatesPipeline: database connected****\")", "def generate_query(schema):\n q = None\n if schema:\n q = \"CREATE SCHEMA\"\n if schema.if_not_exists:\n q = \"{} IF NOT EXISTS\".format(q)\n if schema.name:\n q = \"{} {}\".format(q, schema.name)\n if schema.authorization:\n q = \"{} AUTHORIZATION {}\".format(q, schema.authorization)\n return q", "def schema(self, schema, in_='formData'):\n parameters = core.parameters_from_object_schema(schema, in_=in_)\n return compose(*map(self.parameter, parameters))", "def _get_schema_using_query(self, query: str) -> sch.Schema:\n return sch.Schema.from_tuples(self._metadata(query))", "def ProcessSchemaUpdate(ref, args, request):\n table = request.table\n relaxed_columns = args.relax_columns\n if not table.schema and not relaxed_columns: # if not updating schema,\n return request # then just return.\n\n 
original_schema = _TryGetCurrentSchema(ref.Parent().Name(),\n ref.Name(),\n ref.projectId)\n\n new_schema_columns = table.schema\n updated_fields = _GetUpdatedSchema(original_schema,\n new_schema_columns,\n relaxed_columns)\n\n table_schema_type = GetApiMessage('TableSchema')\n request.table.schema = table_schema_type(fields=updated_fields)\n\n return request", "def _Dynamic_GetSchema(self, req, schema, request_id=None):\n # This is not used, but it is required for the method signature.\n del request_id\n\n app_str = req.app()\n self.__ValidateAppId(app_str)\n schema.set_more_results(False)", "def schema_view(request):\n generator = schemas.SchemaGenerator(title='Experiment Data Depot')\n return response.Response(generator.get_schema(request=request))", "def __init__(self):\n engine = db_connect()\n create_table(engine)\n self.Session = sessionmaker(bind=engine)\n logging.info(\"****SaveRestaurantsPipeline: database connected****\")", "def create_db_execute(self):\n self.execute(query=self.db_create_schema.format(self.db_name))", "def startConnection(self):\n try:\n self.conn = psycopg2.connect(\"dbname='library' user='postgres' host='localhost' password='Codechef'\")\n # self.conn = psycopg2.connect(\"dbname='library' user='postgres' host='localhost' password='Codechef'\")\n # self.conn = psycopg2.connect(\"dbname='db_b130974cs' user='postgres' host='localhost' password='Codechef'\")\n except:\n print \"I am unable to connect to the database\"\n print \"connected to database...\"\n self.schema = SchemaGraph(self.conn)", "def __init__(self,schema_name = 'null'):\n\t\tself.connected = False\n\t\tself.__schema_name = ''\n\t\tself.__db = ''\n\t\tself.__cursor = ''\n\t\tself.__engine = ''\n\t\tif schema_name != 'null':\n\t\t\tself.connect(schema_name)", "def _create_jdbc_producer_pipeline(pipeline_builder, pipeline_title, raw_data, table_name, operation):\n dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')\n dev_raw_data_source.set_attributes(data_format='JSON', raw_data=raw_data)\n\n record_deduplicator = pipeline_builder.add_stage('Record Deduplicator')\n\n FIELD_MAPPINGS = [dict(field='/id', columnName='id'),\n dict(field='/name', columnName='name')]\n jdbc_producer = pipeline_builder.add_stage('JDBC Producer')\n jdbc_producer.set_attributes(default_operation=operation,\n table_name=table_name,\n field_to_column_mapping=FIELD_MAPPINGS,\n stage_on_record_error='STOP_PIPELINE')\n\n trash = pipeline_builder.add_stage('Trash')\n dev_raw_data_source >> record_deduplicator >> jdbc_producer\n record_deduplicator >> trash\n\n return pipeline_builder.build(title=pipeline_title)", "def fetch_schema(self) -> None:\n if self.schema_file:\n logger.info(\"Loaded schema from file '%s'\", self.schema_file)\n self._schema = load_schema_file(self.schema_file)\n else:\n url = self.schema_url or urljoin(self.base_url, \"schema/openapi.yaml\")\n logger.info(\"Fetching schema at '%s'\", url)\n self._schema = schema_fetcher.fetch(url, {\"v\": \"3\"})", "def before_request():\n\tg.db = sql.connect(host=cfg.dbhost, port=cfg.dbport, user=cfg.user,\\\n\t\tpasswd=cfg.password, db=cfg.database,\\\n\t\tcharset=cfg.charset)", "def __init__(\n self,\n server_name,\n schema,\n database,\n staging_bucket_name,\n storage_integration_name,\n csv_mapper,\n username=None,\n password=None,\n private_key_path=None,\n raw_private_key=None,\n private_key_passphrase=None,\n o_auth_token=None,\n table=None,\n query=None,\n role=None,\n warehouse=None,\n expansion_service=None):\n verify_credentials(\n 
username=username,\n password=password,\n private_key_path=private_key_path,\n raw_private_key=raw_private_key,\n o_auth_token=o_auth_token,\n )\n\n self.params = ReadFromSnowflakeSchema(\n server_name=server_name,\n schema=schema,\n database=database,\n staging_bucket_name=staging_bucket_name,\n storage_integration_name=storage_integration_name,\n username=username,\n password=password,\n private_key_path=private_key_path,\n raw_private_key=raw_private_key,\n private_key_passphrase=private_key_passphrase,\n o_auth_token=o_auth_token,\n table=table,\n query=query,\n role=role,\n warehouse=warehouse,\n )\n self.csv_mapper = csv_mapper\n self.expansion_service = expansion_service or default_io_expansion_service()", "def _read_schema(self):\n schema = self.SCHEMA[self.action]\n assert_keys_match(self.op.keys(), schema, allow_missing=False)\n if 'community' in schema: self._read_community()\n if 'account' in schema: self._read_account()\n if 'permlink' in schema: self._read_permlink()\n if 'role' in schema: self._read_role()\n if 'notes' in schema: self._read_notes()\n if 'title' in schema: self._read_title()\n if 'props' in schema: self._read_props()", "def init_db():\n with app.app_context():\n db = get_db()\n with app.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()", "def get_source_schema(cls) -> dict:\n source_schema = get_base_schema(\n root=True,\n id_=\"source.schema.json\",\n title=\"Source data schema\",\n description=\"Schema for the source data, files and directories\",\n version=\"0.1.0\",\n )\n for interface_name, data_interface in cls.data_interface_classes.items():\n source_schema[\"properties\"].update({interface_name: unroot_schema(data_interface.get_source_schema())})\n return source_schema", "def init_db():\n with app.app_context():\n db = connect_db()\n with app.open_resource('schema.sql') as f:\n db.cursor().executescript(f.read())\n db.commit()", "def dbinit( *args, **kwargs ):", "def __init__(self, *args, **kwargs):\n self.database = args[0] if len(args) else kwargs.get('database', 'jping.db')\n is_new = not os.path.exists(self.database)\n self._connection = sqlite3.connect(self.database)\n self._connection.row_factory = sqlite3.Row\n if is_new:\n self.create_schema()", "def __init__(self, url=None, connection=None, schema=None):\r\n\r\n if not url and not connection:\r\n raise AttributeError(\"Either url or connection should be provided\" \\\r\n \" for SQL data source\")\r\n\r\n super(SQLContext, self).__init__()\r\n\r\n if connection:\r\n self.connection = connection\r\n self.should_close = False\r\n else:\r\n engine = sqlalchemy.create_engine(url)\r\n self.connection = engine.connect()\r\n self.should_close = True\r\n\r\n self.metadata = sqlalchemy.MetaData()\r\n self.metadata.bind = self.connection.engine\r\n self.schema = schema", "def main(conn, label_config, table_name, start_date, end_date,\r\n preprocessing_prefix):\r\n label_sql = label_config['query']\r\n label_sql = label_sql.replace('{prefix}', preprocessing_prefix)\r\n label_sql = label_sql.replace('{start_date}', start_date)\r\n label_sql = label_sql.replace('{end_date}', end_date)\r\n drop_sql = f'drop table if exists {table_name};'\r\n create_sql = f'create table {table_name} as ({label_sql});'\r\n sql.run_sql_from_string(conn, drop_sql)\r\n sql.run_sql_from_string(conn, create_sql)", "def serializeSchemaContext(schema_context, event=None):\n # find the FTI and model\n fti = schema_context.fti\n schemaName = schema_context.schemaName\n schema = 
schema_context.schema\n model = fti.lookupModel()\n\n # synchronize changes to the model\n syncSchema(schema, model.schemata[schemaName], overwrite=True)\n fti.model_source = serializeModel(model)", "def get_schema(schema): # noqa: E501\n return 'do some magic!'", "def test_generate_target_schema():\n target = DummyTarget()\n # change the spec\n spec = QCSpec(method=\"ani2x\", basis=None, program=\"torchani\")\n target.qc_spec = spec\n schema = target.generate_target_schema()\n assert schema.target_name == target.name\n assert schema.qc_spec == target.qc_spec\n assert schema.collection_workflow == target.collection_workflow\n assert schema.settings == target.dict()", "def readjamschema(schema):\n raise NotImplementedError(msg)", "def init_db():\n with app.app_context():\n db = get_db()\n with app.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()", "def init_db():\n with app.app_context():\n db = get_db()\n with app.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()", "def init_db():\n with app.app_context():\n db = get_db()\n with app.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()", "def init_db():\n with app.app_context():\n db = get_db()\n with app.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()", "def init_db():\n with app.app_context():\n db = get_db()\n with app.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()", "def init_db():\n with app.app_context():\n db = get_db()\n with app.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()", "def init_db():\n with app.app_context():\n db = get_db()\n with app.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()", "def setup_schema(BaseDao, session):\n def setup_schema_fn():\n for class_ in BaseDao._decl_class_registry.values():\n if hasattr(class_, '__tablename__'):\n if class_.__name__.endswith('Schema'):\n raise ModelConversionError(\n \"For safety, setup_schema can not be used when a\"\n \"Model class ends with 'Schema'\"\n )\n\n class Meta(object):\n model = class_\n sqla_session = session\n dump_only = ('pkId', 'created', 'modified')\n\n schema_class_name = '%sSchema' % class_.__name__\n\n schema_class = type(\n schema_class_name,\n (ModelSchema,),\n {'Meta': Meta}\n )\n\n setattr(class_, '__marshmallow__', schema_class)\n\n return setup_schema_fn", "def prepare(self, db):\n raise NotImplementedError('GenericEngine.prepare is an abstract method.')", "def _load_schema(self, mode=\"staging\"):\n\n self._check_mode(mode)\n\n json_path = self.table_folder / f\"schema-{mode}.json\"\n columns = self.table_config[\"columns\"]\n\n if mode == \"staging\":\n new_columns = []\n for c in columns:\n # case is_in_staging are None then must be True\n is_in_staging = (\n True if c.get(\"is_in_staging\") is None else c[\"is_in_staging\"]\n )\n # append columns declared in table_config.yaml to schema only if is_in_staging: True\n if is_in_staging and not c.get(\"is_partition\"):\n c[\"type\"] = \"STRING\"\n new_columns.append(c)\n\n del columns\n columns = new_columns\n\n elif mode == \"prod\":\n schema = self._get_table_obj(mode).schema\n\n # get field names for fields at schema and at table_config.yaml\n column_names = [c[\"name\"] for c in columns]\n schema_names = [s.name for s in schema]\n\n # check if there are mismatched fields\n 
not_in_columns = [name for name in schema_names if name not in column_names]\n not_in_schema = [name for name in column_names if name not in schema_names]\n\n # raise if field is not in table_config\n if not_in_columns:\n raise BaseDosDadosException(\n \"Column {error_columns} was not found in table_config.yaml. Are you sure that \"\n \"all your column names between table_config.yaml, publish.sql and \"\n \"{project_id}.{dataset_id}.{table_id} are the same?\".format(\n error_columns=not_in_columns,\n project_id=self.table_config[\"project_id_prod\"],\n dataset_id=self.table_config[\"dataset_id\"],\n table_id=self.table_config[\"table_id\"],\n )\n )\n\n # raise if field is not in schema\n if not_in_schema:\n raise BaseDosDadosException(\n \"Column {error_columns} was not found in publish.sql. Are you sure that \"\n \"all your column names between table_config.yaml, publish.sql and \"\n \"{project_id}.{dataset_id}.{table_id} are the same?\".format(\n error_columns=not_in_schema,\n project_id=self.table_config[\"project_id_prod\"],\n dataset_id=self.table_config[\"dataset_id\"],\n table_id=self.table_config[\"table_id\"],\n )\n )\n\n # if field is in schema, get field_type and field_mode\n for c in columns:\n for s in schema:\n if c[\"name\"] == s.name:\n c[\"type\"] = s.field_type\n c[\"mode\"] = s.mode\n break\n ## force utf-8, write schema_{mode}.json\n json.dump(columns, (json_path).open(\"w\", encoding=\"utf-8\"))\n\n # load new created schema\n return self.client[f\"bigquery_{mode}\"].schema_from_json(str(json_path))", "def parseToDb(self):\n self.cursor.execute('''DROP TABLE IF EXISTS policy''')\n self.cursor.execute('''CREATE TABLE policy\n (name text, src text, dst text, services text, action INTEGER)''')", "def __init__(__self__, *,\n database_name: str,\n machine_name: str,\n server_name: str,\n source: str,\n source_computer_id: str,\n vmuuid: str,\n workspace_id: str):\n pulumi.set(__self__, \"database_name\", database_name)\n pulumi.set(__self__, \"machine_name\", machine_name)\n pulumi.set(__self__, \"server_name\", server_name)\n pulumi.set(__self__, \"source\", 'OnPremiseSql')\n pulumi.set(__self__, \"source_computer_id\", source_computer_id)\n pulumi.set(__self__, \"vmuuid\", vmuuid)\n pulumi.set(__self__, \"workspace_id\", workspace_id)", "def add_schema_copying_to_pipeline(pipeline: Pipeline, schema_name,\n source_db_alias: str, target_db_alias: str,\n max_number_of_parallel_tasks: int = 4):\n task_id = \"copy_schema\"\n description = f\"Copies the {schema_name} schema to the {target_db_alias} db\"\n commands = []\n if pipeline.final_node:\n assert (isinstance(pipeline.final_node, Task))\n description = pipeline.final_node.description + ' + ' + description\n task_id = pipeline.final_node.id + '_and_' + task_id\n commands = pipeline.final_node.commands\n pipeline.remove(pipeline.final_node)\n\n pipeline.add_final(\n ParallelCopySchema(id=task_id, description=description, schema_name=schema_name,\n source_db_alias=source_db_alias, target_db_alias=target_db_alias,\n max_number_of_parallel_tasks=max_number_of_parallel_tasks,\n commands_before=commands[:-1], commands_after=commands[-1:]))", "def test_custom_metadata_schema(self):\n # The use-case for this functionality is to allow using\n # Foreign Data Wrappers, each with a full set of Django\n # tables, to copy between databases using SQLAlchemy\n # and the automatically generation of aldjemy.\n metadata = MetaData(schema=\"arbitrary\")\n sa_models = construct_models(metadata)\n 
self.assertEqual(sa_models[Log].__table__.schema, \"arbitrary\")", "def init_db():\n with closing(connect_db()) as db:\n with app.open_resource('schema.sql') as fobj:\n db.cursor().executescript(fobj.read())\n db.commit()", "def generate_sql_schema(self, schema, schema_name, psql_tables_path):\n psql_tables = open(psql_tables_path, 'w')\n psql_tables.write(\"SET client_min_messages TO WARNING;\\n\")\n psql_tables.write(\"DROP SCHEMA IF EXISTS %s CASCADE;\\n\" % schema_name)\n psql_tables.write(\"CREATE SCHEMA IF NOT EXISTS %s;\\n\" % schema_name)\n psql_tables.write(\"SET SCHEMA '%s';\\n\" % schema_name)\n psql_tables.write(\"CREATE EXTENSION \\\"unaccent\\\";\\n\\n\")\n\n for table_name, table_attr in schema['tables'].iteritems():\n psql_tables.write(\"\\n-- CREATE TABLE %s \\n %s \\n %s \\n\" % (\n table_attr['name'], self._get_sql_drop_table(table_attr),\n self._get_sql_create_table(table_attr)\n ))\n\n psql_tables.close()", "def __init__(self,\n stats: channel.Channel,\n name: Text = None,\n outputs: Dict[Text, channel.Channel] = None):\n component_name = 'SchemaGen'\n input_dict = {'stats': channel.as_channel(stats)}\n exec_properties = {}\n super(SchemaGen, self).__init__(\n component_name=component_name,\n unique_name=name,\n driver=base_driver.BaseDriver,\n executor=executor.Executor,\n input_dict=input_dict,\n outputs=outputs,\n exec_properties=exec_properties)", "def create_schema(query_root, host, port, db_name, user, password):\n try:\n conn = PGDB(host, port, db_name, user, password)\n try:\n conn.executeQueryFromFile(os.path.join(query_root, PREP_QUERY_DIR, \"create_tbl.sql\"))\n except Exception as e:\n print(\"unable to run create tables. %s\" % e)\n return 1\n conn.commit()\n conn.close()\n except Exception as e:\n print(\"unable to connect to the database. %s\" % e)\n return 1" ]
[ "0.6031849", "0.60288286", "0.56962603", "0.5674572", "0.565673", "0.5652133", "0.563791", "0.56235284", "0.5571399", "0.55672354", "0.5506195", "0.54994935", "0.5494928", "0.5465217", "0.5447709", "0.5426592", "0.54234093", "0.53967094", "0.53693163", "0.5365079", "0.53575385", "0.53325516", "0.5329885", "0.5317773", "0.53162116", "0.53048575", "0.5267915", "0.52519", "0.52429414", "0.52427447", "0.5239421", "0.5209686", "0.519761", "0.519761", "0.519761", "0.51933736", "0.5191473", "0.51732165", "0.51729023", "0.51669514", "0.51512796", "0.514907", "0.51357955", "0.5129287", "0.5129243", "0.5119083", "0.5118281", "0.51079535", "0.5104966", "0.5093492", "0.5054248", "0.50463134", "0.5035934", "0.5030185", "0.50288427", "0.5026447", "0.50241417", "0.5022235", "0.5020809", "0.5010158", "0.50064796", "0.49982646", "0.49956417", "0.49903646", "0.49853808", "0.4984764", "0.49832714", "0.49810448", "0.4977545", "0.49748904", "0.4972417", "0.4971294", "0.49711105", "0.4945143", "0.49431694", "0.49421117", "0.4941387", "0.4941011", "0.4933621", "0.49252206", "0.49246013", "0.49208903", "0.49106708", "0.49106708", "0.49106708", "0.49106708", "0.49106708", "0.49106708", "0.49106708", "0.48914823", "0.488843", "0.48882473", "0.48834923", "0.4882782", "0.48796856", "0.4875521", "0.48733094", "0.48732033", "0.48718172", "0.48695767" ]
0.54011047
17
Connect to the source database to get the table names and types. By default, use the inspector method to get the names and pass the Regular type. This is useful for sources where we need fine-grained logic on how to handle table types, e.g., external, foreign,...
def query_table_names_and_types( self, schema_name: str ) -> Iterable[TableNameAndType]: return [ TableNameAndType(name=table_name) for table_name in self.inspector.get_table_names(schema_name) or [] ]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_tabletype(cls) -> str:\n raise NotImplementedError", "def get_tables_name_and_type(self) -> Optional[Iterable[Tuple[str, str]]]:\n try:\n schema_name = self.context.database_schema.name.__root__\n if self.source_config.includeTables:\n for table_and_type in self.query_table_names_and_types(schema_name):\n table_name = self.standardize_table_name(\n schema_name, table_and_type.name\n )\n table_fqn = fqn.build(\n self.metadata,\n entity_type=Table,\n service_name=self.context.database_service.name.__root__,\n database_name=self.context.database.name.__root__,\n schema_name=self.context.database_schema.name.__root__,\n table_name=table_name,\n skip_es_search=True,\n )\n if filter_by_table(\n self.source_config.tableFilterPattern,\n table_fqn\n if self.source_config.useFqnForFiltering\n else table_name,\n ):\n self.status.filter(\n table_fqn,\n \"Table Filtered Out\",\n )\n continue\n yield table_name, table_and_type.type_\n\n if self.source_config.includeViews:\n for view_name in self.inspector.get_view_names(schema_name):\n view_name = self.standardize_table_name(schema_name, view_name)\n view_fqn = fqn.build(\n self.metadata,\n entity_type=Table,\n service_name=self.context.database_service.name.__root__,\n database_name=self.context.database.name.__root__,\n schema_name=self.context.database_schema.name.__root__,\n table_name=view_name,\n )\n\n if filter_by_table(\n self.source_config.tableFilterPattern,\n view_fqn\n if self.source_config.useFqnForFiltering\n else view_name,\n ):\n self.status.filter(\n view_fqn,\n \"Table Filtered Out\",\n )\n continue\n yield view_name, TableType.View\n except Exception as err:\n logger.warning(\n f\"Fetching tables names failed for schema {schema_name} due to - {err}\"\n )\n logger.debug(traceback.format_exc())", "def _load_db(self):\n for type_ in self._types:\n try:\n type_.table(self._metadata)\n except InvalidRequestError:\n pass\n # Reflect metadata so auto-mapping works\n self._metadata.reflect(self._engine)\n # Make sure the tables exist\n self._metadata.create_all()", "def db_table(self):", "def getTable(self, name: str):\n query = f\"SELECT * FROM '{name}';\"\n result = sql.executeAndReadQuery(self.connection, query)\n return result", "def _get_source(self, tipo):\n\n if self._match_array(tipo, self.c_array_types):\n tipo = tipo.strip()[:-4]\n db = Database()\n query = \"SELECT type_source FROM types WHERE type_name = '\" + tipo + \"' ORDER BY type_id\"\n self.source_file = list(db.execute_query(query))\n if self.source_file: # Validacion por si la query no encontro valores\n self.source_file = self.source_file[0][0]\n db.close_connection()", "def meta_db_tables(self) -> list:\r\n def _passer(**kwargs):\r\n data = self.engine.execute(\"\"\"\r\n SELECT * FROM sqlite_master WHERE type='table';\r\n \"\"\").fetchall()\r\n table_names = [i[1] for i in data]\r\n return table_names\r\n return self._connectionController(_passer)", "def _select_table(self):\n\n return self.postgres.execute(f\"SELECT * FROM {self.table_name};\")", "def _get_table_reflection(self, schema: str, table: str) -> Table:\n return self.sql_metadata.tables.get(f\"{schema}.{table}\",\n Table(table, self.sql_metadata, schema=schema, autoload=True))", "def _get_table(self):\n\t\treturn self._table", "def getTableByName(self, tablename):\n pass", "def get_my_tables(self):\n qnum = self.master('sql', att={'type': 'table'}) # it's a Table._call_() function call\n if self.run():\n return (self.table_factory(self.get_table_info(result[0])) for result in self.results[qnum])\n 
else:\n print('An error has occurred when initializing the database.')", "def _get_table(self, cursor):\n raise NotImplementedError", "def _get_types(self):\n\n db = Database()\n self.c_built_ins = list(map(lambda tup: tup[0], db.select_built_types()))\n self.c_built_in_array_types = r'^(' + '|'.join(self.escaped(self.c_built_ins)) + ')\\[[0-9]*\\]'\n self.c_types = list(map(lambda tup: tup[0], db.select_types()))\n self.c_array_types = r'^(' + '|'.join(self.escaped(self.c_types)) + ')\\[[0-9]*\\]'\n db.close_connection()", "def getTable(self):\n return self.db.table(self.entity)", "def get_tables_from_db(self):\r\n self.cursor.execute(\"SELECT name FROM sqlite_master WHERE type = 'table';\")\r\n\r\n # Return list of tuples with the names of tables --> names of profiles.\r\n self.profiles_name_list = [elem[0] for elem in self.cursor.fetchall()]\r\n self.profiles_name_list = tuple(self.profiles_name_list)", "def table(cls):\n return cls.__name__", "def print_all_tables(self):\n conn = self.connect()\n cursor = conn.cursor()\n cursor.execute(\"SELECT name FROM sqlite_master WHERE type='table';\")\n print(cursor.fetchall())", "def getTables(self):\n\treturn self.dbNames", "def get_table_definition(jwt_payload: dict, schema_name: str, table_name: str):\n DJConnector.set_datajoint_config(jwt_payload)\n\n schema_virtual_module = dj.create_virtual_module(schema_name, schema_name)\n return getattr(schema_virtual_module, table_name).describe()", "def get_db(self, typename):\n return self._dbs[typename]", "def get_table_def(dict_in, db_in):\n meta = MetaData(db_in)\n \n val_mapping = {\n 'pressure': Integer,\n 'temperature': Float,\n 'humidity': Float,\n 'battery': Integer,\n 'colorTemperature': Integer,\n }\n \n val_type = val_mapping.get(dict_in['name'], String)\n \n\n table_def = Table(dict_in['name'], meta, \n Column('source', String),\n Column('name', String),\n Column('displayName', String),\n Column('value', String),\n Column('unit', String),\n Column('deviceId', Integer),\n Column('hubId', Integer),\n Column('locationId', Integer),\n Column('installedAppId', Integer),\n Column('descriptionText', String),\n Column('timestamp', DateTime),\n )\n return table_def", "def get_schema(db, sourcename):\n try:\n schema = db[\"tables\"][sourcename]\n schema[\"type\"] = constants.TABLE\n except KeyError:\n try:\n schema = db[\"views\"][sourcename]\n schema[\"type\"] = constants.VIEW\n except KeyError:\n raise ValueError(\"no such table/view\")\n return schema", "def get_tablename(self):\n return self.ds_table", "def _source_type(self):\n pass", "def getTableNames(self):\n\tif not self.dbNames:\n\t # get db table names from DB\n\t if self.dbType==\"sqlite\":\n\t query=\"SELECT name FROM sqlite_master WHERE type='table';\"\n\t elif self.dbType==\"mysql\":\n\t query=\"SHOW TABLES\"\n\t self.startTxn(\"SQLUtil.__init__\")\n\t tup=self.fetchAll(query)\n\t self.endTxn(\"SQLUtil.__init__\")\n\t for item in tup:\n\t self.dbNames.append(item[0])\n\treturn self.dbNames", "def get_tables(self, db_name):\n pass", "def _load_type_tables(self):\n logger.info(\"Reading content of type tables...\")\n for table_name in self.type_tables:\n logger.info(f\"Reading JSONL dump of type table '{table_name}'...\")\n table_jsonl = resource_stream('sotorrent_pipeline',\n f'type_tables/{table_name}.jsonl').read().decode()\n self.type_tables_jsonl[table_name] = table_jsonl\n logger.info(f\"Read {len(self.type_tables_jsonl)} type table(s).\")", "def test_reflection(self):\n m = MetaData()\n\n t = Table('test_table_syn', m, 
autoload=True,\n autoload_with=testing.db, oracle_resolve_synonyms=True)\n eq_(t.c.keys(), ['id', 'data'])\n eq_(list(t.primary_key), [t.c.id])", "def get_tables():\n return execute(\"SELECT name FROM sqlite_master WHERE type = 'table';\")", "def get_source_type(import_file, source_type=''):\n\n # TODO: move source_type to a database lookup. Right now it is hard coded\n source_type_str = getattr(import_file, 'source_type', '') or ''\n source_type_str = source_type or source_type_str\n source_type_str = source_type_str.upper().replace(' ', '_')\n\n return getattr(models, source_type_str, ASSESSED_RAW)", "def _get_raw_entity_kind(cls, entity_kind):\r\n model_class = util.for_name(entity_kind)\r\n return model_class._meta.db_table", "def get_type(self) -> str:\n return Tables.ESL.name", "def get_tables(self):\n return self._get_types_from_default_ns(Table)", "def getTableDefForTable(self, tableName):\n\t\tif not \".\" in tableName:\n\t\t\ttableName = \"public.\"+tableName\n\t\t\n\t\tfor row in self.readerConnection.queryToDicts(\n\t\t\t\t\"select sourcerd, tablename from dc.tablemeta where\"\n\t\t\t\t\" lower(tableName)=%(tableName)s\",\n\t\t\t\t{\"tableName\": tableName.lower()}):\n\t\t\tbreak\n\t\telse:\n\t\t\traise base.ui.logOldExc(\n\t\t\t\tbase.NotFoundError(tableName, \"table\", \"dc_tables\"))\n\n\t\treturn base.caches.getRD(row[\"sourcerd\"]\n\t\t\t).getById(row[\"tablename\"].split(\".\")[-1])", "def table_names(self, cursor=None):\r\n return [kind.key().name() for kind in Query(kind='__kind__').Run()]", "def read_sql(self):\n pass", "def get_table(base, engine):\n class w1_temp_table(base):\n __tablename__ = 'w1_temp'\n __table_args__ = {\"useexisting\": True}\n\n id = sa.Column(sa.types.Integer, primary_key=True)\n logger_id = sa.Column(sa.types.Integer)\n value = sa.Column(sa.types.String)\n datetime = sa.Column(sa.types.DateTime)\n return w1_temp_table", "def tableName():\n return \"people\"", "def get_source_schema(cls) -> dict:\n source_schema = get_base_schema(\n root=True,\n id_=\"source.schema.json\",\n title=\"Source data schema\",\n description=\"Schema for the source data, files and directories\",\n version=\"0.1.0\",\n )\n for interface_name, data_interface in cls.data_interface_classes.items():\n source_schema[\"properties\"].update({interface_name: unroot_schema(data_interface.get_source_schema())})\n return source_schema", "def source(self) -> Dict:\n return self._db_data.metadata[\"_source\"]", "def showTables():\n global cursor\n #cursor.execute('SELECT * FROM *')\n cursor.execute('''SELECT * FROM sqlite_master WHERE type='table' ''')\n\n tables = cursor.fetchall()\n print \"Tables available are:\"\n print tables[0]", "def getSourceName(self, instance):\n mapping = IAnnotations(instance).setdefault(\n 'collective.table',\n PersistentMapping()\n )\n return mapping.get('source_name', self.defaultSourceName)", "def _db_init_data_tables(self):\n\n #\n # TESTTYPE table\n #\n return self._db_execute(\n \"\"\"\n create table TESTTYPE (\n KEY text unique,\n VALUE text\n )\n \"\"\"\n )", "def load_schema_for_modelling():\n filename = \"modelling_schema.csv\"\n folder = os.path.abspath(os.path.dirname(__file__))\n path = os.path.join(folder, filename)\n return pd.read_csv(path).set_index('table_name')", "def resource_type(self) -> str:\n return 'TABLE'", "def cvv_ttype_table(argv):\n p = optparse.OptionParser()\n p.add_option('-D', '--drop',\n action='store_true', default=False, dest='drop',\n help='drop the table')\n p.add_option('-d', '--debug',\n action='store_true', 
default=False, dest='debug',\n help='run the debugger')\n p.add_option('-r', '--root',\n action='store', default='', dest='hpssroot',\n help='where to look for data')\n try:\n (o, a) = p.parse_args(argv)\n except SystemExit:\n return\n\n if o.debug:\n pdb.set_trace()\n\n # lookup and report tape type for each pathname specified\n if o.drop:\n result = dbschem.drop_table(table=\"tape_types\")\n print result\n else:\n dbschem.make_table(\"tape_types\")\n\n hpssroot = o.hpssroot\n if hpssroot == '':\n hpssroot = os.getenv(\"HPSS_ROOT\")\n if hpssroot is None:\n hpssroot = \"/opt/hpss\"\n\n tape_types_populate(hpssroot)", "def get_table_attribute_from_base_class(self, source_table_name: str):\n return getattr(self.connection.base.classes, source_table_name)", "def get_table_column_names_and_types(\n self, config: RepoConfig\n ) -> Iterable[Tuple[str, str]]:\n\n from feast.infra.offline_stores.snowflake import SnowflakeOfflineStoreConfig\n from feast.infra.utils.snowflake_utils import (\n execute_snowflake_statement,\n get_snowflake_conn,\n )\n\n assert isinstance(config.offline_store, SnowflakeOfflineStoreConfig)\n\n snowflake_conn = get_snowflake_conn(config.offline_store)\n\n if self.database and self.table:\n query = f'SELECT * FROM \"{self.database}\".\"{self.schema}\".\"{self.table}\" LIMIT 1'\n elif self.table:\n query = f'SELECT * FROM \"{self.table}\" LIMIT 1'\n else:\n query = f\"SELECT * FROM ({self.query}) LIMIT 1\"\n\n result = execute_snowflake_statement(snowflake_conn, query).fetch_pandas_all()\n\n if not result.empty:\n metadata = result.dtypes.apply(str)\n return list(zip(metadata.index, metadata))\n else:\n raise ValueError(\"The following source:\\n\" + query + \"\\n ... is empty\")", "def get_table(tname, request):\n pyramid_sacrud_models = get_models_from_settings(request)\n try:\n models = dict(pyramid_sacrud_models)\n except ValueError:\n models = dict((pyramid_sacrud_models, ))\n finally:\n models = models.values()\n\n tables = itertools.chain(*[model for model in models if model])\n tables = [\n table for table in tables\n if (table.__tablename__).lower() == tname.lower()\n and table\n ]\n if not tables:\n return None\n return tables[0]", "def table(self):\n return self.snowflake_options.table", "def get_type(self) -> str:\n return Tables.USER.name", "def read(tablename: str()):\n return pd.read_csv(tablename, dtype={'source_id': str})", "def table_name() -> str:\n pass", "def show_tables(self):\n query = \"SELECT name FROM sqlite_master WHERE type = 'table'\"\n try:\n temp = self.__cur.execute(query)\n except Exception as e:\n self.__conn.rollback()\n raise e\n\n tables = []\n for x in temp:\n tables.append(x[\"name\"])\n del temp\n return tables", "def getTable(self):\n\n raise NotImplementedError", "def _get_table_obj(self, mode):\n return self.client[f\"bigquery_{mode}\"].get_table(self.table_full_name[mode])", "def __getTable(self):\n\n if not self.__table:\n tableConnectionParams = parseConnectionString(\n self.tableConnString);\n\n self.__table = Table(\n tableConnectionParams['name'],\n connection = getDbConnection(tableConnectionParams));\n\n return self.__table;", "def table(self):\n return self._table_name", "def _read_metadata(self, conn, tbl_name): \n # Split table name in libname and actual table name\n name, schema = tuple(tbl_name.split('.'))\n # Query the Vertica dictionary to get types and formats\n query = \"\"\"\n SELECT column_name as NAME, data_type as TYPE, data_type_length AS LENGTH \n FROM v_catalog.columns \n WHERE table_schema = '{}' AND 
table_name = '{}'\n \"\"\".format(name, schema)\n \n md = conn.fetch(query)\n if not len(md):\n raise ValueError('No metadata for table {}'.format(tbl_name))\n\n md = (md\n # Use variable names as row names, then remove the NAME column\n .set_index('NAME', inplace=False)\n # Compute the number of bytes for each variable It is given by the LENGTH variable\n .rename({'LENGTH': 'NUM_BYTES'}, axis=1))\n\n # Identify data types\n type_upper = md['TYPE'].str.upper()\n md['IS_TEXT'] = type_upper.str.startswith('VARCHAR')\n md['IS_BOOLEAN'] = type_upper == 'BOOLEAN'\n md['IS_INTEGER'] = type_upper.isin(['INT', 'INTEGER'])\n md['IS_FLOAT'] = (type_upper == 'FLOAT') | type_upper.str.startswith('NUMERIC')\n md['IS_DATE'] = type_upper == 'DATE'\n md['IS_TIMESTAMP'] = type_upper == 'TIMESTAMP'\n md['IS_TIME'] = type_upper == 'TIME'\n # Determine datetime formats for date and time data\n md['DATETIME_FORMAT'] = np.nan\n md.loc[md['IS_DATE'], 'DATETIME_FORMAT'] = 'yyyy-MM-dd'\n md.loc[md['IS_TIME'], 'DATETIME_FORMAT'] = 'HH:mm:ss'\n # Determine datetime formats for timestamp data\n # For timestamp data, the right format is:\n # - yyyy-MM-dd HH:mm:ss.0 with a JDBC connection <-- python default\n # - yyyy-MM-dd HH:mm:ss with an ODBC connection\n md.loc[md['IS_TIMESTAMP'], 'DATETIME_FORMAT'] = 'yyyy-MM-dd HH:mm:ss.0'\n\n # Original type\n md.rename({'TYPE': 'TYPE_IN_SOURCE'}, axis=1, inplace=True)\n # Create the metadata catalog\n md = MetadataCatalog(md, is_case_sensitive=False)\n # Check that all formats have been correctly processed\n format_check = md.check_metadata_completeness()\n if not all(format_check):\n unsupported_format = md.get_type_in_source()\n unsupported_format = unsupported_format[~format_check].unique()\n raise ValueError('Unsupported Vertica format: {}'.format(unsupported_format))\n return md", "def getTable(self):\n return self.table", "def _get_tabletype(cls) -> str:\n return 'HTML'", "def parse_table_schema(conn):\r\n cur = conn.cursor()\r\n\r\n cur.execute(\"PRAGMA table_info({})\".format(\"week5\"))\r\n print(cur.fetchall())", "def source_entity_type_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"source_entity_type_name\")", "def make_new_tbl(self):\n debug = False\n default_dd = getdata.get_default_db_dets()\n con, cur = default_dd.con, default_dd.cur\n oth_name_types = getdata.get_oth_name_types(self.settings_data)\n tblname = self.tblname_lst[0]\n if debug: print(f'DBE in make_new_tbl is: {default_dd.dbe}')\n getdata.make_sofa_tbl(\n con, cur, tblname, oth_name_types, headless=False)\n wx.MessageBox(\n _('Your new table has been added to the default SOFA database'))", "def test_get_table_list(self):\n db_introspection = DatabaseIntrospection(self.connection)\n cursor = mock.MagicMock()\n\n def list_tables(*args, **kwargs):\n return [[\"Table_1\", \"t\"], [\"Table_2\", \"t\"]]\n\n cursor.run_sql_in_snapshot = list_tables\n table_list = db_introspection.get_table_list(cursor=cursor)\n self.assertEqual(\n table_list,\n [\n TableInfo(name=\"Table_1\", type=\"t\"),\n TableInfo(name=\"Table_2\", type=\"t\"),\n ],\n )", "def getTableSchema(self,tableName):\n\tif not self.schemaDict.has_key(tableName):\n\t if self.dbType==\"sqlite\":\n\t query = \"SELECT * FROM sqlite_master WHERE name='%s'\"%tableName\n\t tup = self.fetchOne(query)\n\t schema= tup[4]\n\t else: # MySQL \n\t query = \"DESCRIBE %s\"%tableName\n\t tup = self.fetchAll(query)\n\t schema= \"CREATE TABLE %s (\"%tableName\n\t for item in tup:\n\t name = item[0]\n\t\t type = item[1]\n\t\t priKey = item[3]\n\t\t 
autoInc = item[5] \n\t schema+=name+' '+type+' '+priKey+' '+autoInc\n\t\t if item!=tup[-1]:\n\t\t schema+=','\n\t schema+=\" )\"\n\t return schema\n\telse:\n\t return self.schemaDict[tableName]", "def make_temp_tbl(self, type: str = \"user_details\"):\n uid = uuid.uuid4()\n temp_tbl_name = \"temp_\" + str(uid).replace('-', '_')\n\n if self.config.dbtype.lower() == \"mysql\":\n create_temp_tbl_sql = f\"CREATE TABLE {temp_tbl_name} LIKE {type};\"\n elif self.config.dbtype.lower() == \"sqlite\":\n create_temp_tbl_sql = f\"CREATE TABLE {temp_tbl_name} AS SELECT * FROM {type} WHERE 0\"\n self.engine.execute(create_temp_tbl_sql)\n return temp_tbl_name", "def _create_TableDescriptor(self):\n\n self.conn.cursor.execute(\"PRAGMA table_info(\" + self.table_name + \")\")\n descriptions = self.conn.cursor.fetchall()\n column_map = {}\n for description in descriptions:\n column_map[description[1]] = description[2]\n td = TD(self.table_name, column_map) \n\n# self.conn.cursor.execute(\"SELECT sql FROM sqlite_master WHERE name='{tb}'\"\\\n# .format(tb=self.table_name))\n# aa = str(self.conn.cursor.fetchone()[0])\n# sindx = aa.find(\"(\")\n# eindx = aa.find(\")\")\n# aa = aa[sindx+1:eindx]\n# aa = aa.split(\",\")\n# column_map = {kyval.split()[0]:kyval.split()[1] for kyval in aa}\n# td = TD(self.table_name, column_map) \n\n return td", "def test_get_table_description(self):\n db_introspection = DatabaseIntrospection(self.connection)\n cursor = mock.MagicMock()\n\n def description(*args, **kwargs):\n return [[\"name\", TypeCode.STRING], [\"age\", TypeCode.INT64]]\n\n def get_table_column_schema(*args, **kwargs):\n column_details = {}\n column_details[\"name\"] = ColumnDetails(\n null_ok=False, spanner_type=\"STRING(10)\"\n )\n column_details[\"age\"] = ColumnDetails(\n null_ok=True, spanner_type=\"INT64\"\n )\n return column_details\n\n cursor.get_table_column_schema = get_table_column_schema\n cursor.description = description()\n table_description = db_introspection.get_table_description(\n cursor=cursor, table_name=\"Table_1\"\n )\n if USING_DJANGO_3:\n self.assertEqual(\n table_description,\n [\n FieldInfo(\n name=\"name\",\n type_code=TypeCode.STRING,\n display_size=None,\n internal_size=10,\n precision=None,\n scale=None,\n null_ok=False,\n default=None,\n collation=None,\n ),\n FieldInfo(\n name=\"age\",\n type_code=TypeCode.INT64,\n display_size=None,\n internal_size=None,\n precision=None,\n scale=None,\n null_ok=True,\n default=None,\n collation=None,\n ),\n ],\n )\n else:\n self.assertEqual(\n table_description,\n [\n FieldInfo(\n name=\"name\",\n type_code=TypeCode.STRING,\n display_size=None,\n internal_size=10,\n precision=None,\n scale=None,\n null_ok=False,\n default=None,\n ),\n FieldInfo(\n name=\"age\",\n type_code=TypeCode.INT64,\n display_size=None,\n internal_size=None,\n precision=None,\n scale=None,\n null_ok=True,\n default=None,\n ),\n ],\n )", "def getTableNames(self):\n\n # The specific command depends on whether we are using mysql or sqlite\n if self.connector == 'mysql':\n sqlcmd = (\"SELECT table_name FROM INFORMATION_SCHEMA.TABLES \" +\n \"WHERE table_schema='\" + self.dbname + \"'\")\n else:\n sqlcmd = \"SELECT name FROM sqlite_master WHERE type='table'\"\n\n self._c.execute(sqlcmd)\n tbnames = [el[0] for el in self._c.fetchall()]\n\n return tbnames", "def source_entity_type(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"source_entity_type\")", "async def table(name: str):\n stub = os.path.dirname(__file__) + '/stubs/table.py'\n dest = 
uvicore.config('app.paths.tables') + '/' + name + '.py'\n\n Schematic(\n type='table',\n stub=stub,\n dest=dest,\n replace = [\n ('xx_tablename', name),\n ('xx_TableName', str.studly(name)),\n ]\n ).generate()\n\n uvicore.log.nl()\n uvicore.log.notice('Be sure to add this table to your ./database/tables/__init__.py')", "def __init__(self, container_type, datasource=sybase.NULL_SOURCE):\n super(sytable, self).__init__(container_type,\n datasource or sybase.NULL_SOURCE)\n columns = datasource.columns()\n self._name = datasource.read_name()\n self._attributes = datasource.read_table_attributes() or {}\n self._number_of_rows = datasource.number_of_rows()\n self._columns = OrderedDict(\n (column, _column_factory(datasource, column, self._number_of_rows))\n for column in columns)\n\n self._column_attributes = dict.fromkeys(columns)\n self._dirty = False", "def _convert_rosetta_db_to_basic_db(self):\n pass", "def get_schema(self, engine, frame, name, keys=None):\n pandas_sql = SQLDatabase(engine, schema=None, meta=None)\n return pandas_sql._create_sql_schema(frame, name, keys=keys)", "def __tablename__(cls):\n return get_table_name(cls.__name__)", "def test_table_name(self):\n obs = PrepTemplate._table_name(1)\n self.assertEqual(obs, \"prep_1\")", "def tablename(entity) -> str:\n return entity.__tablename__", "def schema(self):\n return self.table_info.schema", "def grasspi_print_db(table_name):\n\n conn = sqlite3.connect(grasspi_config.cfg.db_file)\n conn.text_factory = str\n c = conn.cursor()\n val = \"SELECT * FROM \" + table_name\n for row in c.execute(val):\n #conn.text_factory = str\n print row\n c.close()", "def connect_db(stdscr):\n global database\n msg = get_msg(stdscr, \"Enter database name. Exit with ENTER or C-g\")\n logging.debug(msg)\n database = Database(msg.strip())\n logging.debug(database.engine.table_names())\n return select_player(stdscr)", "def get_tables(self):\n logging.debug(f\"\"\"get_tables\"\"\")\n conn = self.connect(cxRepo)\n sql = f\"\"\"select table_name,server1_select,server2_select,schema1,\n schema2,tips from {self.schemaRepo}.tablediff\n where step = 0 and result = 'init' order by id\"\"\"\n with conn:\n with conn.cursor() as curs:\n try:\n curs.execute(sql)\n except conn.DatabaseError as exc:\n error, = exc.args\n logging.error(f\"\"\"error executing {sql} : {error}\"\"\")\n rows = curs.fetchall()\n return rows", "def connect(self):\n if self.type != 'sqlite':\n # log non-sqlite uses of raw connections for troubleshooting, since\n # unless the developer had a good reason to use this instead of\n # `session()`, it indicates the plugin was written before Sopel 7.0\n # and might not work right when connected to non-sqlite DBs\n LOGGER.info(\n \"Raw connection requested when 'db_type' is not 'sqlite':\\n\"\n \"Consider using 'db.session()' to get a SQLAlchemy session \"\n \"instead here:\\n%s\",\n traceback.format_list(traceback.extract_stack()[:-1])[-1][:-1])\n return self.engine.raw_connection()", "def query_tables(self):\n # Find all tables\n tables_q = \"SELECT name FROM sqlite_master WHERE type = 'table' AND name NOT LIKE \\'sqlite_%\\';\"\n tables = self.query(tables_q)\n # print(tables)\n return tables", "def test_connection():\n database = r'.\\data\\SQLite\\chinook.db'\n\n try:\n conn = sqlite3.connect(database)\n table_names = conn.execute(\"SELECT name FROM sqlite_master WHERE type='table';\")\n for table_name in table_names:\n print(table_name[0])\n\n finally:\n try: conn.close()\n except: pass", "def get_table(engine, name):\n metadata = 
sqlalchemy.schema.MetaData()\n metadata.bind = engine\n return sqlalchemy.Table(name, metadata, autoload=True)", "def _get_tabletype(cls) -> str:\n return 'Markdown'", "def get_target_table(self, source):\n target_tables = set()\n target_fields = [t[1] for t in self.mapping.items() if t[0].split('.')[0] == source]\n for f in target_fields:\n target_tables.update([c.split('.')[0] for c in f.keys()])\n self.target_tables = list(target_tables)\n return self.target_tables", "def table(entity) -> sa.Table:\n return entity.__table__", "def import_table(ctx: DataFunctionContext, table_name: str, copy: bool = True):\n target_storage = ctx.execution_config.get_target_storage()\n if ensure_bool(copy):\n as_identifier = target_storage.get_api().get_quoted_identifier\n sql = f\"select * from {as_identifier(table_name)}\"\n # TODO: DRY this pattern\n sdf = SqlDataFunctionWrapper(sql)\n\n def get_sql(*args, **kwargs):\n return sql\n\n sdf.get_compiled_sql = get_sql\n return sdf(ctx)\n else:\n ctx.emit(\n name=table_name,\n storage=target_storage,\n data_format=\"table\",\n create_alias_only=True,\n )", "def info_table(table):\n print \"\\nSCHEMA de la taula \",table, \"es: \"\n con=lite.connect('parking.db')\n cur=con.cursor()\n cur.execute(\"PRAGMA table_info({});\".format(table))\n data = cur.fetchall()\n for d in data:\n print \"\\t\",d[0], d[1], d[2]\n con.close()", "def schema(cls, only_self: bool=False):\n try:\n md_tbls = cls.metadata.tables\n insp = reflection.Inspector.from_engine(cls.s.bind.engine)\n tbls = dict()\n for tbl in insp.get_table_names():\n if not only_self or (only_self and tbl == cls.__tablename__):\n cols = dict()\n for col in insp.get_columns(tbl):\n info = dict(col)\n col_info = md_tbls[tbl].c[col['name']]\n info['type'] = {\n 'compiled': col['type'].compile(),\n 'native': col['type'].python_type.__name__\n }\n info['type']['length'] = col['type'].length if hasattr(col['type'], 'length') else None\n if info['autoincrement']:\n info['default'] = 'autoincrement'\n info.update(col_info.info)\n info['placeholder'] = '%s_%s' % (tbl, col['name'])\n cols[col['name']] = info\n tbls[tbl] = cols\n\n return tbls\n except SQLAlchemyError:\n cls.s.rollback()\n raise", "def create_table(self):\n pass", "def _tables(self):\n assert False, \"subclass responsibility\"", "def load_database(database_type):\n f = open(\"database.p\", \"rb\")\n database = pickle.load(f)\n f.close()\n\n if database_type is \"dict\":\n return database\n elif database_type is \"list\":\n return database.values()", "async def _init_database(self):\n await self.database.executescript(\n f\"\"\"\n CREATE TABLE IF NOT EXISTS \"{DBUser.table_name}\" (\n \"user_id\" INTEGER PRIMARY KEY AUTOINCREMENT,\n \"name\" TEXT NOT NULL UNIQUE,\n \"created_at\" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,\n \"creation_flags\" INTEGER NOT NULL DEFAULT 0,\n \"creation_metadata\" TEXT,\n \"comment\" TEXT\n );\n CREATE TABLE IF NOT EXISTS \"{DBUserAlias.table_name}\" (\n \"user_id\" INTEGER NOT NULL,\n \"alias\" TEXT NOT NULL,\n \"case_sensitive\" BOOLEAN NOT NULL DEFAULT 0 CHECK(case_sensitive IN (0,1)),\n \"created_at\" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,\n \"creation_flags\" INTEGER NOT NULL DEFAULT 0,\n \"creation_metadata\" TEXT,\n \"comment\" TEXT,\n PRIMARY KEY (\"user_id\", \"alias\"),\n FOREIGN KEY (\"user_id\") REFERENCES \"{DBUser.table_name}\" (\"user_id\")\n ON DELETE CASCADE\n );\n CREATE TABLE IF NOT EXISTS \"{Participant.table_name}\" (\n \"participant_id\" INTEGER NOT NULL,\n \"name\" TEXT NOT NULL UNIQUE,\n 
\"user_id\" INTEGER,\n PRIMARY KEY (\"participant_id\"),\n FOREIGN KEY (\"user_id\") REFERENCES \"{DBUser.table_name}\" (\"user_id\")\n ON DELETE SET NULL\n );\n CREATE TABLE IF NOT EXISTS \"protocols\" (\n \"identifier\" TEXT NOT NULL,\n \"name\" TEXT NOT NULL,\n PRIMARY KEY (\"identifier\")\n ) WITHOUT ROWID;\n CREATE TABLE IF NOT EXISTS \"{Source.table_name}\" (\n \"source_id\" INTEGER NOT NULL,\n \"protocol\" TEXT NOT NULL,\n \"server\" TEXT,\n \"channel\" TEXT,\n PRIMARY KEY (\"source_id\"),\n FOREIGN KEY (\"protocol\") REFERENCES \"protocols\" (\"identifier\")\n ON UPDATE CASCADE\n );\n\n CREATE VIEW IF NOT EXISTS users_all_names (\n user_id, name, case_sensitive\n ) AS\n SELECT user_id, name, 1 FROM \"{DBUser.table_name}\"\n UNION\n SELECT user_id, alias, case_sensitive FROM \"{DBUserAlias.table_name}\";\n\n CREATE VIEW IF NOT EXISTS participants_all_names (\n participant_id, user_id, name, case_sensitive\n ) AS\n SELECT participant_id, user_id, name, 1 FROM \"{Participant.table_name}\"\n UNION\n SELECT participant_id, user_id, alias, case_sensitive FROM \"{DBUserAlias.table_name}\"\n JOIN \"{Participant.table_name}\" USING(user_id);\n\n CREATE UNIQUE INDEX IF NOT EXISTS \"idx_participants_user_id\"\n ON \"{Participant.table_name}\" (\"user_id\");\n\n CREATE TRIGGER IF NOT EXISTS tg_update_participant_name_from_user\n AFTER UPDATE OF name ON \"{DBUser.table_name}\"\n BEGIN\n UPDATE \"{Participant.table_name}\"\n SET name = new.name\n WHERE user_id = new.user_id;\n END;\n\n CREATE TRIGGER IF NOT EXISTS tg_update_user_name_from_participant\n AFTER UPDATE OF name ON \"{Participant.table_name}\"\n BEGIN\n UPDATE \"{DBUser.table_name}\"\n SET name = new.name\n WHERE user_id = new.user_id;\n END;\n\n CREATE TRIGGER IF NOT EXISTS tg_new_user_upsert_participants\n AFTER INSERT ON \"{DBUser.table_name}\"\n BEGIN\n INSERT INTO \"{Participant.table_name}\" (name, user_id)\n VALUES (new.name, new.user_id)\n ON CONFLICT (name) DO UPDATE\n SET user_id = new.user_id;\n END;\n\n CREATE TRIGGER IF NOT EXISTS tg_prevent_linked_participant_delete\n BEFORE DELETE ON \"{Participant.table_name}\"\n BEGIN\n SELECT RAISE(FAIL, 'Can''t delete participant that is linked to a user')\n FROM \"{Participant.table_name}\"\n WHERE participant_id = old.participant_id\n AND user_id IS NOT NULL;\n END;\n \"\"\"\n )", "def fetch_table_schema(self, table_name):\n ddl = self.query(sql.show_create_table(table_name))\n if ddl:\n try:\n return parse_create(ddl[0][\"Create Table\"])\n except ParseError as e:\n raise OSCError(\n \"TABLE_PARSING_ERROR\",\n {\"db\": self._current_db, \"table\": self.table_name, \"msg\": str(e)},\n )", "def table_name(self) -> str:\n return self.model._meta.db_table", "def db_for_read(self, model, **hints):\n model_name = model._meta.label_lower\n pos = model_name.find('.')\n table_name = model_name[pos+1:]\n if table_name in self.route_encuestas:\n return 'encuestas'\n elif table_name in self.route_uxxienc_resul:\n return 'uxxienc_resul'\n return None" ]
[ "0.63558096", "0.6177287", "0.58582294", "0.57778627", "0.5763742", "0.57581013", "0.57396495", "0.5699226", "0.5658652", "0.5625839", "0.5606744", "0.5599866", "0.55963886", "0.5581121", "0.55795485", "0.5569123", "0.55615866", "0.5508438", "0.55081236", "0.5479944", "0.54672337", "0.5453464", "0.5430561", "0.5429354", "0.5426356", "0.54054195", "0.5405398", "0.54013175", "0.5396266", "0.5392706", "0.53888315", "0.5378579", "0.5363304", "0.53621805", "0.5339963", "0.53396666", "0.5337529", "0.53039014", "0.53001785", "0.52954936", "0.52921927", "0.5286438", "0.52790743", "0.52783775", "0.5275894", "0.5275007", "0.5266536", "0.5244497", "0.524389", "0.5240902", "0.52310693", "0.5225438", "0.5214508", "0.51858646", "0.5183284", "0.5181828", "0.51810616", "0.5179763", "0.5170819", "0.51694477", "0.5162759", "0.51587766", "0.5152489", "0.51501256", "0.5145319", "0.5135464", "0.5131588", "0.512964", "0.5126326", "0.51248354", "0.51143163", "0.5095183", "0.5092303", "0.50906307", "0.50899935", "0.50748104", "0.5071032", "0.506732", "0.506571", "0.5058847", "0.50579756", "0.50545913", "0.5041182", "0.50403446", "0.5039349", "0.5032683", "0.50220644", "0.5016299", "0.5013823", "0.5008278", "0.5007804", "0.5001458", "0.4987011", "0.4979766", "0.4979397", "0.49709865", "0.49678403", "0.49626997", "0.49613565", "0.49477172" ]
0.5021961
87
Handle tables and views. Fetch them using the context information and the inspector set when preparing the db.
def get_tables_name_and_type(self) -> Optional[Iterable[Tuple[str, str]]]: try: schema_name = self.context.database_schema.name.__root__ if self.source_config.includeTables: for table_and_type in self.query_table_names_and_types(schema_name): table_name = self.standardize_table_name( schema_name, table_and_type.name ) table_fqn = fqn.build( self.metadata, entity_type=Table, service_name=self.context.database_service.name.__root__, database_name=self.context.database.name.__root__, schema_name=self.context.database_schema.name.__root__, table_name=table_name, skip_es_search=True, ) if filter_by_table( self.source_config.tableFilterPattern, table_fqn if self.source_config.useFqnForFiltering else table_name, ): self.status.filter( table_fqn, "Table Filtered Out", ) continue yield table_name, table_and_type.type_ if self.source_config.includeViews: for view_name in self.inspector.get_view_names(schema_name): view_name = self.standardize_table_name(schema_name, view_name) view_fqn = fqn.build( self.metadata, entity_type=Table, service_name=self.context.database_service.name.__root__, database_name=self.context.database.name.__root__, schema_name=self.context.database_schema.name.__root__, table_name=view_name, ) if filter_by_table( self.source_config.tableFilterPattern, view_fqn if self.source_config.useFqnForFiltering else view_name, ): self.status.filter( view_fqn, "Table Filtered Out", ) continue yield view_name, TableType.View except Exception as err: logger.warning( f"Fetching tables names failed for schema {schema_name} due to - {err}" ) logger.debug(traceback.format_exc())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_context_data(self, **kwargs):\r\n context = super(SingleTableMixin, self).get_context_data(**kwargs)\r\n table = self.get_table()\r\n context[self.get_context_table_name(table)] = table\r\n return context", "def execute(self, context):\n\n # Initialize PostgreSQL hook\n self.postgres = PostgresHook(\n postgres_conn_id=self.postgres_conn_id,\n schema=self.postgres_schema).get_sqlalchemy_engine()\n\n # Initialize Socrata hook\n super().execute()\n\n # Load table\n table = self._select_table()\n self.table_dicts = [dict(row) for row in table]\n\n if self.replace:\n result = self.socrata.replace(self.dataset_id, self.table_dicts)\n else:\n # Code from etl-airflow\n for i in range(0, len(self.table_dicts), UPLOAD_CHUNK_SIZE):\n try:\n result = self.socrata.upsert(self.dataset_id, self.table_dicts[i:i+UPLOAD_CHUNK_SIZE])\n except:\n print(f\"Error on record {i}\")\n result = self.socrata.upsert(self.dataset_id, self.table_dicts[i:i+UPLOAD_CHUNK_SIZE])", "def _load_db(self):\n for type_ in self._types:\n try:\n type_.table(self._metadata)\n except InvalidRequestError:\n pass\n # Reflect metadata so auto-mapping works\n self._metadata.reflect(self._engine)\n # Make sure the tables exist\n self._metadata.create_all()", "def create_all_views():\n cursor.execute(articleList)\n cursor.execute(goodViews)\n cursor.execute(authorsTitles)\n cursor.execute(titleViews)\n cursor.execute(dailyTotalView)\n cursor.execute(dailyErrorView)", "def _tables(self):\n assert False, \"subclass responsibility\"", "def run(self) -> int:\n columns_sql = []\n\n model = self._flags.model\n schema = self._dbt_profile.profile.get(\"target_schema\", \"\")\n\n dbt_credentials = self._dbt_profile.profile\n connector = DB_CONNECTORS.get(dbt_credentials.get(\"type\", \"\"))\n if not connector:\n raise NotImplementedError(\n f\"Connector '{dbt_credentials.get('type')}' is not implemented.\"\n )\n\n self.connector = connector(dbt_credentials)\n\n # exit early if model is in the excluded_models list\n _ = self.is_exluded_model(model)\n columns_sql = self.connector.get_columns_from_table(\n model, schema, self._sugar_config.config.get(\"use_describe_snowflake\", False)\n )\n if columns_sql:\n return self.orchestrate_model_documentation(schema, model, columns_sql)\n return 1", "def execute(self, context):\n redshift = PostgresHook(self.redshift_conn_id)\n \n self.log.info(f'LoadFactOperator loading {self.table} table')\n redshift.run(self.sql)\n self.log.info(f'LoadFactOperator loaded {self.table} table')", "def _during_execute(self, db):\n pass", "def views(\n path, counts, nl, arrays, csv, no_headers, table, fmt, json_cols, columns, schema,\n):\n tables.callback(\n path=path,\n fts4=False,\n fts5=False,\n counts=counts,\n nl=nl,\n arrays=arrays,\n csv=csv,\n no_headers=no_headers,\n table=table,\n fmt=fmt,\n json_cols=json_cols,\n columns=columns,\n schema=schema,\n views=True,\n )", "def main(request, template=\"main.html\"):\n context = {\n \"tables\": Table.objects.all(),\n \"charform\": CharForm(),\n \"intform\": IntForm(),\n \"dateform\": DateForm()\n }\n return render(request, template, context)", "def _do_action_tables_create(self):\n\n schema_shell = os.path.join(self.bento_home, \"schema-shell\", \"bin\", \"kiji-schema-shell\")\n assert os.path.isfile(schema_shell), schema_shell\n\n # Delete the table first!\n cmd = (\n \"kiji delete --target={kiji_uri} --interactive=false; \" +\n \"kiji install --kiji={kiji_uri}\" ).format(kiji_uri=self.kiji_uri)\n self._run_kiji_job(cmd)\n\n for ddl in self.ddls:\n ddl_full_path 
= os.path.join(self.movie_advisor_home, ddl)\n assert os.path.isfile(ddl_full_path)\n cmd = \"{schema_shell} --kiji={kiji_uri} --file={ddl_full_path}\".format(\n schema_shell=schema_shell,\n kiji_uri=self.kiji_uri,\n ddl_full_path=ddl_full_path)\n self._run_kiji_job(cmd)", "def db_handler():\n\n pass", "def run (self):\n if self.testing:\n return\n conn = Connection(self.db, self.host, self.user, self.passwd)\n conn.execute(self.sql)\n self.table = conn.fetch()\n conn.commit()", "def _get_table(self, cursor):\n raise NotImplementedError", "def populate_db():\n\n populate_table(db, models.Department, departments_data)\n populate_table(db, models.Employee, employees_data)", "def setup_method(self):\n MANAGER._tables = {}\n MANAGER._views = {}", "def browse(table_name):\n meta = MetaData()\n meta.reflect(db.engine)\n #table = Table(table_name, meta, autoload=True, autoload_with=db.engine)\n table = meta.tables[table_name]\n\n # setup query\n q_from = table\n columns = [ ]\n\n for tc in table.c.keys():\n q_c = table.c.get(tc)\n columns.append(q_c)\n if hasattr(settings, 'DISPLAY_FK'):\n # check if we need to join other tables\n try:\n join = set()\n fks = settings.DISPLAY_FK[table_name][tc]\n for fk in fks:\n fktablename, fkcolname = fk.split(\".\", 2)\n fktable = meta.tables[fktablename]\n join.add(fktable)\n fkcol = getattr(fktable.c, fkcolname)\n fkcol.breadpy_fk = True\n columns.append(fkcol)\n for jt in join:\n q_from = q_from.join(jt)\n except KeyError, ke:\n pass\n \n q = select(from_obj=q_from, columns=columns)\n sql = \"%s\" % q\n\n # execute and fetch\n result = db.engine.execute(q)\n raw_rows = result.fetchall()\n rows = raw_rows\n\n template = get_template(\"rows/browse.html\")\n\n return template.render(table=table, rows=rows, columns=columns, sql=sql)", "def _ensure_tables(self, db):\n\n # NOTE(kgriffs): Create tables all together rather\n # than separately in each controller, since some queries\n # in the individual controllers actually require the\n # presence of more than one table.\n\n # NOTE(flaper87): Consider moving tables definition\n # outside this method.\n\n db.execute('''\n create table\n if not exists\n Messages (\n id INTEGER,\n qid INTEGER,\n ttl INTEGER,\n content DOCUMENT,\n client UUID,\n created DATETIME, -- seconds since the Julian day\n PRIMARY KEY(id),\n FOREIGN KEY(qid) references Queues(id) on delete cascade\n )\n ''')\n\n db.execute('''\n create table\n if not exists\n Queues (\n id INTEGER,\n project TEXT,\n name TEXT,\n metadata DOCUMENT,\n PRIMARY KEY(id),\n UNIQUE(project, name)\n )\n ''')\n\n db.execute('''\n create table\n if not exists\n Claims (\n id INTEGER PRIMARY KEY AUTOINCREMENT,\n qid INTEGER,\n ttl INTEGER,\n created DATETIME, -- seconds since the Julian day\n FOREIGN KEY(qid) references Queues(id) on delete cascade\n )\n ''')\n\n db.execute('''\n create table\n if not exists\n Locked (\n cid INTEGER,\n msgid INTEGER,\n FOREIGN KEY(cid) references Claims(id) on delete cascade,\n FOREIGN KEY(msgid) references Messages(id) on delete cascade\n )\n ''')", "def _load_statements(self):\n home = Path(\".\")\n context = {\"table_name\": self.TABLE}\n self.sql = {}\n for path in home.glob(\"./sql/*\"):\n with open(path) as f:\n template = Template(f.read().strip())\n self.sql[path.stem] = template.render(context)", "def get(self):\n #\n # Erzeugen des Viewhandlers\n # Parameter mit None werden vom Konstruktor automatisch\n # behandelt.\n #\n\n self.viewhandler = Viewhandler(\n controller = self, # Die Methoden des Controllers\n layout = 
Viewhandler.GRID_LAYOUT, # Layout [GRID_LAYOUT|LISTEDIT_LAYOUT]\n domain = UserDomain(db=self.db),\n where = None, # Where Klausel fuer SQL\n orderby = None, # Order by Klausel fuer SQL\n listparam = None, # Uebergabeparameter fuer Grid-Viewer als Dictionary\n gridlist = None, # Handler fuer die Datengewinnung der Ausgabeliste (wird automatisch erzeugt wenn None)\n filter=Utility.normalizeFilter(self.cgiparam('_filter')) # Globaler Filter\n\n )\n\n # aufrufen des Handlers\n self.viewhandler.run()", "def on_pushButton_view_clicked(self):\n content = unicode(self.comboBox.currentText())\n if content == \"职称表\":\n data = self.sql_client.get_zc_info()\n self.fill_tableview(data)\n elif content == \"文化表\":\n data = self.sql_client.get_wh_info()\n self.fill_tableview(data)\n elif content == \"部门表\":\n data = self.sql_client.get_bm_info()\n self.fill_tableview(data)", "def _create_view(self, view, schema=None, config=None):\n viewname, vschema = view[\"__tablename__\"].split(' ')[0], view[\"__schema__\"].split(' ')[0]\n try:\n dve = SQL('NULL from {}.{}').format(Identifier(vschema),\n Identifier(viewname))\n veq = self.__session.query(self._sql_to_string(dve)).limit(1)\n self.__session.execute(veq)\n self._commit()\n except ProgrammingError:\n self._rollback()\n like = text(\"information_schema.routines.routine_name like 'crosstab%'\")\n count = self.__session.query('* FROM information_schema.routines')\n count = count.filter(like).count()\n if int(count) == 0:\n self._create_extension(config)\n self.exschema = 'public'\n else:\n like = text(\"information_schema.routines.routine_name like 'crosstab%'\")\n count = self.__session.query('routine_schema FROM'\n ' information_schema.routines')\n count = count.filter(like).limit(1)\n count = self.__session.execute(count).fetchone()[0]\n self._commit()\n self.exschema = count\n like = text(\"SELECT has_schema_privilege(:exschema, 'USAGE')\")\n like = self.__session.execute(like,\n {\"exschema\": self.exschema}).fetchone()[0]\n self._commit()\n if not like:\n self._grant_access(config)\n viewst, raw = self._sql_to_string(view[\"__statement__\"]), '{}.crosstab'\n defsch = self._sql_to_string(SQL(raw).format(Identifier(schema)))\n exsch = SQL(raw).format(Identifier(self.exschema))\n self.__session.execute(viewst.replace(defsch, self._sql_to_string(exsch)))\n self._commit()\n except Exception:\n self._rollback()\n self._reset_session()\n raise", "def food_table(request):\n t = loader.get_template('family_info/food_table.html')\n c = RequestContext(request, {\n 'all_families': Family.objects.all(),\n })\n return HttpResponse(t.render(c))", "def run(self):\n\n for table in self.TABLES:\n self.dictionary_cursor.execute(f\"TRUNCATE TABLE {table}_Work\")\n self.dictionary_conn.commit()\n self.logger.info(\"work tables cleared\")\n for id in self.ids:\n drug = self.Drug(self, id)\n if drug.wanted:\n drug.load()\n self.logger.info(\"work tables populated\")\n for table in self.TABLES:\n insert = f\"INSERT INTO {table} SELECT * FROM {table}_Work\"\n self.dictionary_cursor.execute(f\"TRUNCATE TABLE {table}\")\n self.dictionary_cursor.execute(insert)\n self.dictionary_conn.commit()\n self.logger.info(\"live tables ready\")", "def execute(self,context):\n postgres = PostgresHook(postgres_conn_id = self.postgres_conn_id)\n conn = postgres.get_conn()\n cursor = conn.cursor()\n start = datetime.now()\n logging.info(\"Clearing data for each load\")\n postgres.run(\"TRUNCATE TABLE {}\".format(self.table))\n\n logging.info(f\"Loading table {self.table}\")\n sql =f\"COPY 
{self.table} FROM STDIN DELIMITER ',' CSV HEADER\"\n cursor.copy_expert(sql, open(self.path, \"r\"))\n conn.commit()\n logging.info(f\"Loaded table {self.table}\")\n end = datetime.now()\n time_taken = (end-start)\n logging.info(f\"Time taken:{time_taken}\")", "def view_all(entities, table, db):\n print \n print \"TABLE:\",table\n for ii in entities:\n print ii\n print", "def on_get(self, req, resp, table):\n user = req.context['user']\n columns = req.params['column'] if 'column' in req.params else None # columns to query\n start = req.params['start'] if 'start' in req.params else None # pagination: start id\n limit = req.params['limit'] if 'limit' in req.params else None # pagination: row limit\n where = base64.b64decode(req.params['where']) if 'where' in req.params else None # query filters\n\n engine = user_db_engine(user)\n key = _make_key(engine, table, columns, start, limit)\n resp.context['cache_key'] = key\n if config.use_cache() and cache.contains_query(key):\n resp.context['cache_hit'] = True\n resp.status = falcon.HTTP_200\n else:\n result, count = _select(engine, table, columns=columns, start=start, limit=limit, where=where)\n\n if config.use_cache():\n resp.context['cache_miss'] = True\n resp.context['result'] = { 'result': 'ok', 'data': result, 'total': count }\n resp.status = falcon.HTTP_200\n\n pagi = \" start from id {} limit {}\".format(start, limit) if start and limit else \"\"\n log.info(\"user [{}]: get table({}) [{}]{}\".format(user['user'], columns if columns else \"*\", table, pagi))", "def db_table(self):", "def __load_handler(self):\n with open(self.path) as file:\n for line in file:\n if line.startswith(\"\"\"# TABLE: \"\"\"):\n self.columndefinition = (line.strip('\\n')\n .replace(\"\"\"# TABLE: \"\"\", ''))\n self.tablename = self.name.replace('.', '_')\n self.tablename = self.tablename.replace('-', '_')\n self.md5_tablename = (hashlib.md5(self.tablename)\n .hexdigest()[:30])\n for columnelement in self.columndefinition.split(','):\n column = columnelement.split(':')[0].strip()\n self.columnnames.append(column)\n\n self.is_mime_handler = True", "def execute(self, context):\n redshift = PostgresHook(postgres_conn_id=self.redshift_conn_id)\n \n # Load Fact table from Redshift\n self.log.info('Loading Fact table %s from Redshift' % self.table_name)\n \n if self.insert_data == True:\n sql_insert = 'INSERT INTO %s %s' % (self.table_name, self.sql_query)\n redshift.run(sql_insert)\n else:\n sql_delete = 'DELETE FROM %s' % self.table_name\n redshift.run(sql_delete)\n sql_insert = 'INSERT INTO %s %s' % (self.table_name, self.sql_query)\n redshift.run(sql_insert) \n \n self.log.info('Finished loading Fact table %s' % self.table_name)", "def get_views(self):\n query = mssqlqueries.get_views()\n logger.info(u'Views query: %s', query)\n for tabular_result in self.execute_query(query):\n for row in tabular_result[0]:\n yield (row[0], row[1])", "def yield_table(\n self, table_name_and_type: Tuple[str, str]\n ) -> Iterable[Optional[CreateTableRequest]]:\n table_name, table_type = table_name_and_type\n schema_name = self.context.database_schema.name.__root__\n db_name = self.context.database.name.__root__\n try:\n\n columns, table_constraints = self.get_columns_and_constraints(\n schema_name=schema_name,\n table_name=table_name,\n db_name=db_name,\n inspector=self.inspector,\n )\n\n view_definition = self.get_view_definition(\n table_type=table_type,\n table_name=table_name,\n schema_name=schema_name,\n inspector=self.inspector,\n )\n\n table_request = CreateTableRequest(\n 
name=table_name,\n tableType=table_type,\n description=self.get_table_description(\n schema_name=schema_name,\n table_name=table_name,\n inspector=self.inspector,\n ),\n columns=columns,\n viewDefinition=view_definition,\n tableConstraints=table_constraints if table_constraints else None,\n databaseSchema=EntityReference(\n id=self.context.database_schema.id,\n type=\"databaseSchema\",\n ),\n tags=self.get_tag_labels(\n table_name=table_name\n ), # Pick tags from context info, if any\n )\n is_partitioned, partition_details = self.get_table_partition_details(\n table_name=table_name, schema_name=schema_name, inspector=self.inspector\n )\n if is_partitioned:\n table_request.tableType = TableType.Partitioned.value\n table_request.tablePartition = partition_details\n\n if table_type == TableType.View or view_definition:\n table_view = TableView.parse_obj(\n {\n \"table_name\": table_name,\n \"schema_name\": schema_name,\n \"db_name\": db_name,\n \"view_definition\": view_definition,\n }\n )\n self.context.table_views.append(table_view)\n\n yield table_request\n self.register_record(table_request=table_request)\n\n except Exception as exc:\n logger.debug(traceback.format_exc())\n logger.warning(f\"Unexpected exception to yield table [{table_name}]: {exc}\")\n self.status.failures.append(f\"{self.config.serviceName}.{table_name}\")", "def export_sqlite_views(self):\r\n # Gather the names of report views in the db\r\n SQL_TRAN.execute(\"SELECT name FROM sqlite_master WHERE type='view'\")\r\n view_names = SQL_TRAN.fetchall()\r\n\r\n # Export report views to tsv files\r\n for i in view_names:\r\n\r\n query = \"SELECT * FROM %s\" % (i[0])\r\n SQL_TRAN.execute(query)\r\n row = ' '\r\n # Get outfile to write to\r\n outfile = getattr(self, \"l_\" + i[0])\r\n row = SQL_TRAN.fetchone()\r\n if row is None:\r\n print(\" No records found in view {}. 
Nothing to export\".format(i[0]))\r\n outfile.close()\r\n os.remove(outfile.name)\r\n else:\r\n print(\" Exporting view {} from database\".format(i[0]))\r\n # For each row join using tab and output to file\r\n while row is not None:\r\n values = []\r\n try:\r\n for cell in row:\r\n if type(cell) is str or type(cell) is unicode:\r\n values.append(cell)\r\n else:\r\n values.append(unicode(cell))\r\n except:\r\n values.append(\"ERROR_IN_VALUE\")\r\n print(\"ERROR: \", row)\r\n m_row = u'\\t'.join(values)\r\n m_row = m_row + u'\\n'\r\n outfile.write(m_row.encode(\"utf-8\"))\r\n row = SQL_TRAN.fetchone()", "def events():\n if request.method == 'POST':\n if 'table' not in request.form:\n return jsonify(\n {'response': 'Please specify the table you want to access!'}, 400\n )\n table_name = request.form['table']\n table = get_table_by_name(table_name)\n if table is None:\n return jsonify({'response': f'Table {table} does not seem to exist!'}, 400)\n log(\n f\"User <code>{current_user.name}</code> is accessing <code>{request.form['table']}</code>!\"\n )\n user_data = get_data_from_table(table)\n return render_template(\n 'users.html', users=user_data, columns=table.__table__.columns._data.keys()\n )\n return render_template('events.html', events=get_accessible_tables())", "def view_index(\n request: HttpRequest,\n workflow: Optional[Workflow] = None,\n) -> HttpResponse:\n # Get the views\n views = workflow.views.values(\n 'id',\n 'name',\n 'description_text',\n 'modified')\n\n # Build the table only if there is anything to show (prevent empty table)\n return render(\n request,\n 'table/view_index.html',\n {\n 'query_builder_ops': workflow.get_query_builder_ops_as_str(),\n 'table': ViewTable(views, orderable=False),\n },\n )", "def fetch_from_db(self):\n self._potential_deals = DBApi.get_instance().potential_records\n self._filters = DBApi.get_instance().filters\n # Add markdown for url\n for data in self._potential_deals:\n data[\"url\"] = f\"[Link]({data['url']})\"\n self._potential_deals_cols = self._db_api.get_potential_deal_columns()\n self._years = self._db_api.get_unique_years(self._potential_deals)\n self._make_model = self._db_api.get_all_make_models()\n self._action_options = [\"Action1\", \"Action2\", \"Action3\"]", "def initialize(self):\r\n if not self.context:\r\n self.context = SQLContext(self.url, self.connection, self.schema)\r\n if self.table is None:\r\n self.table = self.context.table(self.table_name)\r\n if not self.fields:\r\n self.read_fields()\r\n self.field_names = self.fields.names()", "def _execute(self, db):\n raise NotImplementedError", "def test_db_page():\n create_test_object(db)\n test_objects = get_test_objects(db)\n return render_template(\"hello_db.html\", test_objects=test_objects)", "def onDatabaseLog(self):\n root1 = Tk()\n root1.title(_('Transaction Root Detail Table'))\n root1.resizable(width=False, height=False)\n DataView(root1)", "def run(self):\n if not (self.table and self.columns):\n raise Exception(\"table and columns need to be specified\")\n\n connection = self.output().connect()\n\n # attempt to copy the data into mysql\n # if it fails because the target table doesn't exist\n # try to create it by running self.create_table\n for attempt in range(2):\n try:\n cursor = connection.cursor()\n print(\"caling init copy...\")\n self.init_copy(connection)\n self.copy(cursor)\n self.post_copy(connection)\n if self.enable_metadata_columns:\n self.post_copy_metacolumns(cursor)\n except Error as err:\n if err.errno == errorcode.ER_NO_SUCH_TABLE and attempt == 
0:\n # if first attempt fails with \"relation not found\", try creating table\n # logger.info(\"Creating table %s\", self.table)\n connection.reconnect()\n self.create_table(connection)\n else:\n raise\n else:\n break\n\n # mark as complete in same transaction\n self.output().touch(connection)\n connection.commit()\n connection.close()", "def index(request, template_name='index.html'):\n\n context_dict = {}\n model = Uni\n column_headers = ['rank', 'name', 'location', 'city', 'scores_overall']\n check_fields = ['scores_citations', 'scores_industry_income', 'scores_international_outlook', 'scores_research', 'scores_teaching', 'stats_student_staff_ratio', 'stats_pc_intl_students', 'stats_number_students', 'quality_of_life_index', 'safety_index', 'cost_of_living_index', 'climate_index' ]\n \n uni_main = Uni.objects.values(*column_headers)\n \n # subject filter\n if request.GET.get('sub_drop'):\n subject_filter = request.GET.get('sub_drop')\n if subject_filter == 'All':\n uni_main = Uni.objects.values(*column_headers)\n\n elif subject_filter == 'Computer Science':\n model = Uni_cs\n uni_main = Uni_cs.objects.values(*column_headers)\n elif subject_filter == 'Engineering':\n model = Uni_eng\n uni_main = Uni_eng.objects.values(*column_headers)\n else:\n uni_main = Uni.objects.values(*column_headers)\n\n\n # Locations filter\n if request.GET.get('loc_drop'):\n location_filter = request.GET.get('loc_drop')\n if location_filter == 'All':\n listings = uni_main\n\n else:\n listings = uni_main.filter(location=location_filter)\n else:\n listings = uni_main\n\n # advanced search\n qs = listings\n if request.GET.getlist('v_scores[]'):\n scorevar = request.GET.getlist('v_scores[]')\n if scorevar != ['0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0']:\n for i in range (0, len(scorevar)):\n if scorevar[i]=='0':\n check_fields[i]='0' \n check_fields = [x for x in check_fields if x != '0']\n checkvar = check_fields\n gen_rank(checkvar,scorevar,model)\n column_headers[0] = 'new_rank'\n values = column_headers + checkvar\n qs = listings.values(*values).order_by('new_rank')\n listings = qs\n uni_name = listings\n\n paginator = Paginator(listings, 10)\n page = request.GET.get('page')\n try:\n uni_list = paginator.page(page) \n except PageNotAnInteger: \n uni_list = paginator.page(1) \n except EmptyPage: \n uni_list = paginator.page(paginator.num_pages)\n\n context_dict = {'uni_list': uni_list, 'uni_name': uni_name, 'loc_list' : Uni.objects.order_by('location').values_list('location', flat=True).distinct()}\n\n # Render the HTML template index.html with the data in the context variable\n# return render(request, 'index.html', context=context)\n return render(request, template_name, context_dict)", "def on_show_database(self, _):\n if self.tablepopup:\n self.tablepopup.open()", "def recreate_tables_and_views(self, db_type: t.Union[DBType, str]) -> None:\n db_type = DBType(db_type)\n if db_type == DBType.WEB:\n recreate_web_db_schema()\n elif db_type == DBType.ORCH:\n recreate_orch_db_schema()\n else:\n raise RuntimeError(\"This code should not be reachable\")", "def data_table_page( table_type ) :\r\n logger.debug( f\"table_type={table_type}\" )\r\n model = session_info.get_user_model(session)\r\n\r\n # select table type's corresponding data\r\n if table_type == \"x\" :\r\n df = model._dfX\r\n elif table_type== \"y\" :\r\n df = model._dfY\r\n elif table_type == \"merged\" :\r\n df = model.dfMerged\r\n elif table_type == \"param\" :\r\n param = request.args[\"param\"]\r\n logger.debug(f\"param={param}\")\r\n 
df = model.dfMerged[[ model.id_col , f\"{param}_x\", f\"{param}_y\"]]\r\n else :\r\n logger.debug()\r\n raise ValueError( f\"Unrecognized table_type={table_type}\" )\r\n \r\n return f\"<pre>{df.to_string()}</pre>\" # TODO replace with template\r", "def main():\n cur, conn = connect('dwh.cfg')\n \n set_schema = schema_queries[1]\n cur.execute(set_schema)\n \n print('Loading Staging Tables.')\n load_staging_tables(cur, conn)\n \n print('Inserting Rows.')\n insert_tables(cur, conn)\n\n \n conn.close()", "def print_tables(self):\n\n conn = self.engine.connect()\n self.print_table(self.nodes, conn)\n self.print_table(self.paths, conn)\n self.view_tree(connection=conn)", "def sync_tables():\n sync_table(ShoppingList)\n sync_table(User)\n sync_table(Category)\n sync_table(Feed)\n sync_table(News)\n sync_table(Photo)\n sync_table(Profile)\n sync_table(Video)\n sync_type(FeedPhoto)\n sync_type(NewsPhoto)", "def meta_db_tables(self) -> list:\r\n def _passer(**kwargs):\r\n data = self.engine.execute(\"\"\"\r\n SELECT * FROM sqlite_master WHERE type='table';\r\n \"\"\").fetchall()\r\n table_names = [i[1] for i in data]\r\n return table_names\r\n return self._connectionController(_passer)", "def _before_execute(self, db):\n pass", "def db_for_read(self, model, **hints):\n model_name = model._meta.label_lower\n pos = model_name.find('.')\n table_name = model_name[pos+1:]\n if table_name in self.route_encuestas:\n return 'encuestas'\n elif table_name in self.route_uxxienc_resul:\n return 'uxxienc_resul'\n return None", "def setUp(self):\n create_table(self.DATABASE_PATH)\n self.model = model.CodeReviewDatabase(self.DATABASE_PATH)", "def create_tables():\n inf(\"Creating tables\")\n \n pinners = Table('pinners', metadata,\n Column('pinner_id', Integer, primary_key=True),\n Column('name', String(40)),\n Column('email', String(40))\n )\n pinners.create()\n \n contents = Table('contents', metadata,\n Column('content_id', Integer, primary_key=True),\n Column('url', String(80)),\n Column('display_status', String(20)), # good, objectionable, copyright\n Column('pinner_id', Integer, ForeignKey('pinners.pinner_id'))\n )\n contents.create()\n\n reviewers = Table('reviewers', metadata,\n Column('reviewer_id', Integer, primary_key=True),\n Column('name', String(40)),\n Column('email', String(40))\n )\n reviewers.create()\n\n complaints = Table('complaints', metadata,\n Column('complaint_id', Integer, primary_key=True),\n Column('complaint_timestamp', DateTime), # when the complaint was filed\n Column('complaint_type', String(80)), # objectionable, copyright\n Column('process_status', String(20)), # complaint, review, done\n Column('display_status', String(20)), # good, objectionable, copyright\n Column('review_timestamp', DateTime), # when the compliant was resolved\n Column('pinner_id', Integer, ForeignKey('pinners.pinner_id')),\n Column('reviewer_id', Integer, ForeignKey('reviewers.reviewer_id')),\n Column('content_id', Integer, ForeignKey('contents.content_id'))\n )\n complaints.create()\n \n # could create a table of \"near by\" images and/or near by features and \n # include these in the review", "def manage_society_tags_table(context):\n return context", "def dev_view(request):\n if request.user.is_authenticated() and request.user.username == \"tola\" and request.user.is_staff:\n from tola.tables_sync import update_level1, update_level2\n # update TolaTables with WorkflowLevel1 and WorkflowLevel2 data\n message = {\"attempt\": \"Running Tables Loader\"}\n\n print \"Running Script...\"\n\n try:\n 
update_level1()\n message['level1'] = \"Level1 Success\"\n except Exception as e:\n print '%s (%s)' % (e.message, type(e))\n message['level1'] = '%s (%s)' % (e.message, type(e))\n\n try:\n update_level2()\n message['level2'] = \"Level2 Success\"\n except Exception as e:\n print '%s (%s)' % (e.message, type(e))\n message['level2'] = '%s (%s)' % (e.message, type(e))\n\n return render(request, \"dev.html\", {'message': message})\n else:\n # log person\n print request.user.is_authenticated()\n print request.user.username\n print request.user.is_staff\n redirect_url = '/'\n return HttpResponseRedirect(redirect_url)", "def _during_execute(self, db, entity):\n pass", "def main(self):\n\t\tprint \"Retreiving view 'All\",\n\t\tview_all = self.hudson.getViewByName('All')\n\t\tprint \"Done\"\n\t\tprint \"iterating over jobs\"\n\t\tfor job in view_all.jobs.values():\n\t\t\tviewname = job.name.split(\".\")[0]\n\t\t\tif job.name not in self.getJobListFromDB():\n\t\t\t\tself.addJobToDb(job.name)\n\t\t\tif viewname not in self.getViewListFromDB():\n\t\t\t\tself.addViewToDb(viewname)\n\t\t\tfor build in job.builds:\n\t\t\t\tbo = HudsonConnector.HudsonObject( self.hudson.getDataFromUrl(build['url']) )\n\t\t\t\tstamp = datetime.datetime.fromtimestamp(bo.timestamp/1000)\n\t\t\t\tif stamp > self.lastrun:\n\t\t\t\t\tif bo.result is None:\n\t\t\t\t\t\trunname = job.name+\" #%d\" % bo.number\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tprint runname.ljust(29), str(stamp).ljust(24), bo.result.capitalize()\n\t\t\t\t\t\texcept AttributeError:\n\t\t\t\t\t\t\tprint runname.ljust(29), str(stamp).ljust(24), \"Unknown\"\n\t\t\t\t\telse:\n\t\t\t\t\t\tjobdata = { 'name':job.name, 'view':job.name.split(\".\")[0], 'start':stamp, \n\t\t\t\t\t\t\t\t\t'end':stamp + datetime.timedelta(seconds=bo.duration),\n\t\t\t\t\t\t\t\t\t'duration':bo.duration,\n\t\t\t\t\t\t\t\t\t'result':bo.result\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\tself.uploadJobState(jobdata)\n\t\tself.saveState()", "def create_tables_and_apply_patches(self):\n\n if self.authorized and not self.db_tables_initiated:\n with self.connection.cursor() as cursor:\n for statement in self.parse_mysql_sql_file():\n cursor.execute(statement)\n\n PyFunceble.LOGGER.info(\n \"Created the missing tables. 
Applied all patched\"\n )\n\n self.db_tables_initiated = True", "def create_all_tables(self):\n pass", "def setupTables(self):\n self.cTableView.setModel(self.cModel)\n self.cTableView.setItemDelegate(MyQSqlRelationalDelegate(self))\n self.cTableView.setColumnHidden(CID, True)\n self.cTableView.setWordWrap(True)\n self.cTableView.resizeRowsToContents()\n self.cTableView.setAlternatingRowColors(True)\n self.cItmSelModel = QItemSelectionModel(self.cModel)\n self.cTableView.setSelectionModel(self.cItmSelModel)\n self.cTableView.setSelectionBehavior(QTableView.SelectRows)\n self.cTableView.setSortingEnabled(True)", "def test_context_data(self):\n response = self.client.get(self.get_url())\n context = response.context\n self.assertIn('study_table', context)\n self.assertIsInstance(context['study_table'], tables.StudyTable)", "def prepare(self):\n self.parse_template()\n self.build_argparser()\n self.parse_arguments()\n self.render_template()\n self.update_relation()", "def listruns(request):\n if not request_contains_filter_parameter(request):\n return HttpResponseRedirect(\"/%s\" % get_today_filter_parameter())\n\n context = {}\n\n \"\"\"\n Make sure that the logged in user can only see his own runs\n In case the user is not logged in show all objects,\n but remove the edit and remove buttons from the tableview.\n \"\"\"\n if request.user.is_authenticated:\n run_info_list = RunInfo.objects.filter(userid=request.user)\n run_info_filter = RunInfoFilter(request.GET, queryset=run_info_list)\n table = RunInfoTable(run_info_filter.qs)\n\n mismatching_runs, mismatching_run_registy_runs = (\n run_info_filter.qs.compare_with_run_registry()\n )\n if len(mismatching_runs) != 0:\n context[\"mismatching_runs\"] = [\n run[\"run_number\"] for run in mismatching_runs\n ]\n else:\n run_info_list = RunInfo.objects.all()\n run_info_filter = RunInfoFilter(request.GET, queryset=run_info_list)\n table = SimpleRunInfoTable(run_info_filter.qs)\n\n RequestConfig(request).configure(table)\n\n applied_filters = get_filters_from_request_GET(request)\n filter_parameters = \"\"\n for key, value in applied_filters.items():\n filter_parameters += \"&\" if filter_parameters.startswith(\"?\") else \"?\"\n filter_parameters += key + \"=\" + value\n\n context[\"filter_parameters\"] = filter_parameters\n context[\"table\"] = table\n context[\"filter\"] = run_info_filter\n context[\"run_registry_online\"] = TrackerRunRegistryClient().connection_possible()\n return render(request, \"certhelper/list.html\", context)", "def test_get_context_data(self):\n i = IndexView()\n self.assertIsInstance(i, IndexView, \"Should be an instance of IndexView\")\n context = i.get_context_data()\n\n self.assertIsNotNone(context, \"Context should not be None\")\n self.assertIsNotNone(context['sponsors'], \"Sponsors was None\")\n self.assertIsNotNone(context['communities'], \"Communities was None\")\n self.assertIsNotNone(context['news_items'], \"News items was none\")\n self.assertIsNotNone(context['images'], \"Images was none\")\n self.assertIsNotNone(context['journal_entries'], \"Journal entries was none\")", "def print_database(self):\n table_names = self.catalog\n for table_name in table_names:\n table = self.parse_table(table_name)\n if not table:\n continue\n print(f'TABLE NAME: {table_name}\\r\\n')\n print(tabulate(table, headers=\"keys\"))\n print('\\r\\n\\r\\n\\r\\n\\r\\n')", "def change_view(self, request, object_id, form_url='', extra_context=None):\n\n # Grapping table that matches topic_id.\n topic = 
TopicTable.objects.get(id=object_id)\n\n # Generating off-days list on-fly for template.\n off_days = [day for day in topic.off_days]\n\n # Attaching table topics and places as extra context. \n extra_context = extra_context or {}\n extra_context['topics_table'] = topic.to_list(topic.topics)\n extra_context['places_table'] = topic.to_list(topic.places)\n extra_context['off_days'] = topic.to_list(topic.off_days)\n\n return super(TopicTableAdmin, self).change_view(\n request, object_id, form_url, extra_context=extra_context,\n )", "def main(self):\n self.delete_details()\n self.delete_cleaned()\n self.vacuum()", "def show_db_overview(self):\n\n models_list = sorted_models_list()\n apps = [p.app_label for p in settings.SITE.installed_plugins]\n s = \"%d apps: %s.\" % (len(apps), \", \".join(apps))\n s += \"\\n%d models:\\n\" % len(models_list)\n i = 0\n headers = [\n #~ \"No.\",\n \"Name\",\n \"Default table\",\n #~ \"M\",\n \"#fields\",\n \"#rows\",\n #~ ,\"first\",\"last\"\n ]\n rows = []\n for model in models_list:\n if True: # model._meta.managed:\n i += 1\n cells = []\n #~ cells.append(str(i))\n cells.append(fmn(model))\n cells.append(model.get_default_table())\n #~ cells.append(str(model))\n #~ if model._meta.managed:\n #~ cells.append('X')\n #~ else:\n #~ cells.append('')\n cells.append(str(len(model._meta.concrete_fields)))\n qs = model.objects.all()\n n = qs.count()\n cells.append(str(n))\n #~ if n:\n #~ cells.append(obj2str(qs[0]))\n #~ cells.append(obj2str(qs[n-1]))\n #~ else:\n #~ cells.append('')\n #~ cells.append('')\n\n rows.append(cells)\n s += rstgen.table(headers, rows)\n return s", "def init_tables(self) -> None:\n with self.table_access_condition:\n conn = self._get_connection()\n conn.execute(\"PRAGMA foreign_keys = 1\")\n c = conn.cursor()\n c.execute(tables.CREATE_STUDIES_TABLE)\n c.execute(tables.CREATE_SUBMISSIONS_TABLE)\n c.execute(tables.CREATE_REQUESTERS_TABLE)\n c.execute(tables.CREATE_UNITS_TABLE)\n c.execute(tables.CREATE_WORKERS_TABLE)\n c.execute(tables.CREATE_RUNS_TABLE)\n c.execute(tables.CREATE_RUN_MAP_TABLE)\n c.execute(tables.CREATE_PARTICIPANT_GROUPS_TABLE)\n c.execute(tables.CREATE_PARTICIPANT_GROUP_QUALIFICATIONS_MAPPING_TABLE)\n conn.commit()", "def _refresh_table(self):\n self._column_selected()\n self._table_selected()\n self._column_selection_change()\n self.refresh_column_list()\n self.refresh_table_list()\n self.refresh_table()", "def on_get(self, req, resp, table, id):\n user = req.context['user']\n columns = req.params['column'] if 'column' in req.params else None\n engine = user_db_engine(user)\n key = _make_key(engine, table, columns, id, -1)\n resp.context['cache_key'] = key\n if config.use_cache() and cache.contains_query(key):\n resp.context['cache_hit'] = True\n resp.status = falcon.HTTP_200\n else:\n result, count = _select(engine, table, id=id, columns=columns)\n\n resp.context['cache_miss'] = True\n resp.context['result'] = { 'result': 'ok', 'data': result }\n resp.status = falcon.HTTP_200\n\n log.info(\"user [{}]: get row({}) with id [{}] from table [{}]\".format(user['user'], columns if columns else \"*\", id, table))", "def _handle_flask_app_and_db(cls):\n cls._set_flask_app_context()\n cls.app = cls._get_app(server.app)", "def loaddata(self):\n # Connect to the db\n self.conn, self.c = self.connect_db(self.dbname)\n # create the bdefile table to \n self.c.execute(oeeutil.sql_create_bdefile_table)\n # Delete any previous records\n self.c.execute('DELETE FROM bdefile')\n # hold the content for analysis\n for item in self.content:\n 
self.c.execute('INSERT INTO bdefile VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?)', item)\n self.c.executescript(oeeutil.sql_create_bdefile_view)\n self.conn.commit()", "def dashboard(request):\r\n if not request.user.is_staff:\r\n raise Http404\r\n\r\n # results are passed to the template. The template knows how to render\r\n # two types of results: scalars and tables. Scalars should be represented\r\n # as \"Visible Title\": Value and tables should be lists of lists where each\r\n # inner list represents a single row of the table\r\n results = {\"scalars\":{},\"tables\":{}}\r\n\r\n # count how many users we have\r\n results[\"scalars\"][\"Unique Usernames\"]=User.objects.filter().count()\r\n results[\"scalars\"][\"Activated Usernames\"]=User.objects.filter(is_active=1).count()\r\n\r\n # count how many enrollments we have\r\n results[\"scalars\"][\"Total Enrollments Across All Courses\"] = CourseEnrollment.objects.filter(is_active=1).count()\r\n\r\n # establish a direct connection to the database (for executing raw SQL)\r\n cursor = connection.cursor()\r\n\r\n # define the queries that will generate our user-facing tables\r\n # table queries need not take the form of raw SQL, but do in this case since\r\n # the MySQL backend for django isn't very friendly with group by or distinct\r\n table_queries = {}\r\n table_queries[\"course registrations (current enrollments)\"] = \"\"\"\r\n select\r\n course_id as Course,\r\n count(user_id) as Students\r\n from student_courseenrollment\r\n where is_active=1\r\n group by course_id\r\n order by students desc;\"\"\"\r\n table_queries[\"number of students in each number of classes\"] = \"\"\"\r\n select registrations as 'Registered for __ Classes' ,\r\n count(registrations) as Users\r\n from (select count(user_id) as registrations\r\n from student_courseenrollment\r\n where is_active=1\r\n group by user_id) as registrations_per_user\r\n group by registrations;\"\"\"\r\n\r\n # add the result for each of the table_queries to the results object\r\n for query in table_queries.keys():\r\n cursor.execute(table_queries[query])\r\n results[\"tables\"][query] = SQL_query_to_list(cursor, table_queries[query])\r\n\r\n context={\"results\":results}\r\n\r\n return render_to_response(\"admin_dashboard.html\",context)", "def _process_nlx_157874_1_view(self, raw, limit=None):\n\n src_key = 'tables'\n model = Model(self.graph)\n col = self.resources[src_key]['columns']\n with open(raw, 'r') as rawread:\n reader = csv.reader(rawread, delimiter='\\t', quotechar='\\\"')\n row = next(reader)\n if not self.check_fileheader(col, row):\n pass\n\n for row in reader:\n # head -1 dvp.pr_nlx_157874_1|tr '\\t' '\\n'|\n # sed \"s|\\(.*\\)|# \\1 = row[col.index('\\1')]|g\"\n\n morphology_term_id = row[col.index('morphology_term_id')].strip()\n # morphology_term_num = row[col.index('morphology_term_num')]\n morphology_term_label = row[col.index('morphology_term_label')].strip()\n morphology_term_url = row[col.index('morphology_term_url')].strip()\n # terminology_category_label = row[\n # col.index('terminology_category_label')]\n # terminology_category_url = row[col.index('terminology_category_url')]\n # subcategory = row[col.index('subcategory')]\n objective_definition = row[col.index('objective_definition')].strip()\n subjective_definition = row[col.index('subjective_definition')].strip()\n comments = row[col.index('comments')].strip()\n synonyms = row[col.index('synonyms')].strip()\n replaces = row[col.index('replaces')].strip()\n small_figure_url = 
row[col.index('small_figure_url')].strip()\n large_figure_url = row[col.index('large_figure_url')].strip()\n # e_uid = row[col.index('e_uid')]\n # v_uid = row[col.index('v_uid')]\n # v_uuid = row[col.index('v_uuid')]\n # v_lastmodified = row[col.index('v_lastmodified')]\n # v_status = row[col.index('v_status')]\n # v_lastmodified_epoch = row[col.index('v_lastmodified_epoch')]\n\n # Add morphology term to graph as a class\n # with label, type, and description.\n model.addClassToGraph(\n morphology_term_id,\n morphology_term_label,\n blv.terms['PhenotypicFeature']\n )\n\n # Assemble the description text\n\n if subjective_definition != '' and not (\n re.match(r'.+\\.$', subjective_definition)):\n # add a trailing period.\n subjective_definition = subjective_definition + '.'\n if objective_definition != '' and not (\n re.match(r'.+\\.$', objective_definition)):\n # add a trailing period.\n objective_definition = objective_definition + '.'\n\n definition = ' '.join(\n (objective_definition, subjective_definition))\n\n model.addDefinition(morphology_term_id, definition,\n class_category=blv.terms['PhenotypicFeature'])\n\n # <term id> FOAF:depicted_by literal url\n # <url> type foaf:depiction\n\n # do we want both images?\n # morphology_term_id has depiction small_figure_url\n if small_figure_url != '':\n model.addDepiction(morphology_term_id, small_figure_url)\n\n # morphology_term_id has depiction large_figure_url\n if large_figure_url != '':\n model.addDepiction(morphology_term_id, large_figure_url)\n\n # morphology_term_id has comment comments\n if comments != '':\n model.addComment(morphology_term_id, comments)\n\n for syn in synonyms.split(';'):\n model.addSynonym(\n morphology_term_id,\n syn.strip(),\n self.globaltt['has_exact_synonym']\n )\n\n # morphology_term_id has_related_synonym replaces (; delimited)\n if replaces not in ['', synonyms]:\n for syn in replaces.split(';'):\n syn.strip()\n if syn != '':\n model.addSynonym(\n morphology_term_id,\n syn,\n self.globaltt['has_related_synonym']\n )\n\n # <morphology_term_id> <foaf:page> morphology_term_url\n if morphology_term_id is not None:\n reference = Reference(\n self.graph, morphology_term_id, self.globaltt['web page'])\n\n # TEC 201905:\n # Not so sure we need explicit <eom_uri> <webpage> <eom_url>.\n # since <eom_uri> IS the <eom_url>.\n\n reference.addPage(morphology_term_id, morphology_term_url)\n\n if limit is not None and reader.line_num > limit:\n break", "def get_context_data(self, **kwargs):\n context = super(IndexView, self).get_context_data(**kwargs)\n # The following two lines should appear in every context\n context['student'] = 'Nobody'\n context['tab'] = ''\n context['index'] = index_context(self.request)\n return context", "def view():\n # retrieve child and dorm parents records from database\n children = Child.query.filter_by().all()\n parents = Parent.query.filter_by().all()\n return render_template('view.html', children=children, parents=parents)", "def get_tables(self, db_name):\n pass", "def table_creater(self, tablename, columnnames, entries):\n createrurl = self.casjobsurl + '/contexts/MyDB/query'", "def load_dwh_tables(self):\n print(\"Loading the creative works table\")\n self.cur.execute(dwh_queries.INSERT_CREATIVE_WORKS_SQL_QUERY)\n self.conn.commit()\n\n print(\"Loading the participations table\")\n\n self.cur.execute(dwh_queries.INSERT_PARTICIPATIONS_SQL_QUERY)\n self.conn.commit()", "def _database(self):\n ...", "def generate_psql_views(self, schema, schema_name_v1, schema_name_v2, psql_views_path):\n 
psql_views = open(psql_views_path, 'w')\n psql_views.write(\"SET client_min_messages TO ERROR;\\n\")\n psql_views.write(\"DROP SCHEMA IF EXISTS %s CASCADE;\\n\\n\" % schema_name_v1)\n psql_views.write(\"CREATE SCHEMA IF NOT EXISTS %s;\\n\\n\" % schema_name_v1)\n\n for table_name_v1, table_attr in schema['tables'].iteritems():\n table_name_v2 = table_attr['name']\n columns_pri, columns_ref, columns, columns_ignore = \\\n PsqlParser._get_categorized_columns(table_attr['columns'])\n\n columns = merge_dicts(columns_pri, columns_ref, columns)\n\n columns_v2 = [ '\"'+col_attr['name']+'\"' for col_name_v1, col_attr in columns.iteritems() ]\n columns_v2 += [ 'NULL' for col_name_v1, col_attr in columns_ignore.iteritems() ]\n\n columns_v1 = [ '\"'+col_name_v1+'\"' for col_name_v1, col_attr in columns.iteritems()]\n columns_v1 += [ '\"'+col_name_v1+'\"' for col_name_v1, col_attr in columns_ignore.iteritems() ]\n\n view_sql = ('CREATE VIEW %s (%s) AS \\n SELECT %s FROM %s WITH CASCADED CHECK OPTION;\\n\\n' % (\n \"%s.%s\" % (schema_name_v1, table_name_v1),\n ', '.join(columns_v1),\n ', '.join(columns_v2),\n \"%s.%s\" % (schema_name_v2, table_name_v2)\n ))\n\n psql_views.write(view_sql + \"\\n\")\n psql_views.close()", "def load_staging_tables_docstring(cur, conn):", "def build_lookup_tables(self):\n\n for component_model in self.model_dictionary.values():\n if hasattr(component_model, 'build_lookup_tables'):\n component_model.build_lookup_tables()", "def update_views():\n # replace Supervisor main entry\n here = path.abspath(path.dirname(__file__))\n # set main page\n VIEWS['index.html'] = {'template': path.join(here, 'ui/index.html'), 'view': SupvisorsView}\n # set address /processpage\n VIEWS['procaddress.html'] = {'template': path.join(here, 'ui/procaddress.html'), 'view': ProcAddressView}\n # set address/host page\n VIEWS['hostaddress.html'] = {'template': path.join(here, 'ui/hostaddress.html'), 'view': HostAddressView}\n # set application page\n VIEWS['application.html'] = {'template': path.join(here, 'ui/application.html'), 'view': ApplicationView}\n # set fake page to export images\n VIEWS['process_cpu.png'] = {'template': path.join(here, 'ui/empty.html'), 'view': ProcessCpuImageView}\n VIEWS['process_mem.png'] = {'template': path.join(here, 'ui/empty.html'), 'view': ProcessMemoryImageView}\n VIEWS['address_cpu.png'] = {'template': path.join(here, 'ui/empty.html'), 'view': AddressCpuImageView}\n VIEWS['address_mem.png'] = {'template': path.join(here, 'ui/empty.html'), 'view': AddressMemoryImageView}\n VIEWS['address_io.png'] = {'template': path.join(here, 'ui/empty.html'), 'view': AddressNetworkImageView}", "def run_async (self):\n if self.testing:\n return\n conn = Connection(self.db, self.host, self.user, self.passwd, True)\n conn.execute(self.sql)\n self.table = conn.fetch()", "def create_tables(): \n \n pk_contraint = \"CONSTRAINT {}_pk PRIMARY KEY ({})\"\n uq_contraint = \"CONSTRAINT {}_uq UNIQUE ({})\"\n fk_query = \"\"\"CONSTRAINT {}_fk_{} \n FOREIGN KEY ({}) \n REFERENCES {}({}) \n ON UPDATE CASCADE \n ON DELETE RESTRICT\n \"\"\"\n \n create_dict = {}\n index = 1\n\n\n ############################## public SCHEMA ##############################\n \n schema = 'public'\n create_schema(schema)\n\n #################### site ####################\n table_name = 'site'\n pk_id = 'site_id'\n uq_list = ['site_code']\n fk_dict = {}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n site_code CHAR(3),\n purok VARCHAR,\n sitio VARCHAR,\n barangay VARCHAR,\n municipality VARCHAR,\n 
province VARCHAR,\n region VARCHAR,\n psgc INTEGER,\n active BOOLEAN NOT NULL DEFAULT TRUE,\n season SMALLINT,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n\n ############################## spatial SCHEMA ##############################\n \n schema = 'spatial'\n create_schema(schema)\n \n #################### exposure ####################\n table_name = 'exposure'\n pk_id = 'exp_id'\n uq_list = ['exp_name']\n fk_dict = {}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n exp_name VARCHAR,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n \n #################### site_exposure ####################\n table_name = 'site_exposure'\n pk_id = 'se_id'\n uq_list = ['site_id', 'exp_id', 'geom']\n fk_dict = {'site_id': {'ref_schema': 'public', 'ref_table': 'site'},\n 'exp_id': {'ref_schema': 'spatial', 'ref_table': 'exposure'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n site_id INTEGER,\n exp_id INTEGER,\n label_name VARCHAR,\n geom GEOMETRY,\n activated DATE NOT NULL DEFAULT CURRENT_DATE,\n deactivated DATE,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n \n #################### feature ####################\n table_name = 'feature'\n pk_id = 'feat_id'\n uq_list = ['feat_name']\n fk_dict = {}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n feat_name VARCHAR,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### site_feature ####################\n table_name = 'site_feature'\n pk_id = 'sf_id'\n uq_list = ['site_id', 'feat_id', 'geom']\n fk_dict = {'site_id': {'ref_schema': 'public', 'ref_table': 'site'},\n 'feat_id': {'ref_schema': 'spatial', 'ref_table': 'feature'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n site_id INTEGER,\n feat_id INTEGER,\n geom GEOMETRY,\n activated DATE NOT NULL DEFAULT CURRENT_DATE,\n deactivated DATE,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### hazard_zone ####################\n table_name = 'hazard_zone'\n pk_id = 'hz_id'\n uq_list = ['site_id, geom']\n fk_dict = {'site_id': {'ref_schema': 'public', 'ref_table': 'site'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n site_id INTEGER,\n geom GEOMETRY,\n activated DATE NOT NULL DEFAULT CURRENT_DATE,\n deactivated DATE,\n {}, {} {}\n );\n \"\"\"\n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### monitoring ####################\n table_name = 'monitoring'\n pk_id = 'mon_id'\n uq_list = ['mon_name']\n fk_dict = {}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n mon_name VARCHAR,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': 
uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### site_monitoring ####################\n table_name = 'site_monitoring'\n pk_id = 'sm_id'\n uq_list = ['site_id', 'mon_id', 'geom']\n fk_dict = {'site_id': {'ref_schema': 'public', 'ref_table': 'site'},\n 'mon_id': {'ref_schema': 'spatial', 'ref_table': 'monitoring'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n site_id INTEGER,\n mon_id INTEGER,\n label_name VARCHAR,\n geom GEOMETRY,\n activated DATE NOT NULL DEFAULT CURRENT_DATE,\n deactivated DATE,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n\n ############################### comm SCHEMA ###############################\n \n schema = 'comm'\n create_schema(schema)\n\n #################### gsm_server ####################\n table_name = 'gsm_server'\n pk_id = 'server_id'\n uq_list = ['server_name']\n fk_dict = {}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n server_name VARCHAR,\n platform_type VARCHAR,\n version SMALLINT,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### server_port ####################\n table_name = 'server_port'\n pk_id = 'port_id'\n uq_list = ['server_id', 'port']\n fk_dict = {'server_id': {'ref_schema': 'comm', 'ref_table': 'gsm_server'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n server_id INTEGER,\n port BOOLEAN,\n ser_port VARCHAR,\n pwr_on_pin SMALLINT,\n ring_pin SMALLINT,\n module_type SMALLINT,\n {}, {} {}\n );\n \"\"\"\n query += \"\"\" COMMENT ON TABLE {}.{} IS \n '0- left\n 1- right'\n ;\"\"\".format(schema, table_name)\n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### network_type ####################\n table_name = 'network_type'\n pk_id = 'prefix'\n uq_list = ['prefix']\n fk_dict = {}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} VARCHAR(3), \n carrier SMALLINT,\n {}, {} {}\n );\n \"\"\"\n query += \"\"\" COMMENT ON TABLE {}.{} IS \n '1- globe\n 2- smart\n 3- landline'\n ;\"\"\".format(schema, table_name)\n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### gsm_module ####################\n table_name = 'gsm_module'\n pk_id = 'gsm_id'\n uq_list = ['prefix', 'num', 'activated']\n fk_dict = {'prefix': {'ref_schema': 'comm', 'ref_table': 'network_type'},\n 'port_id': {'ref_schema': 'comm', 'ref_table': 'server_port'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n prefix VARCHAR(3),\n num CHAR(7),\n activated DATE NOT NULL DEFAULT CURRENT_DATE,\n port_id INTEGER,\n {}, {} {}\n );\n \"\"\"\n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n\n ############################# temporal SCHEMA #############################\n \n schema = 'temporal'\n create_schema(schema)\n\n #################### marker_observation ####################\n table_name = 'marker_observation'\n pk_id = 'mo_id'\n uq_list = ['site_id', 'ts']\n fk_dict = {'site_id': {'ref_schema': 'public', 
'ref_table': 'site'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n site_id INTEGER,\n ts TIMESTAMP,\n meas_type VARCHAR(7),\n weather VARCHAR,\n observer_name VARCHAR,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### marker_history ####################\n table_name = 'marker_history'\n pk_id = 'hist_id'\n uq_list = ['sm_id', 'ts', 'event']\n fk_dict = {'sm_id': {'ref_schema': 'spatial', 'ref_table': 'site_monitoring'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL,\n sm_id BIGINT,\n ts TIMESTAMP,\n event BOOLEAN,\n label_name VARCHAR,\n {}, {} {}\n );\n \"\"\"\n query += \"\"\" COMMENT ON TABLE {}.{} IS \n '0- rename\n 1- reposition'\n ;\"\"\".format(schema, table_name)\n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### marker_data ####################\n table_name = 'marker_data'\n pk_id = 'data_id'\n uq_list = ['sm_id', 'mo_id']\n fk_dict = {'sm_id': {'ref_schema': 'spatial', 'ref_table': 'site_monitoring'},\n 'mo_id': {'ref_schema': 'temporal', 'ref_table': 'marker_observation'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL,\n mo_id BIGINT,\n sm_id BIGINT,\n measurement NUMERIC(5,1),\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### marker_alert ####################\n table_name = 'marker_alert'\n pk_id = 'alert_id'\n uq_list = ['data_id']\n fk_dict = {'data_id': {'ref_schema': 'temporal', 'ref_table': 'marker_data'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL,\n data_id BIGINT,\n displacement NUMERIC(4,1),\n time_delta FLOAT,\n alert_level SMALLINT,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### logger_model ####################\n table_name = 'logger_model'\n pk_id = 'model_id'\n uq_list = ['has_tilt', 'has_rain', 'has_piezo', 'has_soms', 'logger_type']\n fk_dict = {}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n has_tilt BOOLEAN,\n has_rain BOOLEAN,\n has_piezo BOOLEAN,\n has_soms BOOLEAN,\n logger_type SMALLINT,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### logger ####################\n table_name = 'logger'\n pk_id = 'logger_id'\n uq_list = ['sm_id']\n fk_dict = {'sm_id': {'ref_schema': 'spatial', 'ref_table': 'site_monitoring'},\n 'model_id': {'ref_schema': 'temporal', 'ref_table': 'logger_model'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n sm_id BIGINT,\n model_id INTEGER,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n \n #################### logger_mobile ####################\n table_name = 'logger_mobile'\n pk_id = 'mobile_id'\n uq_list = ['logger_id', 'activated']\n fk_dict = {'logger_id': {'ref_schema': 'temporal', 
'ref_table': 'logger'},\n 'gsm_id': {'ref_schema': 'comm', 'ref_table': 'gsm_module'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL,\n logger_id INTEGER,\n activated DATE NOT NULL DEFAULT CURRENT_DATE,\n sim_num VARCHAR(12),\n gsm_id INTEGER,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n\n #################### EXECUTE QUERY TO CREATE TABLES ####################\n for index in create_dict.keys():\n dct = create_dict[index]\n schema = dct['schema']\n table_name = dct['table_name']\n query = dct['query']\n pk_id = dct['pk_id']\n uq_list = dct['uq_list']\n fk_dict = dct['fk_dict']\n if len(fk_dict.keys()) == 0:\n fk_constraint = ''\n else:\n fk_constraint_list = ['']\n for fk_id in fk_dict.keys():\n ref_schema = fk_dict.get(fk_id)['ref_schema']\n ref_table = fk_dict.get(fk_id)['ref_table']\n fk_part = fk_query.format(table_name, ref_table, fk_id,\n \"{}.{}\".format(ref_schema, ref_table),\n fk_id)\n fk_constraint_list.append(fk_part)\n fk_constraint = ', '.join(fk_constraint_list)\n \n query = query.format(schema, table_name, pk_id, \n pk_contraint.format(table_name, pk_id),\n uq_contraint.format(table_name, ', '.join(uq_list)),\n \"{}\".format(fk_constraint))\n qdb.execute(query)", "def parse_view_page(self):\n for row in self.driver.find_elements_by_css_selector(\"table\"):\n cells = row.find_elements_by_tag_name(\"td\")\n for cell in cells:\n yield cell.text", "def print_all_tables(self):\n conn = self.connect()\n cursor = conn.cursor()\n cursor.execute(\"SELECT name FROM sqlite_master WHERE type='table';\")\n print(cursor.fetchall())", "def print_tables(db):\n # connect to the database and create a cursor\n\n # select all columns using SQL command\n # 'SELECT * FROM StatelessCountByCountry'\n\n # print the data from StatelessCountByCountry\n\n # select all columns using SQL command\n # 'SELECT * FROM StatelessCountByRegion'\n\n # print the data from StatelessCountByRegion", "def get_context_data(self, **kwargs):\n\n context = super(MonFinView, self).get_context_data()\n if test_on == False:\n rezult_1, rezult_2, rezult_3, proc_error, title_1, title_2, titles_10, data_p, critical = processing_r()\n context['table_1'] = rezult_1\n context['table_2'] = rezult_2\n context['table_3'] = rezult_3\n context['titles_10'] = titles_10\n context['data_p'] = data_p\n context['critical'] = critical\n else: # Test data for the template\n table_1, table_23, proc_error, title_1, title_2, critical = templates_test()\n context['table_1'] = table_1\n context['table_2'] = table_23\n context['table_3'] = table_23\n context['critical'] = critical\n context['data_p'] = {'company': '',\n 'position': '',\n 'name': 'Копенок Виктор',\n 'phone': '',\n 'email': '[email protected]'\n }\n context['titles_1'] = title_1\n context['titles'] = title_2\n context['connect'] = proc_error\n context['version'] = VERSION\n return context", "def setup_tableview(self):\n\n # Reset the widget\n self.table_view.clear()\n\n self.table_view.setRowCount(len(self.sorted_keys))\n self.table_view.setColumnCount(8)\n\n # Set the horizontal headers' text and column width\n self.table_view.setHorizontalHeaderItem(0, QtWidgets.QTableWidgetItem('Title'))\n self.table_view.setColumnWidth(0, 150)\n\n self.table_view.setHorizontalHeaderItem(1, QtWidgets.QTableWidgetItem('Type'))\n self.table_view.setColumnWidth(1, 65)\n\n self.table_view.setHorizontalHeaderItem(2, 
QtWidgets.QTableWidgetItem('Score'))\n self.table_view.setColumnWidth(2, 60)\n\n self.table_view.setHorizontalHeaderItem(3, QtWidgets.QTableWidgetItem('Genre'))\n self.table_view.setColumnWidth(3, 150)\n\n self.table_view.setHorizontalHeaderItem(4, QtWidgets.QTableWidgetItem('Duration'))\n self.table_view.setColumnWidth(4, 300)\n\n self.table_view.setHorizontalHeaderItem(5, QtWidgets.QTableWidgetItem('Release Date'))\n self.table_view.setColumnWidth(5, 150)\n\n self.table_view.setHorizontalHeaderItem(6, QtWidgets.QTableWidgetItem('Credits'))\n self.table_view.setColumnWidth(6, 350)\n\n self.table_view.setHorizontalHeaderItem(7, QtWidgets.QTableWidgetItem('Summary'))\n self.table_view.setColumnWidth(7, 350)\n\n '''\n self.data_dict[title] = {\n 'score': score, → 7.7\n 'summary': summary, → 'Some string'\n 'duration': duration, → '100 episodes (7 Seasons in 2020), 43min per Episode' / '1h 55min'\n 'credits': creds_list, → ['Creators: some dude', 'Stars: hero, his chick, evil dude']\n 'genres': genres, → ['Drama', 'Fantasy', 'Comedy']\n 'released': year, → 2016\n 'type': show_type, → 'Movie' / 'Serie'\n }\n '''\n\n for i, title in enumerate(self.sorted_keys):\n\n # Adjust certain keys for better displaying\n title_genres = ', '.join(self.data_dict[title]['genres'])\n title_credits = '\\n'.join(self.data_dict[title]['credits'])\n title_score = str(self.data_dict[title]['score']) + '/10'\n\n # Set row height for each row depending on the amount of credits\n # (Producers:, Writers:, Stars: // Producers:, Stars:)\n self.table_view.setRowHeight(i, len(self.data_dict[title]['credits']) * 25)\n\n # Add column data for each row\n self.table_view.setItem(i, 0, QtWidgets.QTableWidgetItem(title))\n self.table_view.setItem(i, 1, QtWidgets.QTableWidgetItem(self.data_dict[title]['type']))\n self.table_view.setItem(i, 2, QtWidgets.QTableWidgetItem(title_score))\n self.table_view.setItem(i, 3, QtWidgets.QTableWidgetItem(title_genres))\n self.table_view.setItem(i, 4, QtWidgets.QTableWidgetItem(self.data_dict[title]['duration']))\n self.table_view.setItem(i, 5, QtWidgets.QTableWidgetItem(self.data_dict[title]['released']))\n self.table_view.setItem(i, 6, QtWidgets.QTableWidgetItem(title_credits))\n self.table_view.setItem(i, 7, QtWidgets.QTableWidgetItem(self.data_dict[title]['summary']))", "def test_get_table_list(self):\n db_introspection = DatabaseIntrospection(self.connection)\n cursor = mock.MagicMock()\n\n def list_tables(*args, **kwargs):\n return [[\"Table_1\", \"t\"], [\"Table_2\", \"t\"]]\n\n cursor.run_sql_in_snapshot = list_tables\n table_list = db_introspection.get_table_list(cursor=cursor)\n self.assertEqual(\n table_list,\n [\n TableInfo(name=\"Table_1\", type=\"t\"),\n TableInfo(name=\"Table_2\", type=\"t\"),\n ],\n )", "def handle(self, *args, **opts):\n if opts['empty'] and opts['for_real_empty']:\n # Empty Tables\n self.empty_tables()\n sys.exit()", "def create_tables(self):\n for name, attribute in self.__dict__.items():\n if hasattr(attribute, 'create_table_in_sqlite_db'):\n attribute.create_table_in_sqlite_db()", "def get_context_data(self, **kwargs):\n start, end = self.get_start_end_dates(self.request)\n if start or end is not None:\n orders = self.get_orders_with_range(start, end, False)\n \"\"\"\n HERE we use RAW SQL queries. 
It is ease than construct huge queryset.\n \"\"\"\n with open(os.path.join(CUR_DIR, 'sql', 'accountant_summary.sql.tpl'), 'r') as sqlfile:\n raw_sql = sqlfile.read()\n raw_sql = raw_sql.format(\n orderitem_tbl=OrderItem._meta.db_table,\n product2category_tbl=product_models.Product.categories.through._meta.db_table,\n order_tbl=Order._meta.db_table,\n open_date=pytz.utc.normalize(start).strftime('%Y-%m-%d %H:%M:%S'),\n close_date=pytz.utc.normalize(end).strftime('%Y-%m-%d %H:%M:%S'),\n )\n connection = get_default_db_connection(self.request)\n cursor = connection.cursor()\n cursor.execute(raw_sql)\n columns = [col[0] for col in cursor.description]\n category_data = {}\n total_discount = orders.aggregate(discounts=Sum('discount_total'))['discounts'] or 0\n total_quantity = 0\n total_sales = 0\n for row in cursor.fetchall():\n cdata = dict(zip(columns, row))\n category_data[cdata['category_id']] = cdata\n # total_discount += cdata['discount']\n total_quantity += cdata['amount']\n total_sales += cdata['sales']\n\n categories = dict(\n (c['id'], c)\n for c in product_models.Category.objects.all().values('id', 'name', 'parent', 'active', 'archived'))\n categories[None] = {'id': None, 'name': 'Uncategorized Items',\n 'parent': None, 'active': True, 'archived': False}\n for cid in categories:\n categories[cid].update({'sales': 0, 'amount': 0, 'percentage': 0, 'level': 0, 'child_cnt': 0})\n for cid in categories:\n if cid in category_data:\n categories[cid]['sales'] = category_data[cid]['sales']\n categories[cid]['amount'] = category_data[cid]['amount']\n if total_sales > 0:\n categories[cid]['percentage'] = 100.0 * categories[cid]['sales'] / total_sales\n parent_id = categories[cid]['parent']\n prev_parent = None\n while parent_id:\n if prev_parent is not None and prev_parent == parent_id:\n # ERROR!! 
Category has itself as parent!\n break\n prev_parent = parent_id\n categories[parent_id]['child_cnt'] += 1\n parent_id = categories[parent_id]['parent']\n categories[cid]['level'] += 1\n # sorting categories tree\n sorted_categories = []\n maxlevel = max(ctg['level'] for _, ctg in categories.items())\n for clevel in range(maxlevel + 1):\n thislevel_cats = [ctg for ctg in categories.values()\n if ctg['level'] == clevel and not\n ((not ctg['active'] or ctg['archived']) and\n ctg['child_cnt'] == 0 and ctg['amount'] == 0)]\n thislevel_cats = sorted(thislevel_cats, key=lambda x: (x['sales'], x['amount'], x['name']))\n if clevel == 0:\n sorted_categories = list(reversed(thislevel_cats))\n continue\n for subcat in thislevel_cats:\n if (not subcat['active'] or subcat['archived']) and subcat['child_cnt'] == 0 \\\n and subcat['amount'] == 0:\n # do not show last items if they are not active\n continue\n parent_pos = [pos for pos, c in enumerate(sorted_categories)\n if c['id'] == subcat['parent']] or [0]\n sorted_categories.insert(parent_pos[0] + 1, subcat)\n\n results = {\n 'discount': total_discount,\n 'categories': sorted_categories,\n 'total': total_quantity,\n 'total_sales': total_sales,\n 'start_date': start.strftime('%B %d, %Y'),\n 'end_date': end.strftime('%B %d, %Y'),\n }\n return results\n else:\n return {}", "def update(self):\n current = LazyRegister(self.db)\n current.render()\n cur = self.db.cursor()\n for table in self.tables:\n if table in current.tables:\n additions, removals = current.tables[table].migrate(self.tables[table])\n for addition in additions:\n cur.execute(\"\"\"ALTER TABLE %s ADD COLUMN %s\"\"\" % (\n table, addition[1].get_sql()\n ))\n print('Added column: ', addition[0])\n for removal in removals:\n #cur.execute(\"\"\"ALTER TABLE %s DROP COLUMN %s\"\"\" % (\n # table, removal[0]\n #))\n #print('Removed column: ', removal[0])\n print('Did not removed column: ', removal[0])\n else:\n schema = self.tables[table].get_create_table_sql()\n cur.execute(schema)\n print('Added table %s' % table)", "def execute(self, context):\n #### Postgres SELECT query block\n try:\n # Init Airflow Postgres Hook\n pg = PostgresHook(postgres_conn_id=self.postgres_conn_id)\n # Get records via an SQL query\n # with open(self.sql) as sql_file: sql = sql_file.read()\n records = pg.get_records(sql=self.sql)\n # Raise block exception \n except: raise\n #### Transformations block\n try:\n # Apply transformation function\n results = self.transform_function(records)\n # Raise block exception \n except: raise \n #### JSON export block\n try:\n # Get file absolute path\n self.abs_file_path = get_absolute_path(self.file_path, context)\n # Export as JSON file\n with open(self.abs_file_path, \"w\") as json_file:\n json.dump(results, json_file, indent=4)\n # Raise block exception \n except: raise", "def __init__(self, view_name, cursor=None, schema=None):\n self.name = view_name\n self.type = 'view' # Saves using type() or isinstance\n self.columns = {}\n self.sql = ''\n self.triggers = {}\n if schema:\n self.schema = schema\n else:\n schema = None\n if cursor:\n self._get_view(cursor)", "def setUp(self):\n self.db_handler = DynamoDBHandler(ModelTests.TABLE_NAME)\n self.init_table()\n self.items = {}\n self.init_items()\n self.populate_table()" ]
[ "0.5740603", "0.5701369", "0.55218136", "0.5455442", "0.54200023", "0.54088736", "0.5407114", "0.53050184", "0.5299238", "0.5283333", "0.5257961", "0.52211183", "0.52096987", "0.5201612", "0.5185067", "0.5168369", "0.5159047", "0.51454693", "0.51377684", "0.5123308", "0.5118604", "0.5116085", "0.5092654", "0.5083858", "0.50785345", "0.5059289", "0.50573915", "0.50562805", "0.5010703", "0.5009059", "0.4974727", "0.4971839", "0.49558315", "0.49445987", "0.49441716", "0.49349403", "0.49336806", "0.4925932", "0.49164727", "0.48933387", "0.48917428", "0.4862327", "0.4856635", "0.48560116", "0.48507178", "0.4847269", "0.48431912", "0.48404768", "0.48294833", "0.48283595", "0.4825574", "0.4822002", "0.4821636", "0.48196018", "0.4818189", "0.48121536", "0.4811985", "0.481107", "0.47978008", "0.479495", "0.47891727", "0.47880712", "0.47855684", "0.47840804", "0.47837105", "0.4783689", "0.4781714", "0.47806174", "0.47611737", "0.4760155", "0.4759462", "0.4755213", "0.4751725", "0.47504598", "0.4749539", "0.47468537", "0.47383857", "0.47368047", "0.4733707", "0.47291064", "0.4727585", "0.47182444", "0.4714813", "0.47115037", "0.47073728", "0.47047415", "0.46986765", "0.46931505", "0.46885213", "0.4688232", "0.46861145", "0.46846226", "0.4680672", "0.46806642", "0.4669556", "0.4667989", "0.46675593", "0.46674007", "0.46625292", "0.46625173" ]
0.4954838
33
check if the table is a partitioned table and return the partition details
def get_table_partition_details(  # pylint: disable=unused-argument
    self,
    table_name: str,
    schema_name: str,
    inspector: Inspector,
) -> Tuple[bool, Optional[TablePartition]]:
    return False, None  # By default the table will be a Regular Table
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _is_partitioned(self):\n ## check if the table are partitioned, need the split because of a change in the type of partitions in pydantic\n partitions = self.table_config[\"partitions\"]\n if partitions is None or len(partitions) == 0:\n return False\n\n if isinstance(partitions, list):\n # check if any None inside list.\n # False if it is the case Ex: [None, 'partition']\n # True otherwise Ex: ['partition1', 'partition2']\n return all(item is not None for item in partitions)\n\n raise ValueError(\"Partitions must be a list or None\")", "def fetch_partitions(self, table_name):\n partition_result = self.query(\n sql.fetch_partition,\n (\n self._current_db,\n table_name,\n ),\n )\n # If a table doesn't have partition schema the \"PARTITION_NAME\"\n # will be string \"None\" instead of something considered as false\n # in python\n return [\n partition_entry[\"PARTITION_NAME\"]\n for partition_entry in partition_result\n if partition_entry[\"PARTITION_NAME\"] != \"None\"\n ]", "def get_partitioning(self):\n raise Exception(\"Unimplemented\")", "def sd_partition_table():\n table = {}\n for partition in sd_partitions():\n table[partition] = sd_part_size(partition)\n return table", "def _get_partition_list(self):\n raise NotImplementedError('Must be implemented in subclasses.')", "def get_partitioning(disk):\n\n #TODO\n return \"Unknown\"", "def getPartition(self):\n\t\treturn self.partition", "def partitionname(self) :\n\t\ttry :\n\t\t\treturn self._partitionname\n\t\texcept Exception as e:\n\t\t\traise e", "def partition(self):\n return self.tag(\"partition\")", "def get_partition(self, partid):\n #TODO(zhengda) add implementation later.", "def get_partition(self, partid):\n #TODO(zhengda) add implementation later.", "def usb_partition_table():\n table = {}\n for partition in usb_partitions():\n table[partition] = int(usb_part_size(partition))\n return table", "def partitionWasCreated(partitionTableName):\n return partitionTableName in partitionCreationHistory", "def is_partition(disk): #TODO: Could change to use \"Whole\" attrib. 
Good idea?\n\n return \"s\" in disk.split(\"disk\")[1]", "def _random_sample_for_partitioned_tables(self) -> Query:\n partition_field = self._partition_details[\"partition_field\"]\n col = self.table.__table__.c.get(partition_field.lower())\n col_type = None\n if col is not None:\n col_type = col.type\n if partition_field == \"_PARTITIONDATE\":\n col_type = sqlalchemy.DATE\n if partition_field == \"_PARTITIONTIME\":\n col_type = sqlalchemy.DATETIME()\n\n if not self._partition_details.get(\"partition_values\"):\n sample = (\n self.session.query(self.table)\n .filter(\n format_partition_datetime(\n partition_field,\n self._partition_details[\"partition_interval\"],\n self._partition_details[\"partition_interval_unit\"],\n col_type,\n )\n )\n .subquery()\n )\n return aliased(self.table, sample)\n sample = (\n self.session.query(self.table)\n .filter(\n column(partition_field).in_(self._partition_details[\"partition_values\"])\n )\n .subquery()\n )\n return aliased(self.table, sample)", "def test_partitions(os_partition_table):\n\n # # independent tests\n # tests that work as non-root user\n #\n # get independent list of partitions from kernel\n\n re_part = re.compile(' (sd[a-z][1-9])$')\n lines_out = os_one_liner('cat /proc/partitions')\n proc_parts = [] # partitions from /proc/partitions\n for line in lines_out:\n if re_part.search(line):\n proc_parts += [re_part.search(line).group(1)]\n\n # Are partitions from proc_parts in partition_table\n for d_part in proc_parts:\n test = f'/dev/{d_part}'\n # some partitions from /proc/partitions are not block devices\n # assert test in [v.dev for i, v in os_partition_table.partitions.items()]\n for key, value in os_partition_table.partitions.items():\n assert key == value.dev\n assert value.disk in key\n assert value.part_num in key\n # more tests", "def list_partitions(self, partitioning):\n return []", "def sync_table_partitions(self) -> None:\n log.info(\"== Stage 5.1: Check table partitions are up-to-date ==\")\n\n # we're using partitions in the ddl file, skip syncing anything\n if not self.rm_partition:\n return\n # not a partitioned table, nothing to do\n if not self.partitions:\n return\n\n # only apply this logic to RANGE partitioning, as other types\n # are usually static\n partition_method = self.get_partition_method(\n self._current_db, self.new_table_name\n )\n if partition_method != \"RANGE\":\n return\n\n try:\n new_tbl_parts = self.list_partition_names(self.new_table_name)\n orig_tbl_parts = self.list_partition_names(self.table_name)\n\n parts_to_drop = set(new_tbl_parts) - set(orig_tbl_parts)\n parts_to_add = set(orig_tbl_parts) - set(new_tbl_parts)\n\n # information schema literally has the string None for\n # non-partitioned tables. 
Previous checks *should* prevent us\n # from hitting this.\n if \"None\" in parts_to_add or \"None\" in parts_to_drop:\n log.warning(\n \"MySQL claims either %s or %s are not partitioned\",\n self.new_table_name,\n self.table_name,\n )\n return\n\n if parts_to_drop:\n log.info(\n \"Partitions missing from source table \"\n \"to drop from new table %s: %s\",\n self.new_table_name,\n \", \".join(parts_to_drop),\n )\n if parts_to_add:\n log.info(\n \"Partitions in source table to add to new table %s: %s\",\n self.new_table_name,\n \", \".join(parts_to_add),\n )\n self.apply_partition_differences(parts_to_drop, parts_to_add)\n except Exception:\n log.exception(\n \"Unable to sync new table %s with orig table %s partitions\",\n self.new_table_name,\n self.table_name,\n )", "def get_partition(self):\n return self._partition", "def check_partition_exist(table_name, partition_criteria, query=None):\n if query == None:\n query = partition_criteria\n shell_cmd = u\"hive -e \\\"show partitions %s partition(%s)\\\" | grep \\\"%s\\\" | wc | awk '{print $1}' \" % (table_name, partition_criteria, query)\n #print \"shell comand is : \", shell_cmd\n cmd = [u\"-c\", shell_cmd]\n algo_logger.info(\" \".join(cmd))\n p = subprocess.Popen(cmd, stdout = subprocess.PIPE, stderr = subprocess.PIPE, shell=True)\n out, err = p.communicate()\n #print out.strip()\n if p.returncode != 0:\n return 0\n if int(out.strip()) > 0:\n return 1\n return 0", "def get_partition():\n if selection is None:\n warning(\"You need to pick something first.\")\n return\n if not selection.obj_type in ['partition']:\n warning(\"You need to partition the selection first.\")\n return\n res = askItems([['property',[1]]],\n caption='Partition property')\n if res:\n prop = res['property']\n getPartition(selection,prop)\n highlightPartitions(selection)", "async def getPartitionState(self):\n partition_state = await self.director.getItemVariableValue(\n self.item_id, \"PARTITION_STATE\"\n )\n return partition_state", "def partition(cls, key):\n return cls.partition_indexed(\n cls.hash_ring.select_bucket(key),\n )", "def partition_name(self):\n return self._infos.get(BulkInsertState.IMPORT_PARTITION, \"\")", "def partition_exists(self, partitioning, partition_id):\n raise Exception(\"unimplemented\")", "def partitions(self):\n self._get_latest_content()\n return self._data.get('partitions', [])", "def get_partitions(self):\n \n parts = client.Management.Partition.get_partition_list()\n partitions = []\n for part in parts:\n partitions.append(part['partition_name'])\n return partitions", "def partition(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"partition\")", "def needPartitionTableUpdate(self):\n n_table=list()\n d_table=self.destination.getPartitionTable()\n s_table=self.source.getPartitionTable()\n for i in range(len(s_table)):\n n_table.append(re.sub(self.source.getDeviceName(), \\\n self.destination.getDeviceName(), \\\n s_table[i]))\n if d_table == n_table:\n return False\n else:\n return True", "def test_partition_keys(self):\r\n class ModelWithPartitionKeys(cqlengine.Model):\r\n id = columns.UUID(primary_key=True, default=lambda:uuid4())\r\n c1 = cqlengine.Text(primary_key=True)\r\n p1 = cqlengine.Text(partition_key=True)\r\n p2 = cqlengine.Text(partition_key=True)\r\n\r\n cols = ModelWithPartitionKeys._columns\r\n\r\n self.assertTrue(cols['c1'].primary_key)\r\n self.assertFalse(cols['c1'].partition_key)\r\n\r\n self.assertTrue(cols['p1'].primary_key)\r\n self.assertTrue(cols['p1'].partition_key)\r\n 
self.assertTrue(cols['p2'].primary_key)\r\n self.assertTrue(cols['p2'].partition_key)\r\n\r\n obj = ModelWithPartitionKeys(p1='a', p2='b')\r\n self.assertEquals(obj.pk, ('a', 'b'))", "def provide_partition_info(self):\n self.partition_info = True", "def createPartitions(self, databaseCursor, iterator):\n self.logger.debug(\"%s - in createPartitions\", threading.currentThread().getName())\n partitionTableClasses = getOrderedPartitionList([self.__class__])\n #self.logger.debug(\"DEBUG - Classes are %s\",partitionTableClasses)\n uniqueItems = [x for x in iterator]\n for tableClass in partitionTableClasses:\n tableObject = self\n if not self.__class__ == tableClass:\n tableObject = tableClass(logger = self.logger)\n #self.logger.debug(\"DEBUG - Handling %s /w/ sql %s\",tableObject.name,tableObject.partitionCreationSqlTemplate)\n tableObject._createOwnPartition(databaseCursor,uniqueItems)", "def get_partition(self, partition_spec):\n return self.partitions[partition_spec]", "def tm_partition(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"tm_partition\")", "def partition_book(self):\n ...", "def max_partition(\n table, schema=\"default\", field=None, filter_map=None, metastore_conn_id=\"metastore_default\"\n):\n from airflow.providers.apache.hive.hooks.hive import HiveMetastoreHook\n\n if \".\" in table:\n schema, table = table.split(\".\")\n hive_hook = HiveMetastoreHook(metastore_conn_id=metastore_conn_id)\n return hive_hook.max_partition(schema=schema, table_name=table, field=field, filter_map=filter_map)", "def get_partition_cfg(partition_type: str) -> Dict:\n raise NotImplementedError('Not supported yet.')", "def partition_id(self) -> 'outputs.PreventionJobTriggerInspectJobStorageConfigDatastoreOptionsPartitionId':\n return pulumi.get(self, \"partition_id\")", "def test_ingest_dataframe_partition(\n self, mocked_client, mocker, partitioned_df, tmp_path\n ):\n mocked_client._core_service_stub = Core.CoreServiceStub(\n grpc.insecure_channel(\"\")\n )\n\n mocker.patch.object(\n mocked_client._core_service_stub,\n \"GetFeatureTable\",\n return_value=_ingest_test_getfeaturetable_mocked_resp(\n f\"file://{tmp_path}\", \"date\"\n ),\n )\n\n mocked_client.set_project(\"my_project\")\n ft = mocked_client.get_feature_table(\"ingest_featuretable\")\n mocked_client.ingest(ft, partitioned_df, timeout=600)\n\n pq_df = pq.read_table(tmp_path).to_pandas().drop(columns=[\"date\"])\n\n partitioned_df, pq_df = _ingest_test_format_dataframes(\n partitioned_df, pq_df, True\n )\n\n assert_frame_equal(partitioned_df, pq_df)", "def partition(self, dimension, processes=None):\n if processes:\n q = (self._table.source.isin(processes) |\n self._table.target.isin(processes))\n values = self._table.loc[q, dimension].unique()\n else:\n values = self._table[dimension].unique()\n return Partition.Simple(dimension, values)", "def _validate_partitioning(device):\n try:\n # Ensure we re-read the partition table before we try to list\n # partitions\n utils.execute('partprobe', device, run_as_root=True,\n attempts=CONF.disk_utils.partprobe_attempts)\n except (processutils.UnknownArgumentError,\n processutils.ProcessExecutionError, OSError) as e:\n LOG.warning(\"Unable to probe for partitions on device %(device)s \"\n \"after writing the image, the partitioning table may \"\n \"be broken. 
Error: %(error)s\",\n {'device': device, 'error': e})\n\n try:\n nparts = len(disk_utils.list_partitions(device))\n except (processutils.UnknownArgumentError,\n processutils.ProcessExecutionError, OSError) as e:\n msg = (\"Unable to find a valid partition table on the disk after \"\n f\"writing the image. The image may be corrupted. Error: {e}\")\n raise exception.InstanceDeployFailure(msg)\n\n # Check if there is at least one partition in the partition table after\n # deploy\n if not nparts:\n msg = (\"No partitions found on the device {} after writing \"\n \"the image.\".format(device))\n raise exception.InstanceDeployFailure(msg)", "def partition_key(self) -> str:\n return pulumi.get(self, \"partition_key\")", "def test_partition_tables_types(sdc_builder, sdc_executor, gcp, partition_type, file_format):\n\n if Version(sdc_builder.version) < Version('5.5.0') and file_format == 'JSON':\n pytest.skip('JSON staging introduced in 5.5.0')\n\n bucket_name = f'stf_{get_random_string(ascii_lowercase, 10)}'\n dataset_name = f'stf_{get_random_string(ascii_lowercase, 10)}'\n table_name = f'stf_{get_random_string(ascii_lowercase, 10)}'\n records_count = 20\n\n partition = {\"dataset\": dataset_name,\n \"table\": table_name,\n \"partitionType\": partition_type,\n \"timePartitionExpiration\": 0}\n\n if partition_type == 'INGESTION':\n # it could be whatever, we do not partition on any column here\n partition[\"timePartitionType\"] = \"MONTH\"\n data_type = 'STRING'\n elif partition_type == 'TIMESTAMP':\n partition[\"columnName\"] = \"partition_column\"\n partition[\"timePartitionType\"] = \"MONTH\"\n data_type = 'DATETIME'\n elif partition_type in ['DATE', 'DATETIME']:\n partition[\"columnName\"] = \"partition_column\"\n partition[\"timePartitionType\"] = \"MONTH\"\n data_type = partition_type\n elif partition_type == 'INTEGER':\n partition[\"columnName\"] = \"partition_column\"\n partition[\"integerPartitionStart\"] = -1000\n partition[\"integerPartitionStep\"] = 100\n partition[\"integerPartitionEnd\"] = 1000\n data_type = partition_type\n\n # Build the pipeline\n pipeline_builder = sdc_builder.get_pipeline_builder()\n\n # Dev data generator\n dev_data_generator = pipeline_builder.add_stage('Dev Data Generator')\n dev_data_generator.set_attributes(batch_size=10,\n records_to_be_generated=records_count,\n fields_to_generate=[\n {\"type\": data_type, \"field\": \"partition_column\"},\n {\"type\": \"POKEMON\", \"field\": \"name\"}\n ])\n\n # Google BigQuery destination stage\n bigquery = pipeline_builder.add_stage(name=DESTINATION_STAGE_NAME)\n bigquery.set_attributes(project_id=gcp.project_id,\n dataset=dataset_name,\n table=table_name,\n bucket=bucket_name,\n staging_file_format=file_format,\n enable_data_drift=True,\n create_table=True,\n create_dataset=True,\n purge_stage_file_after_ingesting=True,\n partition_table=True,\n partition_configuration=[partition])\n\n dev_data_generator >> bigquery\n\n pipeline = pipeline_builder.build().configure_for_environment(gcp)\n\n bigquery_client = gcp.bigquery_client\n dataset_ref = DatasetReference(gcp.project_id, dataset_name)\n\n try:\n logger.info(f'Creating temporary bucket {bucket_name}')\n bucket = gcp.retry_429(gcp.storage_client.create_bucket)(bucket_name)\n\n sdc_executor.add_pipeline(pipeline)\n sdc_executor.start_pipeline(pipeline).wait_for_finished()\n\n # Verify by reading records using Google BigQuery client\n table = bigquery_client.get_table(f'{dataset_name}.{table_name}')\n data_from_bigquery = [tuple(row.values()) for row in 
bigquery_client.list_rows(table)]\n data_from_bigquery.sort()\n\n # Assert table is partitioned as well\n if partition_type == 'INTEGER':\n assert table.range_partitioning.field == 'partition_column'\n assert table.range_partitioning.range_.start == -1000\n assert table.range_partitioning.range_.interval == 100\n assert table.range_partitioning.range_.end == 1000\n elif partition_type == 'INGESTION':\n assert table.time_partitioning.type_ == 'MONTH'\n else:\n assert table.time_partitioning.field == 'partition_column'\n assert table.time_partitioning.type_ == 'MONTH'\n # And that we have records in the table\n assert len(data_from_bigquery) == records_count\n finally:\n _clean_up_bigquery(bigquery_client, dataset_ref)\n _clean_up_gcs(gcp, bucket, bucket_name)", "def __create_partition(self,partition_dt):\n\n p_array = self.__partition_date_to_path_array(partition_dt)\n \n # For each component, fetch the group or create it\n # Year\n try:\n y_group = self.root_group._f_get_child(p_array[0])\n except tables.NoSuchNodeError:\n y_group = self.file.create_group(self.root_group,p_array[0])\n\n # Month\n try:\n m_group = y_group._f_get_child(p_array[1])\n except tables.NoSuchNodeError:\n m_group = self.file.create_group(y_group,p_array[1])\n\n # Day\n try:\n d_group = m_group._f_get_child(p_array[2])\n except tables.NoSuchNodeError:\n d_group = self.file.create_group(m_group,p_array[2])\n\n # We need to create the table in the day group\n ts_data = self.file.create_table(d_group,'ts_data',self.table_description,self.table_title,\n self.table_filters, self.table_expectedrows, self.table_chunkshape, self.table_byteorder)\n\n # Need to save this as an attribute because it doesn't seem to be saved anywhere\n ts_data.attrs._TS_TABLES_EXPECTEDROWS_PER_PARTITION = self.table_expectedrows\n\n return ts_data", "def partition(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"partition\")", "def partition(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"partition\")", "def get_this_partition(adapter):\n myid = u.my_partition_id()\n\n # There will almost always be fewer VIOSes than LPARs. 
Since we're\n # querying without xags, it should be very quick.\n vio_wraps = vios.VIOS.search(adapter, id=myid)\n if len(vio_wraps) == 1:\n return vio_wraps[0]\n\n # We delay the query to the LPARs because there could be hundreds of them.\n # So we don't want to query it unless we need to.\n lpar_wraps = lpar.LPAR.search(adapter, id=myid)\n if len(lpar_wraps) == 1:\n return lpar_wraps[0]\n\n # If we made it here, something is wrong.\n raise ex.ThisPartitionNotFoundException(\n count=len(vio_wraps + lpar_wraps), lpar_id=myid)", "def get_type(self):\n return DOS_PARTITIONS[self.partition_type]", "def get_partition_type(part):\n blkid, _ = command(\n [\n 'blkid',\n '-p',\n '-o', 'udev',\n part,\n ]\n )\n saw_part_entry = False\n for line in blkid.splitlines():\n (key, value) = line.split('=')\n if key == 'ID_PART_ENTRY_TYPE':\n return value\n if key == 'ID_PART_ENTRY_SCHEME':\n table_type = value\n if key.startswith('ID_PART_ENTRY_'):\n saw_part_entry = True\n\n # hmm, is it in fact GPT?\n table_type = None\n base = get_partition_base(part)\n blkid, _ = command(\n [\n 'blkid',\n '-p',\n '-o', 'udev',\n base\n ]\n )\n for line in blkid.splitlines():\n (key, value) = line.split('=')\n if key == 'ID_PART_TABLE_TYPE':\n table_type = value\n if table_type != 'gpt':\n return None # not even GPT\n\n if saw_part_entry:\n return None # GPT, and blkid appears to be new, so we're done.\n\n # bah, fall back to sgdisk.\n if 'blkid' not in warned_about:\n LOG.warning('Old blkid does not support ID_PART_ENTRY_* fields, trying sgdisk; may not correctly identify ceph volumes with dmcrypt')\n warned_about['blkid'] = True\n (base, partnum) = split_dev_base_partnum(part)\n sgdisk, _ = command(\n [\n 'sgdisk',\n '-p',\n base,\n ]\n )\n\n for line in sgdisk.splitlines():\n m = re.search('\\s+(\\d+)\\s+\\d+\\s+\\d+\\s+\\S+ \\S+B\\s+\\S+\\s+(.*)', line)\n if m is not None:\n num = m.group(1)\n if num != partnum:\n continue\n desc = m.group(2)\n # assume unencrypted ... blkid has failed us :(\n if desc == 'ceph data':\n return OSD_UUID\n if desc == 'ceph journal':\n return JOURNAL_UUID\n\n return None", "def _record_specific_partition(r_d, numnodes, cur):\n # No partitioning has been specified. Create the appropriate entries.\n if r_d['partmtd'] == 0:\n for i in range(1, numnodes + 1):\n Database.execute(cur, 'UPDATE dtables '\n 'SET partmtd = 0 '\n 'WHERE nodeid = ? AND tname = ?',\n ErrorHandle.raise_handler, (i, r_d['tname']))\n\n # Range partitioning has been specified. Create the appropriate entries.\n elif r_d['partmtd'] == 1:\n for i in range(1, numnodes + 1):\n Database.execute(cur, 'UPDATE dtables '\n 'SET partcol = ?, partparam1 = ?, '\n 'partparam2 = ?, partmtd = 1 '\n 'WHERE nodeid = ? AND tname = ?',\n ErrorHandle.raise_handler,\n (r_d['partcol'], r_d['param1'][i - 1], r_d['param2'][i - 1], i,\n r_d['tname']))\n\n # Hash partitioning has been specified. Create the appropriate entries.\n elif r_d['partmtd'] == 2:\n for i in range(1, numnodes + 1):\n Database.execute(cur, 'UPDATE dtables '\n 'SET partcol = ?, partparam1 = ?, partmtd = 2 '\n 'WHERE nodeid = ? 
AND tname = ?',\n ErrorHandle.raise_handler,\n (r_d['partcol'], r_d['param1'], i, r_d['tname']))", "def get_partition(self, nIndex):\n\t\treturn handle_to_object(call_sdk_function('PrlVmDevHd_GetPartition', self.handle, nIndex))", "def partition_path(self):\n return \"/kiel/groups/%s/partitions\" % self.group_name", "def test_partitioner_iter(self):\n partitioner = self.tx_client.SetPartitioner(\"xyzzy\", \"iddqd\")\n self.assertEqual(list(partitioner), [1])", "def partitions(self):\n return self._partitions", "def get_partition_keys(\n self,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> Sequence[str]:\n if self.partition_fn:\n partitions = self.partition_fn(current_time)\n if all(isinstance(partition, Partition) for partition in partitions):\n return [partition.name for partition in partitions] # type: ignore # (illegible conditional)\n else:\n return partitions # type: ignore # (illegible conditional)\n else:\n check.opt_inst_param(\n dynamic_partitions_store, \"dynamic_partitions_store\", DynamicPartitionsStore\n )\n\n if dynamic_partitions_store is None:\n check.failed(\n \"The instance is not available to load partitions. You may be seeing this error\"\n \" when using dynamic partitions with a version of dagster-webserver or\"\n \" dagster-cloud that is older than 1.1.18.\"\n )\n\n return dynamic_partitions_store.get_dynamic_partitions(\n partitions_def_name=self._validated_name()\n )", "def get_partition_count(self) -> int:\n return self._service.partition_count", "def test_partition_tables_no_partition(sdc_builder, sdc_executor, gcp):\n bucket_name = f'stf_{get_random_string(ascii_lowercase, 10)}'\n dataset_name = f'stf_{get_random_string(ascii_lowercase, 10)}'\n table_name = f'stf_{get_random_string(ascii_lowercase, 10)}'\n dataset_el_var = \"${record:attribute('sdc.dataset.name')}\"\n table_el_var = \"${record:attribute('sdc.table.name')}\"\n records_count = 20\n\n # Build the pipeline\n pipeline_builder = sdc_builder.get_pipeline_builder()\n\n # Dev data generator\n dev_data_generator = pipeline_builder.add_stage('Dev Data Generator')\n dev_data_generator.set_attributes(batch_size=10,\n records_to_be_generated=records_count,\n fields_to_generate=[\n {\"type\": \"POKEMON\", \"field\": \"name\"}\n ])\n\n # Build Expression Evaluator\n expression_evaluator = pipeline_builder.add_stage('Expression Evaluator')\n expression_evaluator.set_attributes(header_attribute_expressions=[\n {'attributeToSet': 'sdc.dataset.name',\n 'headerAttributeExpression': dataset_name},\n {'attributeToSet': 'sdc.table.name',\n 'headerAttributeExpression': table_name}]\n )\n\n # Google BigQuery destination stage\n bigquery = pipeline_builder.add_stage(name=DESTINATION_STAGE_NAME)\n bigquery.set_attributes(project_id=gcp.project_id,\n dataset=dataset_el_var,\n table=table_el_var,\n bucket=bucket_name,\n enable_data_drift=True,\n create_table=True,\n create_dataset=True,\n purge_stage_file_after_ingesting=True,\n partition_table=True,\n partition_configuration=[\n {\"dataset\": \"wrong_dataset\",\n \"table\": \"wrong_table\",\n \"partitionType\": \"INGESTION\",\n \"timePartitionType\": \"MONTH\",\n \"timePartitionExpiration\": 0}\n ])\n\n dev_data_generator >> expression_evaluator >> bigquery\n\n pipeline = pipeline_builder.build().configure_for_environment(gcp)\n\n bigquery_client = gcp.bigquery_client\n dataset_ref = DatasetReference(gcp.project_id, dataset_name)\n\n try:\n logger.info(f'Creating temporary bucket {bucket_name}')\n bucket 
= gcp.retry_429(gcp.storage_client.create_bucket)(bucket_name)\n\n sdc_executor.add_pipeline(pipeline)\n sdc_executor.start_pipeline(pipeline).wait_for_finished()\n\n # Verify by reading records using Google BigQuery client\n table = bigquery_client.get_table(f'{dataset_name}.{table_name}')\n data_from_bigquery = [tuple(row.values()) for row in bigquery_client.list_rows(table)]\n data_from_bigquery.sort()\n\n # Assert table is not partitioned\n assert not table.time_partitioning\n # And that we have records in the table\n assert len(data_from_bigquery) == records_count\n finally:\n _clean_up_bigquery(bigquery_client, dataset_ref)\n _clean_up_gcs(gcp, bucket, bucket_name)", "def partition_key(self):\n try:\n return self._annotations[self._partition_key]\n except KeyError:\n return self._annotations.get(EventData.PROP_PARTITION_KEY, None)", "def partition(attrs, df, partitions):\n if attrs in partitions:\n return partitions[attrs]\n shape = df.drop_duplicates(attrs).shape[0]\n partitions[attrs] = shape\n return shape", "def _partitioner(shape, dtype):\n if not isinstance(shape, tensor_shape.TensorShape):\n raise ValueError(f\"shape is not a TensorShape: {shape}\")\n if not shape.is_fully_defined():\n raise ValueError(f\"shape is not fully defined: {shape}\")\n if not isinstance(dtype, dtypes.DType):\n raise ValueError(f\"dtype is not a DType: {dtype}\")\n\n if dtype.base_dtype == dtypes.string:\n element_size = bytes_per_string_element\n else:\n element_size = dtype.size\n\n partitions = [1] * shape.ndims\n bytes_per_slice = 1.0 * (\n shape.num_elements() / shape.dims[axis].value) * element_size\n # How many slices can we fit on one shard of size at most max_shard_bytes?\n # At least one slice is required.\n slices_per_shard = max(1, math.floor(max_shard_bytes / bytes_per_slice))\n # How many shards do we need for axis given that each shard fits\n # slices_per_shard slices from a total of shape[axis] slices?\n axis_shards = int(math.ceil(\n 1.0 * shape.dims[axis].value / slices_per_shard))\n if max_shards:\n axis_shards = min(max_shards, axis_shards)\n\n partitions[axis] = axis_shards\n\n return partitions", "def get_partitions(self):\n return self.partitions", "def postgres_auto_partition(\n model: PostgresPartitionedModel,\n count: int,\n interval_unit: PostgresAutoPartitioningIntervalUnit,\n interval: int,\n start_from: Optional[date] = None,\n using=\"default\",\n):\n\n connection = connections[using]\n\n with connection.cursor() as cursor:\n table = connection.introspection.get_partitioned_table(\n cursor, model._meta.db_table\n )\n\n if not table:\n raise PostgresAutoPartitioningError(\n f\"Model {model.__name__}, with table {model._meta.db_table} \"\n \"does not exists in the database. Did you run \"\n \"`python manage.py migrate`?\"\n )\n\n if table.method != PostgresPartitioningMethod.RANGE:\n raise PostgresAutoPartitioningError(\n f\"Table {table.name} is not partitioned by a range. 
Auto partitioning \"\n \"only supports partitioning by range.\"\n )\n\n schema_editor = connection.schema_editor()\n\n start_datetime = datetime.now()\n if interval_unit == PostgresAutoPartitioningIntervalUnit.MONTH:\n start_datetime = start_datetime.replace(day=1)\n elif interval_unit == PostgresAutoPartitioningIntervalUnit.WEEK:\n start_datetime = start_datetime - relativedelta(\n days=start_datetime.weekday()\n )\n\n for _ in range(count):\n if interval_unit == PostgresAutoPartitioningIntervalUnit.MONTH:\n end_datetime = start_datetime + relativedelta(months=+interval)\n partition_name = start_datetime.strftime(\"%Y_%b\").lower()\n elif interval_unit == PostgresAutoPartitioningIntervalUnit.WEEK:\n end_datetime = start_datetime + relativedelta(weeks=+interval)\n partition_name = start_datetime.strftime(\"%Y_week_%W\").lower()\n\n from_values = start_datetime.strftime(\"%Y-%m-%d\")\n to_values = end_datetime.strftime(\"%Y-%m-%d\")\n\n logger = LOGGER.bind(\n model_name=model.__name__,\n name=partition_name,\n from_values=from_values,\n to_values=to_values,\n )\n\n if start_from and start_datetime.date() < start_from:\n start_datetime = end_datetime\n logger.info(\n \"Skipping creation of partition, before specified start date\",\n start_from=start_from,\n )\n continue\n\n partition_table_name = schema_editor.create_partition_table_name(\n model, partition_name\n )\n\n existing_partition = next(\n (\n table_partition\n for table_partition in table.partitions\n if table_partition.name == partition_table_name\n ),\n None,\n )\n\n if existing_partition:\n start_datetime = end_datetime\n logger.info(\"Skipping creation of partition, already exists\")\n continue\n\n schema_editor.add_range_partition(\n model=model,\n name=partition_name,\n from_values=from_values,\n to_values=to_values,\n )\n\n logger.info(\"Created partition\")\n\n start_datetime = end_datetime", "def partition(game, player):\n height = game.height\n width = game.width\n blanks = game.get_blank_spaces()\n has_partition = False\n partition_col = int(game.width/2)\n partition_row = int(game.height/2)\n moves = game.get_legal_moves(player)\n if moves:\n player_location = game.get_player_location(player)\n for i in range(2, width - 3): #search for vertical partitions\n if (0,i) not in blanks and (0,i+1) not in blanks:\n j = 1\n while j < height and (j, i) not in blanks and (j, i + 1) not in blanks:\n j += 1\n if j == height:\n has_partition = True\n pb = partition_blanks(game, (0,i))\n if pb[0] > pb[1]: #more blanks on the left of the partition\n for move in moves:\n if move[1] < i:\n return has_partition, True\n return has_partition, False\n else: #more blanks on right of partition\n for move in moves:\n if move[1] > i + 1:\n return has_partition, True\n return has_partition, False\n\n for i in range(2, height - 3): #seach for horizontal partitions\n if (i,0) not in blanks and (i+1,0) not in blanks:\n j = 1\n while j < width and (i,j) not in blanks and (i+1, j) not in blanks:\n j += 1\n if j == width:\n has_partition = True\n pb = partition_blanks(game, (i, 0))\n if pb[0] > pb[1]: #more blanks on top of partition\n for move in moves:\n if move[0] < i:\n return has_partition, True\n return has_partition, False\n else: #more blanks below partition\n for move in moves:\n if move[0] > i + 1:\n return has_partition, True\n return has_partition, False\n\n return has_partition, False", "def partition_fulltext(self, fulltext):\n\n #Instantiate fulltext partitioner\n sfp = SciFullTextProcessor()\n\n try:\n #attempt to partition fulltext\n 
partitioned_text = sfp.get_partitioned_full_text(fulltext)\n\n except:\n #if partition fails, return original fulltext and key:value pair for debugging\n partitioned_text = {'error handling fulltext':'error calling partitioner',\n 'fulltext':fulltext}\n\n return partitioned_text", "def _get_partitions():\n partitions = []\n\n try:\n with files.FileReader('/proc/partitions') as f:\n lines = f.readlines()[2:]\n for line in lines:\n _, _, _, name = line.split()\n if name[-1].isdigit():\n partitions.append(name)\n # This will catch access denied and file not found errors, which is expected\n # on non-Linux/limited access systems. All other errors will raise as normal.\n except files.Error:\n pass\n\n return partitions", "def getPartitionList(self, softwareProfileName):\n return self._sp_db_api.getPartitionList(softwareProfileName)", "def test_get_node_partitions(self):\n pass", "def tm_partition(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"tm_partition\")", "def tm_partition(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"tm_partition\")", "def get_partitions(disk):\n partitions = []\n script = [\n 'select disk {}'.format(disk['Number']),\n 'list partition']\n\n try:\n # Run script\n result = run_diskpart(script)\n except subprocess.CalledProcessError:\n pass\n else:\n # Append partition numbers\n output = result.stdout.decode().strip()\n regex = r'Partition\\s+(\\d+)\\s+\\w+\\s+(\\d+\\s+\\w+)\\s+'\n for tmp in re.findall(regex, output, re.IGNORECASE):\n num = tmp[0]\n size = human_readable_size(tmp[1])\n partitions.append({'Number': num, 'Size': size})\n\n return partitions", "def is_bad_partition(par):\n return 'Letter' not in par or REGEX_BAD_PARTITION.search(par['FileSystem'])", "def partition_description(self) -> pulumi.Output[Optional[Any]]:\n return pulumi.get(self, \"partition_description\")", "def get_hyperpartition(self, hyperpartition_id):\n return self.session.query(self.Hyperpartition).get(hyperpartition_id)", "def metadata(self):\n return self._partition_meta_data", "def metadata(self):\n return self._partition_meta_data", "def get_partition_keys(\n self,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> Sequence[T_str]:\n ...", "def partitioning_attribute(self):\n return self._partitioning_attribute", "def get_partition_details(disk, partition):\n details = {}\n script = [\n 'select disk {}'.format(disk['Number']),\n 'select partition {}'.format(partition['Number']),\n 'detail partition']\n\n # Diskpart details\n try:\n # Run script\n result = run_diskpart(script)\n except subprocess.CalledProcessError:\n pass\n else:\n # Get volume letter or RAW status\n output = result.stdout.decode().strip()\n tmp = re.search(r'Volume\\s+\\d+\\s+(\\w|RAW)\\s+', output)\n if tmp:\n if tmp.group(1).upper() == 'RAW':\n details['FileSystem'] = RAW\n else:\n details['Letter'] = tmp.group(1)\n # Remove empty lines from output\n tmp = [s.strip() for s in output.splitlines() if s.strip() != '']\n # Split each line on ':' skipping those without ':'\n tmp = [s.split(':') for s in tmp if ':' in s]\n # Add key/value pairs to the details variable and return dict\n details.update({key.strip(): value.strip() for (key, value) in tmp})\n\n # Get MBR type / GPT GUID for extra details on \"Unknown\" partitions\n guid = PARTITION_UIDS.get(details.get('Type').upper(), {})\n if guid:\n details.update({\n 'Description': guid.get('Description', '')[:29],\n 'OS': guid.get('OS', 'Unknown')[:27]})\n\n if 'Letter' 
in details:\n # Disk usage\n try:\n tmp = psutil.disk_usage('{}:\\\\'.format(details['Letter']))\n except OSError as err:\n details['FileSystem'] = 'Unknown'\n details['Error'] = err.strerror\n else:\n details['Used Space'] = human_readable_size(tmp.used)\n\n # fsutil details\n cmd = [\n 'fsutil',\n 'fsinfo',\n 'volumeinfo',\n '{}:'.format(details['Letter'])\n ]\n try:\n result = run_program(cmd)\n except subprocess.CalledProcessError:\n pass\n else:\n output = result.stdout.decode().strip()\n # Remove empty lines from output\n tmp = [s.strip() for s in output.splitlines() if s.strip() != '']\n # Add \"Feature\" lines\n details['File System Features'] = [s.strip() for s in tmp\n if ':' not in s]\n # Split each line on ':' skipping those without ':'\n tmp = [s.split(':') for s in tmp if ':' in s]\n # Add key/value pairs to the details variable and return dict\n details.update({key.strip(): value.strip() for (key, value) in tmp})\n\n # Set Volume Name\n details['Name'] = details.get('Volume Name', '')\n\n # Set FileSystem Type\n if details.get('FileSystem', '') not in ['RAW', 'Unknown']:\n details['FileSystem'] = details.get('File System Name', 'Unknown')\n\n return details", "def time_partitioning(self) -> 'outputs.TimePartitioningResponse':\n return pulumi.get(self, \"time_partitioning\")", "def partitions(self, topic):\n kc = KafkaCat(self)\n md = kc.metadata()\n topic = next(filter(lambda t: t[\"topic\"] == topic, md[\"topics\"]))\n\n def make_partition(p):\n index = p[\"partition\"]\n leader_id = p[\"leader\"]\n leader = None if leader_id == -1 else self.get_node(leader_id)\n replicas = [self.get_node(r[\"id\"]) for r in p[\"replicas\"]]\n return Partition(index, leader, replicas)\n\n return [make_partition(p) for p in topic[\"partitions\"]]", "def _get_partition(region_name: str) -> str:\n\n if region_name.startswith(\"us-gov\"):\n return \"aws-us-gov\"\n\n return \"aws\"", "def require_partition_filter(self) -> bool:\n return pulumi.get(self, \"require_partition_filter\")", "def get(self):\n return self._partition", "def test_ingest_dataframe_no_partition(\n self, mocked_client, mocker, non_partitioned_df, tmp_path\n ):\n mocked_client._core_service_stub = Core.CoreServiceStub(\n grpc.insecure_channel(\"\")\n )\n\n mocker.patch.object(\n mocked_client._core_service_stub,\n \"GetFeatureTable\",\n return_value=_ingest_test_getfeaturetable_mocked_resp(f\"file://{tmp_path}\"),\n )\n\n mocked_client.set_project(\"my_project\")\n ft = mocked_client.get_feature_table(\"ingest_featuretable\")\n mocked_client.ingest(ft, non_partitioned_df, timeout=600)\n\n # Since not partitioning, we're only looking for single file\n single_file = [\n f for f in os.listdir(tmp_path) if os.path.isfile(os.path.join(tmp_path, f))\n ][0]\n pq_df = pq.read_table(tmp_path / single_file).to_pandas()\n\n non_partitioned_df, pq_df = _ingest_test_format_dataframes(\n non_partitioned_df, pq_df\n )\n\n assert_frame_equal(non_partitioned_df, pq_df)", "def markPartitionCreated(partitionTableName):\n global partitionCreationHistory\n partitionCreationHistory.add(partitionTableName)", "def write_table(self, df):\n (part_names, grouped_df, part_offsets,) = _get_groups_and_offsets(\n df=df,\n partition_cols=self.partition_cols,\n preserve_index=self.common_args[\"index\"],\n )\n fs = ioutils._ensure_filesystem(None, self.path, None)\n fs.mkdirs(self.path, exist_ok=True)\n\n full_paths = []\n metadata_file_paths = []\n full_offsets = [0]\n\n for idx, keys in enumerate(part_names.itertuples(index=False)):\n subdir = 
fs.sep.join(\n [\n f\"{name}={val}\"\n for name, val in zip(self.partition_cols, keys)\n ]\n )\n prefix = fs.sep.join([self.path, subdir])\n fs.mkdirs(prefix, exist_ok=True)\n current_offset = (part_offsets[idx], part_offsets[idx + 1])\n num_chunks = 1\n parts = 1\n\n if self.max_file_size is not None:\n # get the current partition\n start, end = current_offset\n sliced_df = grouped_df[start:end]\n\n current_file_size = _get_estimated_file_size(sliced_df)\n if current_file_size > self.max_file_size:\n # if the file is too large, compute metadata for\n # smaller chunks\n parts = math.ceil(current_file_size / self.max_file_size)\n new_offsets = list(\n range(start, end, int((end - start) / parts))\n )[1:]\n new_offsets.append(end)\n num_chunks = len(new_offsets)\n parts = len(new_offsets)\n full_offsets.extend(new_offsets)\n else:\n full_offsets.append(end)\n\n curr_file_num = 0\n num_chunks = 0\n while num_chunks < parts:\n new_file_name = f\"{self.filename}_{curr_file_num}.parquet\"\n new_full_path = fs.sep.join([prefix, new_file_name])\n\n # Check if the same `new_file_name` exists and\n # generate a `new_file_name`\n while new_full_path in self._file_sizes and (\n self._file_sizes[new_full_path]\n + (current_file_size / parts)\n ) > (self.max_file_size):\n curr_file_num += 1\n new_file_name = (\n f\"{self.filename}_{curr_file_num}.parquet\"\n )\n new_full_path = fs.sep.join([prefix, new_file_name])\n\n self._file_sizes[new_full_path] = self._file_sizes.get(\n new_full_path, 0\n ) + (current_file_size / parts)\n full_paths.append(new_full_path)\n metadata_file_paths.append(\n fs.sep.join([subdir, new_file_name])\n )\n num_chunks += 1\n curr_file_num += 1\n else:\n self.filename = self.filename or _generate_filename()\n full_path = fs.sep.join([prefix, self.filename])\n full_paths.append(full_path)\n metadata_file_paths.append(\n fs.sep.join([subdir, self.filename])\n )\n full_offsets.append(current_offset[1])\n\n paths, metadata_file_paths, offsets = (\n full_paths,\n metadata_file_paths,\n full_offsets,\n )\n existing_cw_batch = defaultdict(dict)\n new_cw_paths = []\n partition_info = [(i, j - i) for i, j in zip(offsets, offsets[1:])]\n\n for path, part_info, meta_path in zip(\n paths,\n partition_info,\n metadata_file_paths,\n ):\n if path in self.path_cw_map: # path is a currently open file\n cw_idx = self.path_cw_map[path]\n existing_cw_batch[cw_idx][path] = part_info\n else: # path not currently handled by any chunked writer\n new_cw_paths.append((path, part_info, meta_path))\n\n # Write out the parts of grouped_df currently handled by existing cw's\n for cw_idx, path_to_part_info_map in existing_cw_batch.items():\n cw = self._chunked_writers[cw_idx][0]\n # match found paths with this cw's paths and nullify partition info\n # for partition_col values not in this batch\n this_cw_part_info = [\n path_to_part_info_map.get(path, (0, 0))\n for path in self._chunked_writers[cw_idx][1]\n ]\n cw.write_table(grouped_df, this_cw_part_info)\n\n if new_cw_paths:\n # Create new cw for unhandled paths encountered in this write_table\n new_paths, part_info, meta_paths = zip(*new_cw_paths)\n self._chunked_writers.append(\n (\n ParquetWriter(new_paths, **self.common_args),\n new_paths,\n meta_paths,\n )\n )\n new_cw_idx = len(self._chunked_writers) - 1\n self.path_cw_map.update({k: new_cw_idx for k in new_paths})\n self._chunked_writers[-1][0].write_table(grouped_df, part_info)", "def get_table_definition(db_name, schema_name, table_name, server_name, data_partition_column_name='', 
excluded_columns=()):\n server_name = '' if server_name == '127.0.0.1' or server_name == 'localhost' else server_name\n server_name = f'[{server_name}].' if server_name else ''\n\n sql = (\"SELECT T.name AS TABLE_NAME, C.name AS COLUMN_NAME, P.name AS DATA_TYPE, \"\n \"P.max_length AS SIZE, CAST(P.precision AS VARCHAR) + '/' + CAST(P.scale AS VARCHAR) AS PRECISION_SCALE, \"\n \"c.* FROM {0}[{1}].sys.objects AS T JOIN {0}[{1}].sys.columns AS C ON T.object_id = C.object_id \"\n \"JOIN {0}[{1}].sys.types AS P ON C.system_type_id = P.system_type_id \"\n \"JOIN sys.schemas ss ON (T.schema_id = ss.schema_id) \"\n \" WHERE T.type_desc = 'USER_TABLE' and ss.name = ? \"\n \"and T.name = ? and P.name != 'timestamp' and P.name != 'sysname' order by column_id asc\").format(server_name, db_name)\n\n columns = fetch_rows(sql, [schema_name, table_name])\n\n target_table_column_prefix = get_config()['TARGET_TABLE_COLUMN_PREFIX']\n out_columns = {}\n\n for column in columns:\n column['original_data_type'] = column['data_type']\n\n if column['column_name'].upper() in default_columns:\n column['target_table_column_name'] = target_table_column_prefix + column['column_name']\n else:\n column['target_table_column_name'] = column['column_name']\n\n # Update the data type for the data partition column\n if data_partition_column_name != '' and column['column_name'].upper() == data_partition_column_name.upper():\n column['data_type'] = 'datetime'\n\n out_columns[column['column_name'].upper()] = column\n\n if len(excluded_columns) > 0:\n for excluded_column in excluded_columns:\n out_columns.pop(excluded_column)\n\n return out_columns", "def estimate_peptide_partition_utilizations(cls, session: Session) -> list:\n return session.execute(f\"SELECT relname, reltuples::BIGINT FROM pg_class WHERE relname SIMILAR TO '{Peptide.__tablename__}_[0-9]{{3}}';\").fetchall()", "def __get_partition__(self, account, container, obj, part_shift):\n \n key = hash_path(account, container, obj, raw_digest=True)\n part = unpack_from('>I', key)[0] >> part_shift\n return part", "def getPartitionFunction(self, Tlist):\n\t\treturn _modes.hinderedrotor_partitionfunction(Tlist, self.frequency, self.barrier) ** self.degeneracy", "def get_partition_keys(self, current_time: Optional[datetime] = None) -> Sequence[str]:\n return self.partitions_def.get_partition_keys(current_time)", "def get_partition_cfg(partition_type: str, **kwargs) -> Dict:\n raise NotImplementedError", "def disk_partitions(all=False):\n phydevs = []\n f = open(\"/proc/filesystems\", \"r\")\n for line in f:\n if not line.startswith(\"nodev\"):\n phydevs.append(line.strip())\n\n retlist = []\n f = open('/etc/mtab', \"r\")\n for line in f:\n if not all and line.startswith('none'):\n continue\n fields = line.split()\n device = fields[0]\n mountpoint = fields[1]\n fstype = fields[2]\n if not all and fstype not in phydevs:\n continue\n if device == 'none':\n device = ''\n ntuple = disk_ntuple(device, mountpoint, fstype)\n retlist.append(ntuple)\n return retlist", "def exist_partition(self, partition_spec):\n return partition_spec in self.partitions", "def get_partition_boundaries(cls, session: Session):\n rows = session.execute(\"select pg_class.relname, pg_get_expr(pg_class.relpartbound, pg_class.oid, true) from pg_class where relname SIMILAR TO 'peptides_[0-9]{3}';\").fetchall()\n num_regex = re.compile(r\"\\d+\")\n partition_boundaries = []\n for row in rows:\n matches = re.findall(num_regex, row[1])\n partition_boundaries.append((row[0], int(matches[0]), int(matches[1])))\n 
return partition_boundaries", "def getPartitionFunction(self, Tlist):\n\t\treturn _modes.harmonicoscillator_partitionfunction(Tlist, self.frequency) ** self.degeneracy", "def test_partition_tables_default_partition(sdc_builder, sdc_executor, gcp):\n bucket_name = f'stf_{get_random_string(ascii_lowercase, 10)}'\n dataset_name_1 = f'stf_{get_random_string(ascii_lowercase, 10)}'\n table_name_1 = f'stf_{get_random_string(ascii_lowercase, 10)}'\n dataset_name_2 = f'stf_{get_random_string(ascii_lowercase, 10)}'\n table_name_2 = f'stf_{get_random_string(ascii_lowercase, 10)}'\n dataset_el_var = \"${record:attribute('sdc.dataset.name')}\"\n table_el_var = \"${record:attribute('sdc.table.name')}\"\n records_count = 20\n\n # Build the pipeline\n pipeline_builder = sdc_builder.get_pipeline_builder()\n\n # Dev data generator\n dev_data_generator = pipeline_builder.add_stage('Dev Data Generator')\n dev_data_generator.set_attributes(batch_size=10,\n records_to_be_generated=records_count,\n fields_to_generate=[{\"type\": \"POKEMON\", \"field\": \"name\"},\n {\"type\": \"INTEGER\", \"field\": \"id\"}])\n\n # Build Stream Selector\n selector = pipeline_builder.add_stage('Stream Selector')\n\n # Build Expression Evaluators\n expression_evaluator_1 = pipeline_builder.add_stage('Expression Evaluator')\n expression_evaluator_1.set_attributes(header_attribute_expressions=[\n {'attributeToSet': 'sdc.dataset.name',\n 'headerAttributeExpression': dataset_name_1},\n {'attributeToSet': 'sdc.table.name',\n 'headerAttributeExpression': table_name_1}]\n )\n\n expression_evaluator_2 = pipeline_builder.add_stage('Expression Evaluator')\n expression_evaluator_2.set_attributes(header_attribute_expressions=[\n {'attributeToSet': 'sdc.dataset.name',\n 'headerAttributeExpression': dataset_name_2},\n {'attributeToSet': 'sdc.table.name',\n 'headerAttributeExpression': table_name_2}]\n )\n\n # Google BigQuery destination stage\n bigquery = pipeline_builder.add_stage(name=DESTINATION_STAGE_NAME)\n bigquery.set_attributes(project_id=gcp.project_id,\n dataset=dataset_el_var,\n table=table_el_var,\n bucket=bucket_name,\n enable_data_drift=True,\n create_table=True,\n create_dataset=True,\n purge_stage_file_after_ingesting=True,\n partition_table=True,\n partition_configuration=[\n {\"dataset\": dataset_name_1,\n \"table\": table_name_1,\n \"partitionType\": \"INGESTION\",\n \"timePartitionType\": \"MONTH\",\n \"timePartitionExpiration\": 0},\n {\"defaultPartition\": True,\n \"partitionType\": \"INGESTION\",\n \"timePartitionType\": \"YEAR\",\n \"timePartitionExpiration\": 0}\n ])\n\n dev_data_generator >> selector >> expression_evaluator_1 >> bigquery\n selector >> expression_evaluator_2 >> bigquery\n\n selector.condition = [dict(outputLane=selector.output_lanes[0], predicate='${record:value(\\'/id\\')%2==0}'),\n dict(outputLane=selector.output_lanes[1], predicate='default')]\n\n pipeline = pipeline_builder.build().configure_for_environment(gcp)\n\n bigquery_client = gcp.bigquery_client\n dataset_ref_1 = DatasetReference(gcp.project_id, dataset_name_1)\n dataset_ref_2 = DatasetReference(gcp.project_id, dataset_name_2)\n\n try:\n logger.info(f'Creating temporary bucket {bucket_name}')\n bucket = gcp.retry_429(gcp.storage_client.create_bucket)(bucket_name)\n\n sdc_executor.add_pipeline(pipeline)\n sdc_executor.start_pipeline(pipeline).wait_for_finished()\n\n # Verify by reading records using Google BigQuery client\n table_1 = bigquery_client.get_table(f'{dataset_name_1}.{table_name_1}')\n data_from_bigquery_1 = [tuple(row.values()) 
for row in bigquery_client.list_rows(table_1)]\n data_from_bigquery_1.sort()\n\n table_2 = bigquery_client.get_table(f'{dataset_name_2}.{table_name_2}')\n data_from_bigquery_2 = [tuple(row.values()) for row in bigquery_client.list_rows(table_2)]\n data_from_bigquery_2.sort()\n\n # Assert table is partitioned as well\n assert table_1.time_partitioning.type_ == 'MONTH'\n assert table_2.time_partitioning.type_ == 'YEAR'\n assert len(data_from_bigquery_1) + len(data_from_bigquery_2) == records_count\n finally:\n _clean_up_bigquery(bigquery_client, dataset_ref_1)\n _clean_up_bigquery(bigquery_client, dataset_ref_2)\n _clean_up_gcs(gcp, bucket, bucket_name)", "def get_mgmt_partition(adapter):\n\n # There will almost always be fewer VIOSes than LPARs. Since we're\n # querying without xags, it should be very quick.\n vio_wraps = vios.VIOS.search(adapter, is_mgmt_partition=True)\n if len(vio_wraps) == 1:\n return vio_wraps[0]\n\n # We delay the query to the LPARs because there could be hundreds of them.\n # So we don't want to query it unless we need to.\n lpar_wraps = lpar.LPAR.search(adapter, is_mgmt_partition=True)\n if len(lpar_wraps) == 1:\n return lpar_wraps[0]\n\n # If we made it here, something is wrong.\n raise ex.ManagementPartitionNotFoundException(\n count=len(vio_wraps + lpar_wraps))", "def is_partition_the_last(dbapi, partition):\n idisk_uuid = partition.get('idisk_uuid')\n onidisk_parts = dbapi.partition_get_by_idisk(idisk_uuid)\n part_number = get_part_number(partition.get('device_path'))\n\n if int(part_number) != len(onidisk_parts):\n return False\n\n return True", "def _key_for_partition(self, user_partition):\r\n return 'xblock.partition_service.partition_{0}'.format(user_partition.id)" ]
[ "0.72047216", "0.690783", "0.6624961", "0.657136", "0.64423627", "0.6396148", "0.62411654", "0.624026", "0.61577463", "0.606659", "0.606659", "0.6044867", "0.6042913", "0.60408926", "0.6003339", "0.5997353", "0.5991223", "0.5967711", "0.5941188", "0.59136415", "0.5886885", "0.58211505", "0.58148944", "0.58081156", "0.5779908", "0.57725185", "0.5755787", "0.57544607", "0.567957", "0.5661686", "0.56517917", "0.5614113", "0.56137264", "0.5598748", "0.5585543", "0.55823165", "0.5580243", "0.5578068", "0.55480987", "0.5542683", "0.55200696", "0.5503679", "0.55024123", "0.55023235", "0.548938", "0.548938", "0.54761887", "0.54701227", "0.5467146", "0.5444355", "0.5434276", "0.541746", "0.54148054", "0.54090536", "0.53947675", "0.5372952", "0.53667015", "0.53594244", "0.53433573", "0.53373325", "0.5334659", "0.5323726", "0.53230244", "0.53111804", "0.52849317", "0.52834296", "0.5282378", "0.52788526", "0.52788526", "0.5278486", "0.5277651", "0.52721465", "0.5270193", "0.52530074", "0.52530074", "0.52185017", "0.5211883", "0.52089536", "0.5206767", "0.52009976", "0.5196631", "0.519654", "0.5171216", "0.5167179", "0.515837", "0.5152577", "0.5148404", "0.51351565", "0.5129381", "0.51263976", "0.51202214", "0.5118292", "0.5118157", "0.5116021", "0.50955504", "0.508426", "0.50827", "0.50706196", "0.5058876", "0.50514627" ]
0.7471435
0
From the topology, prepare a table request and pass it to the sink
def yield_table(
    self, table_name_and_type: Tuple[str, str]
) -> Iterable[Optional[CreateTableRequest]]:
    table_name, table_type = table_name_and_type
    schema_name = self.context.database_schema.name.__root__
    db_name = self.context.database.name.__root__
    try:
        columns, table_constraints = self.get_columns_and_constraints(
            schema_name=schema_name,
            table_name=table_name,
            db_name=db_name,
            inspector=self.inspector,
        )
        view_definition = self.get_view_definition(
            table_type=table_type,
            table_name=table_name,
            schema_name=schema_name,
            inspector=self.inspector,
        )
        table_request = CreateTableRequest(
            name=table_name,
            tableType=table_type,
            description=self.get_table_description(
                schema_name=schema_name,
                table_name=table_name,
                inspector=self.inspector,
            ),
            columns=columns,
            viewDefinition=view_definition,
            tableConstraints=table_constraints if table_constraints else None,
            databaseSchema=EntityReference(
                id=self.context.database_schema.id,
                type="databaseSchema",
            ),
            tags=self.get_tag_labels(
                table_name=table_name
            ),  # Pick tags from context info, if any
        )
        is_partitioned, partition_details = self.get_table_partition_details(
            table_name=table_name, schema_name=schema_name, inspector=self.inspector
        )
        if is_partitioned:
            table_request.tableType = TableType.Partitioned.value
            table_request.tablePartition = partition_details
        if table_type == TableType.View or view_definition:
            table_view = TableView.parse_obj(
                {
                    "table_name": table_name,
                    "schema_name": schema_name,
                    "db_name": db_name,
                    "view_definition": view_definition,
                }
            )
            self.context.table_views.append(table_view)
        yield table_request
        self.register_record(table_request=table_request)
    except Exception as exc:
        logger.debug(traceback.format_exc())
        logger.warning(f"Unexpected exception to yield table [{table_name}]: {exc}")
        self.status.failures.append(f"{self.config.serviceName}.{table_name}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main(event, context):\n # pylint: enable=unused-argument\n data = decode(event[\"data\"])\n to_table([data], PROJECT, DATASET, TABLE)", "def _create_input_data(self):\n SCHEMA = parse_table_schema_from_json(\n '{\"fields\": [{\"name\": \"data\", \"type\": \"BYTES\"}]}')\n\n def format_record(record):\n # Since Synthetic Source returns data as a dictionary, we should skip one\n # of the part\n import base64\n return {'data': base64.b64encode(record[1])}\n\n with TestPipeline() as p:\n ( # pylint: disable=expression-not-assigned\n p\n | 'Produce rows' >> Read(\n SyntheticSource(self.parse_synthetic_source_options()))\n | 'Format' >> Map(format_record)\n | 'Write to BigQuery' >> WriteToBigQuery(\n dataset=self.input_dataset,\n table=self.input_table,\n schema=SCHEMA,\n create_disposition=BigQueryDisposition.CREATE_IF_NEEDED,\n write_disposition=BigQueryDisposition.WRITE_EMPTY))", "def GetTableData(self, query_parameters):\n raise NotImplementedError('Implement this')", "def create_table_request_info(self):\n table_query = f\"\"\"\n Create Table If Not Exists Request_Info(\n {self.__fields[0]} INT AUTO_INCREMENT PRIMARY KEY,\n {self.__fields[1]} TIMESTAMP DEFAULT CURRENT_TIMESTAMP,\n {self.__fields[2]} CHAR(30),\n {self.__fields[3]} CHAR(30),\n {self.__fields[4]} CHAR(30) NULL,\n {self.__fields[5]} DATE,\n {self.__fields[6]} CHAR(15),\n {self.__fields[7]} CHAR(30),\n {self.__fields[8]} CHAR(30),\n {self.__fields[9]} CHAR(30),\n {self.__fields[10]} INT(32),\n {self.__fields[11]} CHAR(30),\n {self.__fields[12]} INT(32),\n {self.__fields[13]} VARCHAR(30))\n \"\"\"\n self.execute(table_query)", "def _setup_origin_table(self):\n if self._create_table_if_not_exists(self.dataset):\n return\n\n directory, pipeline_builder = self._directory_origin(MAX_CONCURRENCY)\n jdbc_producer = pipeline_builder.add_stage('JDBC Producer', type='destination')\n jdbc_producer.set_attributes(default_operation=\"INSERT\",\n field_to_column_mapping=[],\n enclose_object_names=True,\n use_multi_row_operation=True,\n statement_parameter_limit=32768,\n table_name=self.dataset)\n\n directory >> jdbc_producer\n\n pipeline = pipeline_builder.build().configure_for_environment(self.environments['database'])\n self.sdc_executor.add_pipeline(pipeline)\n self.sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(self.record_count, timeout_sec=LOAD_TIMEOUT)\n self.sdc_executor.stop_pipeline(pipeline)\n self.sdc_executor.remove_pipeline(pipeline)", "def _init_table(self, table: \"Table\"):\n if not self.columns:\n self.columns = table.columns\n self._data = table.data", "def run(self):\n\n next_node = super().run(printer)\n # Get the values of the parameters, dereferencing any variables\n P = self.parameters.current_values_to_dict(\n context=seamm.flowchart_variables._data\n )\n tablename = P[\"table name\"]\n\n # Print out header to the main output\n printer.important(self.description_text(P))\n printer.important(\"\")\n\n if P[\"method\"] == \"Create\":\n table = pandas.DataFrame()\n defaults = {}\n for d in self.parameters[\"columns\"].value:\n column_name = self.get_value(d[\"name\"])\n if column_name not in table.columns:\n if d[\"type\"] == \"boolean\":\n if d[\"default\"] == \"\":\n default = False\n else:\n default = bool(d[\"default\"])\n elif d[\"type\"] == \"integer\":\n if d[\"default\"] == \"\":\n default = 0\n else:\n default = int(d[\"default\"])\n elif d[\"type\"] == \"float\":\n if d[\"default\"] == \"\":\n default = np.nan\n else:\n default = float(d[\"default\"])\n elif d[\"type\"] == 
\"string\":\n default = d[\"default\"]\n\n table[column_name] = default\n defaults[column_name] = default\n\n self.logger.info(f\"Creating table '{tablename}'\")\n\n index = P[\"index column\"]\n if index == \"\" or index == \"--none--\":\n index = None\n else:\n if index not in table.columns:\n columns = \", \".join(table.columns)\n raise ValueError(\n f\"The index column '{index}' is not in the table: columns = \"\n f\"{columns}\"\n )\n table.set_index(index, inplace=True)\n self.set_variable(\n tablename,\n {\n \"type\": \"pandas\",\n \"table\": table,\n \"defaults\": defaults,\n \"index column\": index,\n \"loop index\": False,\n \"current index\": 0,\n },\n )\n elif P[\"method\"] == \"Read\":\n filename = P[\"filename\"]\n\n self.logger.debug(\" read table from {}\".format(filename))\n\n file_type = P[\"file type\"]\n if file_type == \"from extension\":\n file_type = PurePath(filename).suffix\n if file_type not in self.parameters[\"file type\"].enumeration:\n types = \"', '\".join(self.parameters[\"file type\"].enumeration)\n raise RuntimeError(\n f\"Cannot handle files of type '{file_type}' when reading \"\n f\"table '{tablename}'.\\nKnown types: '{types}'\"\n )\n\n if file_type == \".csv\":\n table = pandas.read_csv(filename, index_col=False)\n elif file_type == \".json\":\n table = pandas.read_json(filename)\n elif file_type == \".xlsx\":\n table = pandas.read_excel(filename, index_col=False)\n elif file_type == \".txt\":\n table = pandas.read_fwf(filename, index_col=False)\n else:\n types = \"', '\".join(self.parameters[\"file type\"].enumeration)\n raise RuntimeError(\n f\"Table save: cannot handle format '{file_type}' for file \"\n f\"'{filename}'\\nKnown types: '{types}'\"\n )\n\n index = P[\"index column\"]\n if index == \"\" or index == \"--none--\":\n index = None\n else:\n if index not in table.columns:\n columns = \", \".join(table.columns)\n raise ValueError(\n f\"The index column '{index}' is not in the table: columns = \"\n f\"{columns}\"\n )\n table.set_index(index, inplace=True)\n\n self.logger.debug(\" setting up dict in {}\".format(tablename))\n self.set_variable(\n tablename,\n {\n \"type\": \"pandas\",\n \"filename\": filename,\n \"table\": table,\n \"defaults\": {},\n \"index column\": index,\n \"loop index\": False,\n \"current index\": 0,\n },\n )\n\n self.logger.info(\"Successfully read table from {}\".format(filename))\n elif P[\"method\"] == \"Save\" or P[\"method\"] == \"Save as\":\n self.calls += 1\n if self.calls % P[\"frequency\"] == 0:\n if not self.variable_exists(tablename):\n raise RuntimeError(\n \"Table save: table '{}' does not exist.\".format(tablename)\n )\n file_type = P[\"file type\"]\n table_handle = self.get_variable(tablename)\n table = table_handle[\"table\"]\n if P[\"method\"] == \"Save as\":\n filename = P[\"filename\"]\n table_handle[\"filename\"] = filename\n else:\n if \"filename\" not in table_handle:\n if file_type == \"from extension\":\n file_type = \".csv\"\n table_handle[\"filename\"] = os.path.join(\n self.flowchart.root_directory, tablename + file_type\n )\n filename = table_handle[\"filename\"]\n\n index = table_handle[\"index column\"]\n\n if file_type == \"from extension\":\n file_type = PurePath(filename).suffix\n if file_type not in self.parameters[\"file type\"].enumeration:\n types = \"', '\".join(self.parameters[\"file type\"].enumeration)\n raise RuntimeError(\n f\"Cannot handle files of type '{file_type}' when writing \"\n f\"table '{tablename}'.\\nKnown types: '{types}'\"\n )\n if file_type == \".csv\":\n if index 
is None:\n table.to_csv(filename, index=False)\n else:\n table.to_csv(filename, index=True, header=True)\n elif file_type == \".json\":\n if index is None:\n table.to_json(filename, indent=4, orient=\"table\", index=False)\n else:\n table.to_json(filename, indent=4, orient=\"table\", index=True)\n elif file_type == \".xlsx\":\n if index is None:\n table.to_excel(filename, index=False)\n else:\n table.to_excel(filename, index=True)\n elif file_type == \".txt\":\n with open(filename, \"w\") as fd:\n if index is None:\n fd.write(table.to_string(header=True, index=False))\n else:\n fd.write(table.to_string(header=True, index=True))\n else:\n types = \"', '\".join(self.parameters[\"file type\"].enumeration)\n raise RuntimeError(\n f\"Table save: cannot handle format '{file_type}' for file \"\n f\"'{filename}'\\nKnown types: '{types}'\"\n )\n elif P[\"method\"] == \"Print\":\n table_handle = self.get_variable(tablename)\n table = table_handle[\"table\"]\n index = table_handle[\"index column\"]\n printer.job(\"\\nTable '{}':\".format(tablename))\n if index is None:\n printer.job(table.to_string(header=True, index=False))\n else:\n printer.job(table.to_string(header=True, index=True))\n\n elif P[\"method\"] == \"Print the current row of\":\n table_handle = self.get_variable(tablename)\n table = table_handle[\"table\"]\n index = table_handle[\"current index\"]\n self.logger.debug(\"index = {}\".format(index))\n index = table.index.get_loc(index)\n self.logger.debug(\" --> {}\".format(index))\n if index is None:\n lines = table.to_string(header=True, index=False)\n else:\n lines = table.to_string(header=True, index=True)\n\n self.logger.debug(lines)\n self.logger.debug(\"-----\")\n\n if index == 0:\n printer.job(\"\\nTable '{}':\".format(tablename))\n printer.job(\"\\n\".join(lines.splitlines()[0:2]))\n else:\n printer.job(lines.splitlines()[index + 1])\n\n elif P[\"method\"] == \"Append a row to\":\n if not self.variable_exists(tablename):\n raise RuntimeError(\n \"Table save: table '{}' does not exist.\".format(tablename)\n )\n table_handle = self.get_variable(tablename)\n if \"defaults\" in table_handle:\n defaults = table_handle[\"defaults\"]\n else:\n defaults = {}\n table = table_handle[\"table\"]\n column_types = {}\n for column_name, column_type in zip(table.columns, table.dtypes):\n if column_type == \"object\":\n column_types[column_name] = \"string\"\n elif column_type == \"bool\":\n column_types[column_name] = \"boolean\"\n elif column_type == \"int64\":\n column_types[column_name] = \"integer\"\n elif column_type == \"float64\":\n column_types[column_name] = \"float\"\n\n new_row = {}\n\n for d in self.parameters[\"columns\"].value:\n column_name = self.get_value(d[\"name\"])\n value = self.get_value(d[\"value\"])\n column_type = column_types[column_name]\n if value == \"default\":\n if column_name in defaults:\n value = defaults[column_name]\n else:\n if column_type == \"boolean\":\n value = False\n elif column_type == \"integer\":\n value = 0\n elif column_type == \"float\":\n value = np.nan\n elif column_type == \"string\":\n value = \"\"\n new_row[column_name] = [value]\n new_row = pandas.DataFrame.from_dict(new_row)\n table = pandas.concat([table, new_row], ignore_index=True)\n seamm.flowchart_variables[tablename][\"table\"] = table\n seamm.flowchart_variables[tablename][\"current index\"] = table.shape[0] - 1\n elif P[\"method\"] == \"Go to the next row of\":\n if not self.variable_exists(tablename):\n raise RuntimeError(\n \"Table save: table '{}' does not 
exist.\".format(tablename)\n )\n table_handle = self.get_variable(tablename)\n table_handle[\"current index\"] += 1\n\n elif P[\"method\"] == \"Add columns to\":\n if not self.variable_exists(tablename):\n raise RuntimeError(\n \"Table save: table '{}' does not exist.\".format(tablename)\n )\n table_handle = self.get_variable(tablename)\n table = table_handle[\"table\"]\n for d in self.parameters[\"columns\"].value:\n column_name = self.get_value(d[\"name\"])\n if column_name in table.columns:\n # Need to check if this is an error\n pass\n else:\n if d[\"type\"] == \"boolean\":\n if d[\"default\"] == \"\":\n default = False\n else:\n default = bool(d[\"default\"])\n elif d[\"type\"] == \"integer\":\n if d[\"default\"] == \"\":\n default = 0\n else:\n default = int(d[\"default\"])\n elif d[\"type\"] == \"float\":\n if d[\"default\"] == \"\":\n default = np.nan\n else:\n default = float(d[\"default\"])\n elif d[\"type\"] == \"string\":\n default = d[\"default\"]\n table[d[\"name\"]] = default\n elif P[\"method\"] == \"Get element of\":\n if not self.variable_exists(tablename):\n raise RuntimeError(\n \"Table get element: table '{}' does not exist.\".format(tablename)\n )\n if P[\"column\"] == \"\":\n raise RuntimeError(\"Table get element: the column must be given\")\n column = self.get_value(P[\"column\"])\n if P[\"row\"] == \"\":\n raise RuntimeError(\"Table get element: the row must be given\")\n row = self.get_value(P[\"row\"])\n if P[\"variable name\"] == \"\":\n raise RuntimeError(\n \"Table get element: the name of the variable to \"\n \"set to the value must be given\"\n )\n variable_name = self.get_value(P[\"variable name\"])\n\n table_handle = self.get_variable(tablename)\n index = table_handle[\"index column\"]\n table = table_handle[\"table\"]\n\n if row == \"current\":\n row = table_handle[\"current index\"]\n else:\n if index is None:\n row = int(row)\n else:\n if table.index.dtype.kind == \"i\":\n row = int(row)\n row = table.index.get_loc(int(row))\n try:\n column = int(column)\n except Exception:\n column = table.columns.get_loc(column)\n\n value = table.iat[row, column]\n self.set_variable(variable_name, value)\n elif P[\"method\"] == \"Set element of\":\n if not self.variable_exists(tablename):\n raise RuntimeError(\n \"Table get element: table '{}' does not exist.\".format(tablename)\n )\n if P[\"column\"] == \"\":\n raise RuntimeError(\"Table get element: the column must be given\")\n column = self.get_value(P[\"column\"])\n if P[\"row\"] == \"\":\n raise RuntimeError(\"Table get element: the row must be given\")\n row = self.get_value(P[\"row\"])\n if P[\"value\"] == \"\":\n raise RuntimeError(\"Table set element: the value must be given\")\n value = self.get_value(P[\"value\"])\n\n table_handle = self.get_variable(tablename)\n index = table_handle[\"index column\"]\n table = table_handle[\"table\"]\n\n if row == \"current\":\n row = table_handle[\"current index\"]\n else:\n if index is None:\n row = int(row)\n else:\n if table.index.dtype.kind == \"i\":\n row = int(row)\n row = table.index.get_loc(row)\n try:\n column = int(column)\n except Exception:\n column = table.columns.get_loc(column)\n\n table.iat[row, column] = value\n else:\n methods = \", \".join(table_step.methods)\n raise RuntimeError(\n f\"The table method must be one of {methods}, not {P['method']}.\"\n )\n\n return next_node", "def __init__(self, output_file, table_model):\n pass", "def populateTable(self):\n\n output_list = self.output_ports.split(', ')\n\n for i in output_list:\n values = 
i.split('-')\n nextHopPort = values[0]\n linkCost = values[1]\n destId = values[2]\n learnedFrom = 0 # As it was learned from ConfigFile\n row = routing_row.RoutingRow(nextHopPort, destId, linkCost, destId, learnedFrom)\n self.addToRoutingTable(row)", "def table_creater(self, tablename, columnnames, entries):\n createrurl = self.casjobsurl + '/contexts/MyDB/query'", "def main(sources: gpd.GeoDataFrame, destinations: gpd.GeoDataFrame, router, annotations='duration', threads: int = 10, mts: int = 2000, keep_columns=None) -> write_stream:\n\tt = table_route(sources['geometry'], destinations['geometry'], router, annotations=annotations, max_table_size=mts, threads=threads)\n\n\tif keep_columns is not None:\n\t\tkeep_columns = keep_columns.split(',')\n\t\tfor k in keep_columns:\n\t\t\tif k not in sources and k not in destinations:\n\t\t\t\traise CommandError(f'column {k} not present in sources, nor in destinations. Available columns are: sources: {\", \".join(list(sources))}, destinations: {\", \".join(list(sources))}')\n\t\tsub_sources = sources[[k for k in keep_columns if k in sources]]\n\t\tsub_destinations = destinations[[k for k in keep_columns if k in destinations]]\n\n\tfor df in t:\n\t\tdf['geometry'] = utils.linestring_between(df.geometry, df.geometry_dest)\n\t\tdf.drop('geometry_dest', axis=1, inplace=True)\n\t\tif keep_columns is not None:\n\t\t\tdf = df.merge(sub_sources, left_on='source', right_index=True, suffixes=('', '_source'))\n\t\t\tdf = df.merge(sub_destinations, left_on='destination', right_index=True, suffixes=('', '_dest'))\n\n\t\tyield gpd.GeoDataFrame(df, crs=4326)", "def table_builder(request):\n kind=request.param\n def _builder(data, columns):\n if kind==\"array\":\n return np.array(data)\n elif kind==\"table\":\n return DataTable(data,columns,transposed=False)\n else:\n return pd.DataFrame(data,columns=columns)\n _builder.kind=kind\n return _builder", "def setrawtable(self, rawtable):\n\n # Store the \"raw\" table data\n self.__rawtable = rawtable", "def test_upload_table_from_source(self, test_info, current_test):\n\n test_results = test_info\n\n if current_test == 'DELETE_TABLE':\n log.debug('Executing Test - \"Delete table from user area\"')\n else:\n log.debug('Executing Test - \"Upload table on the fly and query it\"')\n\n # execute request\n try:\n\n # Step 1: Read user name from credentials file. (we will use it later) and do Login()\n # -----------------------------------------------------------------------------------\n\n self.gaia.login(user=credentials.USERNAME, password=credentials.PASSWORD)\n\n # Step 2: Check if table already exist. If it does, then deleted it before run the script\n # -----------------------------------------------------------------------------------------\n table_name = test_results['table_name']\n\n try:\n self.gaia.delete_user_table(table_name)\n except:\n log.warning(f\"Table {table_name} didn't exist. 
Continuing...\")\n\n # Step 3: Upload table from resource\n # -----------------------------------------------------------------------------------------\n test_resource = paths.path2_example_tb_4_onthefly\n table_description = test_results['table_description']\n self.gaia.upload_table(upload_resource=test_resource, table_name=table_name,\n table_description=table_description)\n sleep(int(conf.TIME_OUT))\n\n # Step 4: Query the new table\n # -----------------------------------------------------------------------------------------\n\n # Use the user name that we obtained before to replace the pattern in\n # 'user_@[email protected]_test_from_url'\n\n full_qualified_table_name = test_common_cons.FULL_QUALIFIED_USER_TABLE_NAME_PATTERN. \\\n replace(test_common_cons.LOGIN_USER_PATTERN, credentials.USERNAME)\n full_qualified_table_name = full_qualified_table_name.replace(test_common_cons.TABLE_NAME_PATTERN,\n table_name)\n\n # Now replace the pattern in Query with its correct value\n query = test_results['test_query'].replace(test_common_cons.TABLE_NAME_PATTERN, full_qualified_table_name)\n\n # Now we are ready to query the new table\n job2 = self.gaia.launch_job(query=query)\n results = job2.get_results()\n\n log.debug(str(results))\n\n n_results = len(results)\n log.debug(f'N results from the table upload from URL is {n_results}')\n\n # Step 5: Delete now the table from the user schema.\n # -----------------------------------------------------------------------------------------\n job_delete = None\n try:\n job_delete = self.gaia.delete_user_table(table_name)\n except:\n log.warning(f\"Table {table_name} didn't exist. Continuing...\")\n\n # Get current time to complete our result object\n time = datetime.now()\n time_str = time.strftime('%Y-%m-%d %H:%M:%S')\n test_results['test_finished'] = f'{time_str} CET'\n\n if current_test == 'DELETE_TABLE':\n try:\n error_message = \"Ooops! Something went wrong. Table still exist in user space.\"\n job = self.gaia.launch_job(query=query)\n job.get_results()\n log.debug(str(job_delete) + \"_:\" + error_message)\n test_results['test_result'] = NOT_PASSED\n test_results['test_additional_info'] = error_message + \",\" + str(job_delete)\n\n except HTTPError as err:\n\n # if the table does not exist. TAP gives back an http error. 
In order to check if the test of the\n # delete table is correct, we are going to check the content of that HTTPError.\n error_message = f'Query did not give back any result because {full_qualified_table_name} does ' \\\n f'not exist in the user space '\n\n if table_name in str(err):\n log.debug(error_message)\n test_results['test_result'] = PASSED\n test_results['test_additional_info'] = error_message\n\n else:\n test_results['test_result'] = NOT_PASSED\n test_results['test_additional_info'] = error_message + \",\" + str(err)\n log.debug(error_message)\n\n # Step 6: Finally we will do a logout from the system before returning the results after the\n # exception.\n # -----------------------------------------------------------------------------------------\n\n self.gaia.logout()\n return test_results\n else:\n # We are not testing the delete of the tables so we will focus on the results returned in order to check\n # if the test is correct or not.\n\n if n_results > 0:\n # Test passed\n test_results['test_result'] = PASSED\n debug_message = f' Number of results from the table upload from the url provided is {n_results}'\n test_results['test_additional_info'] = debug_message + str(results)\n log.debug(debug_message + \" TEST PASSED!!!\")\n else:\n test_results['test_result'] = NOT_PASSED\n error_message = f' Number of results from the table upload from the url provided is {n_results}' \\\n f' or something happened. FAILED TEST'\n test_results['test_additional_info'] = error_message\n log.debug(error_message)\n raise ValueError(error_message)\n\n # Step 6: Finally we will do a logout from the system.\n # -----------------------------------------------------------------------------------------\n self.gaia.logout()\n return test_results\n\n except ValueError as err:\n log.error(str(err))\n # Get current time\n time = datetime.now()\n time_str = time.strftime('%Y-%m-%d %H:%M:%S')\n # fill result object with the info from the http error\n test_results['test_finished'] = f'{time_str} CET'\n test_results['test_result'] = NOT_PASSED\n test_results['test_additional_info'] = str(err)\n self.gaia.logout()\n return test_results\n\n except HTTPError as err:\n\n error_message = \"Error connecting TAP server\"\n log.error(error_message)\n\n # Get current time\n time = datetime.now()\n time_str = time.strftime('%Y-%m-%d %H:%M:%S')\n # fill result object with the info from the http error\n test_results['test_finished'] = f'{time_str} CET'\n test_results['test_result'] = NOT_PASSED\n test_results['test_additional_info'] = error_message + \",\" + str(err)\n self.gaia.logout()\n return test_results", "def copy_table_stream_settings(events: dict, context: dict):\n if 'SourceTableName' not in events:\n raise KeyError('Requires SourceTableName')\n if 'TargetTableName' not in events:\n raise KeyError('Requires TargetTableName')\n\n source_table_name = events['SourceTableName']\n description = _describe_table(table_name=source_table_name)['Table']\n stream_enabled = description\\\n .get('StreamSpecification', {})\\\n .get('StreamEnabled', False)\n if stream_enabled:\n target_table_name = events['TargetTableName']\n stream_view_type = description['StreamSpecification']['StreamViewType']\n settings = {\n \"StreamSpecification\": {\n \"StreamEnabled\": stream_enabled,\n \"StreamViewType\": stream_view_type\n }\n }\n result = _update_table(table_name=target_table_name, **settings)\n specification = result.get('StreamSpecification', {})\n return specification", "def main(conn, label_config, table_name, start_date, 
end_date,\r\n preprocessing_prefix):\r\n label_sql = label_config['query']\r\n label_sql = label_sql.replace('{prefix}', preprocessing_prefix)\r\n label_sql = label_sql.replace('{start_date}', start_date)\r\n label_sql = label_sql.replace('{end_date}', end_date)\r\n drop_sql = f'drop table if exists {table_name};'\r\n create_sql = f'create table {table_name} as ({label_sql});'\r\n sql.run_sql_from_string(conn, drop_sql)\r\n sql.run_sql_from_string(conn, create_sql)", "def get_table_data(table_name, query, pages, table_columns, headers, base_url, maxpagesize):\n\n\n logging.info(\"Running get_table_data() . . . \")\n table_data = []\n for p in range(pages):\n page_number = p + 1\n\n #print('\\tGetting page number {}'.format(page_number))\n #print(\"Running TEST MESSAGE . . . \")\n\n endpoint = '{0}/ws/schema/table/{1}?{2}page={3}&pagesize={4}&projection={5}'.format(base_url, table_name, query, page_number, maxpagesize, table_columns)\n r_data = requests.get(endpoint, headers=headers)\n\n if r_data.ok:\n data_json = r_data.json()\n records = data_json['record']\n for r in records:\n table_data.append(r['tables'][table_name])\n else:\n logging.info(r_data.text)\n raise Exception(r_data.text)\n\n return table_data", "def create_table(self, param, timeout):\n _abstract()", "def create_table(self, param, timeout):\n _abstract()", "def _make_request(self, method, body):\n return self.client.insert_rows_json(self.table_ref, [row['json'] for row in body['rows']])", "def subscribe(self, host:str, port:str, handler:Callable, tableName:str, actionName:str=None, offset:int=-1, resub:bool=False, \n filter=None, msgAsTable:bool=False, batchSize:int=0, throttle:float=1,\n userName:str=None, password:str=None, streamDeserializer:Optional[Type[\"streamDeserializer\"]]=None) -> None:\n if not isinstance(msgAsTable, bool):\n raise ValueError(\"msgAsTable must be a bool\")\n if filter is None:\n filter = np.array([],dtype='int64')\n if actionName is None:\n actionName = \"\"\n if userName is None:\n userName = \"\"\n if password is None:\n password = \"\"\n sd=None\n if streamDeserializer is None:\n sd = ddbcpp.streamDeserializer({})\n else:\n sd = streamDeserializer.cpp\n if batchSize > 0:\n self.cpp.subscribeBatch(host, port, handler, tableName, actionName, offset, resub, filter, msgAsTable, batchSize, throttle,userName,password,sd)\n else:\n if msgAsTable:\n raise ValueError(\"msgAsTable must be False when batchSize is 0\")\n self.cpp.subscribe(host, port, handler, tableName, actionName, offset, resub, filter,userName,password,sd)", "def _process(self, tables=None):\n\n if self._tables:\n return self._tables\n\n tables = tables or {}\n\n for row in self.url.generator.iter_rp:\n\n table_id_key = row['Table ID'].strip().lower()\n\n if not row['Line Number'].strip():\n if 'Universe' not in row['Table Title']:\n if table_id_key not in tables:\n tables[table_id_key] = Table(row['Table ID'], row['Table Title'].strip().title(),\n seq=row['Sequence Number'],\n startpos=int(row['Start Position']))\n else:\n tables[table_id_key].seq = row['Sequence Number']\n tables[table_id_key].startpos = row['Start Position']\n tables[table_id_key].subject = row['Subject Area']\n\n else:\n tables[table_id_key].universe = row['Table Title'].replace('Universe: ', '').strip()\n\n else: # column row\n try:\n\n line_no = int(row['Line Number'])\n\n if not line_no in tables[table_id_key].columns:\n tables[table_id_key].columns[line_no] = Column(row['Table ID'],\n f\"{row['Table ID']}_{line_no:03}\",\n line_no,\n description=row['Table 
Title'])\n else:\n tables[table_id_key].columns[line_no].description = row['Table Title']\n\n\n except ValueError as e:\n # Headings, which have fractional line numebrs\n # print(row)\n pass\n\n self._tables = tables\n\n return self._tables", "def __init__(self, connection=None, url=None,\r\n table=None, statement=None, schema=None, autoinit = True,\r\n **options):\r\n\r\n super(SQLDataSource, self).__init__()\r\n\r\n if not table and not statement:\r\n raise AttributeError(\"Either table or statement should be \" \\\r\n \"provided for SQL data source\")\r\n\r\n if statement:\r\n raise NotImplementedError(\"SQL source stream based on statement \" \\\r\n \"is not yet implemented\")\r\n\r\n if not options:\r\n options = {}\r\n\r\n self.url = url\r\n self.connection = connection\r\n\r\n self.table_name = table\r\n self.statement = statement\r\n self.schema = schema\r\n self.options = options\r\n\r\n self.context = None\r\n self.table = None\r\n self.fields = None\r\n\r\n if autoinit:\r\n self.initialize()", "def _table_step(self, op: data_algebra.data_ops_types.OperatorPlatform, *, data_map: Dict[str, Any]):\n if op.node_name != \"TableDescription\":\n raise TypeError(\n \"op was supposed to be a data_algebra.data_ops.TableDescription\"\n )\n res = data_map[op.table_name]\n if not self.is_appropriate_data_instance(res):\n raise ValueError(\n \"data_map[\" + op.table_name + \"] was not the right type\"\n )\n if self.use_lazy_eval and (not isinstance(res, pl.LazyFrame)):\n res = res.lazy()\n res = res.select(op.columns_produced())\n return res", "def import_table(ctx: DataFunctionContext, table_name: str, copy: bool = True):\n target_storage = ctx.execution_config.get_target_storage()\n if ensure_bool(copy):\n as_identifier = target_storage.get_api().get_quoted_identifier\n sql = f\"select * from {as_identifier(table_name)}\"\n # TODO: DRY this pattern\n sdf = SqlDataFunctionWrapper(sql)\n\n def get_sql(*args, **kwargs):\n return sql\n\n sdf.get_compiled_sql = get_sql\n return sdf(ctx)\n else:\n ctx.emit(\n name=table_name,\n storage=target_storage,\n data_format=\"table\",\n create_alias_only=True,\n )", "def _check_for_input_data(self):\n wrapper = BigQueryWrapper()\n try:\n wrapper.get_table(self.project_id, self.input_dataset, self.input_table)\n except HttpError as exn:\n if exn.status_code == 404:\n self._create_input_data()", "def pre_route_table_create(self, resource_dict):\n pass", "def latency_table_create(self):\n\n self.__table_create(Latency)", "def test_make_ags(self):\n table_factory = DataTableFactory(PACKET_DIR)\n table_factory.ags()", "def generate_table(self, rows):\n ...", "def test_sendTableWithName(self):\n client = SchemaAMP(schema)\n\n class SampleCommand(Command):\n arguments = [(\"table\", TableSyntaxByName())]\n\n class Receiver(SchemaAMP):\n @SampleCommand.responder\n def gotIt(self, table):\n self.it = table\n return {}\n\n server = Receiver(schema)\n clientT = StringTransport()\n serverT = StringTransport()\n client.makeConnection(clientT)\n server.makeConnection(serverT)\n client.callRemote(SampleCommand, table=schema.DUMMY_WORK_ITEM)\n server.dataReceived(clientT.io.getvalue())\n self.assertEqual(server.it, schema.DUMMY_WORK_ITEM)", "def _populate_output(self):\n self._store_atomic_queries_table()\n self._store_composite_queries_table()", "def _datapoint_to_dataframe(self, action, t_in):\n X = {\"action\": action,\n \"t_in\": t_in}\n\n return pd.DataFrame(X, index=[0])", "def _send_table_description(self, variant):\n \n # Set up the table description to send as 
metadata to Qlik\n self.table = SSE.TableDescription()\n self.table.name = \"SSE-Response\"\n self.table.numberOfRows = len(self.response)\n\n # Set up fields for the table\n if variant == \"setup\":\n self.table.fields.add(name=\"model_name\")\n self.table.fields.add(name=\"result\")\n self.table.fields.add(name=\"timestamp\")\n elif variant == \"features\":\n self.table.fields.add(name=\"model_name\")\n self.table.fields.add(name=\"sort_order\", dataType=1)\n self.table.fields.add(name=\"feature\")\n self.table.fields.add(name=\"var_type\")\n self.table.fields.add(name=\"data_type\")\n self.table.fields.add(name=\"strategy\")\n self.table.fields.add(name=\"strategy_args\")\n elif variant == \"fit\":\n self.table.fields.add(name=\"model_name\")\n self.table.fields.add(name=\"result\")\n self.table.fields.add(name=\"time_stamp\")\n self.table.fields.add(name=\"score_result\")\n self.table.fields.add(name=\"score\", dataType=1)\n elif variant == \"metrics_clf\":\n self.table.fields.add(name=\"model_name\")\n self.table.fields.add(name=\"class\")\n self.table.fields.add(name=\"accuracy\", dataType=1)\n self.table.fields.add(name=\"precision\", dataType=1)\n self.table.fields.add(name=\"recall\", dataType=1)\n self.table.fields.add(name=\"fscore\", dataType=1)\n self.table.fields.add(name=\"support\", dataType=1)\n elif variant == \"metrics_clf_cv\":\n self.table.fields.add(name=\"model_name\")\n self.table.fields.add(name=\"class\")\n self.table.fields.add(name=\"accuracy\", dataType=1)\n self.table.fields.add(name=\"accuracy_std\", dataType=1)\n self.table.fields.add(name=\"precision\", dataType=1)\n self.table.fields.add(name=\"precision_std\", dataType=1)\n self.table.fields.add(name=\"recall\", dataType=1)\n self.table.fields.add(name=\"recall_std\", dataType=1)\n self.table.fields.add(name=\"fscore\", dataType=1)\n self.table.fields.add(name=\"fscore_std\", dataType=1)\n elif variant == \"metrics_reg\":\n self.table.fields.add(name=\"model_name\")\n self.table.fields.add(name=\"r2_score\", dataType=1)\n self.table.fields.add(name=\"mean_squared_error\", dataType=1)\n self.table.fields.add(name=\"mean_absolute_error\", dataType=1)\n self.table.fields.add(name=\"median_absolute_error\", dataType=1)\n self.table.fields.add(name=\"explained_variance_score\", dataType=1)\n elif variant == \"metrics_reg_cv\":\n self.table.fields.add(name=\"model_name\")\n self.table.fields.add(name=\"r2_score\", dataType=1)\n self.table.fields.add(name=\"r2_score_std\", dataType=1)\n self.table.fields.add(name=\"mean_squared_error\", dataType=1)\n self.table.fields.add(name=\"mean_squared_error_std\", dataType=1)\n self.table.fields.add(name=\"mean_absolute_error\", dataType=1)\n self.table.fields.add(name=\"mean_absolute_error_std\", dataType=1)\n self.table.fields.add(name=\"median_absolute_error\", dataType=1)\n self.table.fields.add(name=\"median_absolute_error_std\", dataType=1)\n self.table.fields.add(name=\"explained_variance_score\", dataType=1)\n self.table.fields.add(name=\"explained_variance_score_std\", dataType=1)\n elif variant == \"confusion_matrix\":\n self.table.fields.add(name=\"model_name\")\n self.table.fields.add(name=\"true_label\")\n self.table.fields.add(name=\"pred_label\")\n self.table.fields.add(name=\"count\", dataType=1)\n elif variant == \"confusion_matrix_multi\":\n self.table.fields.add(name=\"model_name\")\n self.table.fields.add(name=\"step\", dataType=1)\n self.table.fields.add(name=\"true_negative\", dataType=1)\n self.table.fields.add(name=\"false_positive\", dataType=1)\n 
self.table.fields.add(name=\"false_negative\", dataType=1)\n self.table.fields.add(name=\"true_positive\", dataType=1)\n elif variant == \"importances\":\n self.table.fields.add(name=\"model_name\")\n self.table.fields.add(name=\"feature_name\")\n self.table.fields.add(name=\"importance\", dataType=1)\n elif variant == \"predict\":\n self.table.fields.add(name=\"model_name\")\n self.table.fields.add(name=\"key\")\n self.table.fields.add(name=\"prediction\")\n elif variant == \"expression\":\n self.table.fields.add(name=\"result\")\n elif variant == \"best_params\":\n self.table.fields.add(name=\"model_name\")\n self.table.fields.add(name=\"best_params\")\n elif variant == \"cluster\":\n self.table.fields.add(name=\"model_name\")\n self.table.fields.add(name=\"key\")\n self.table.fields.add(name=\"label\")\n elif variant == \"reduce\":\n self.table.fields.add(name=\"model_name\")\n self.table.fields.add(name=\"key\")\n # Add a variable number of columns depending on the response\n for i in range(self.response.shape[1]-2):\n self.table.fields.add(name=\"dim_{0}\".format(i+1), dataType=1)\n elif variant == \"keras_history\":\n self.table.fields.add(name=\"model_name\")\n # Add columns from the Keras model's history\n for i in range(1, self.response.shape[1]):\n self.table.fields.add(name=self.response.columns[i], dataType=1)\n \n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(5)\n \n # Send table description\n table_header = (('qlik-tabledescription-bin', self.table.SerializeToString()),)\n self.context.send_initial_metadata(table_header)", "def start_table(self):\n self.result = \"<table>\\n\"", "def __init__(self, **kwargs):\n TableLoader.__init__(self, **kwargs)", "def __init__(self, **kwargs):\n TableLoader.__init__(self, **kwargs)", "def __init__(self, **kwargs):\n TableLoader.__init__(self, **kwargs)", "def __init__(self, **kwargs):\n TableLoader.__init__(self, **kwargs)", "def __init__(self, **kwargs):\n TableLoader.__init__(self, **kwargs)", "def __init__(self, **kwargs):\n TableLoader.__init__(self, **kwargs)", "def __init__(self, **kwargs):\n TableLoader.__init__(self, **kwargs)", "def __init__(self, **kwargs):\n TableLoader.__init__(self, **kwargs)", "def __init__(self, **kwargs):\n TableLoader.__init__(self, **kwargs)", "def __init__(self, **kwargs):\n TableLoader.__init__(self, **kwargs)", "def __init__(self, **kwargs):\n TableLoader.__init__(self, **kwargs)", "def parse_single_table(source, **kwargs):\n if kwargs.get(\"table_number\") is None:\n kwargs[\"table_number\"] = 0\n\n votable = parse(source, **kwargs)\n\n return votable.get_first_table()", "def __init__(\n self,\n table: \"Table\",\n use_header: bool = True,\n template: Optional[Template] = None,\n escape: bool = True,\n ):\n self.table = table\n self.escape = escape\n self.use_header = use_header\n self.template = self.get_template()\n self.context = self.get_context(table, self.use_header)", "def _prepare_raw_data(kwargs):\n path = kwargs.get(\"path\", None)\n output_path = kwargs.get(\"output_path\", None)\n data_source = DataSource.best_available_data_source()\n for job in data_source.jobs(\n source=\"raw\", path=path, data_path=output_path, stateful=False):\n data_source.write_job(data=job, path=output_path)\n for traffic in data_source.traffics(\n source=\"raw\", path=path, data_path=output_path, stateful=False):\n data_source.write_traffic(data=traffic, path=output_path)", "def get_table_param(self, table, param_name, start_time, 
provider=None, quiet=True):\n\n provider = self.get_provider(provider)\n if provider is None:\n return None\n\n if self.tables is None:\n self.get_tables(provider)\n\n tables = [table['dataType'] for table in self.tables[provider]]\n if table not in tables:\n log.error('table {:s} invalid for provider {:s}'.format(table, provider))\n return None\n\n if type(start_time) == str:\n start_time = pd.Timestamp(start_time)\n\n\n params={\n 'elementId': param_name,\n 'ssc': 'null',\n 'timestamp': start_time.strftime(date_format_ms)[:-3]}\n\n \n params = urllib.parse.urlencode(params, quote_via=urllib.parse.quote)\n r = requests.get(self._url('/web/tables/params/{:s}/{:s}'.format(provider, table)),\n headers={'Authorization': self.token}, params=params, proxies=self.proxy)\n\n r.raise_for_status()\n data = r.json()\n\n if len(data['data']) == 0:\n log.warn('no data found for the given parameter and time')\n return None\n\n cols = data['headers']\n datacells = [row['dataCells'] for row in data['data']][0]\n table_data = pd.DataFrame(datacells)\n drop_cols = ['cellValue', 'altText', 'bgColor', 'detail', 'webpagelink', 'rowParams', 'metadata']\n table_data.drop(drop_cols, inplace=True, axis=1)\n table_data.columns = cols\n\n if not quiet:\n log.info('{:d} table entries retrieved'.format(len(table_data)))\n\n return table_data", "def prepare_inputs(data, queries, tokenizer):\n # Prepare inputs\n table = pd.DataFrame.from_dict(data)\n inputs = tokenizer(table=table, queries=queries,truncation=True, padding=True,return_tensors=\"pt\").to(device)\n\n # Return things\n return table, inputs", "def RequestData(self, request, inInfo, outInfo):\n # Inputs from different ports:\n pdi = self.GetInputData(inInfo, 0, 0)\n table = self.GetOutputData(outInfo, 0)\n\n\n # Note user has to select a single array to save out\n field, name = self.__inputArray[0], self.__inputArray[1]\n vtkarr = _helpers.getVTKArray(pdi, field, name)\n\n table.GetRowData().AddArray(vtkarr)\n\n return 1", "def set_requests_table(self):\n with sql.connect('./{}.db'.format(self.name)) as conn:\n conn.execute(\"\"\"CREATE TABLE IF NOT EXISTS requests(\n request_id INTEGER PRIMARY KEY,\n created_at TEXT,\n track_list TEXT,\n languages TEXT,\n locations TEXT,\n minimum_tweet_per_sec TEXT,\n time_frame TEXT,\n peak_detection_sensibility TEXT,\n analysis_sample_size TEXT,\n db_tweets_name TEXT)\n \"\"\")", "def load_table(name, including_stats):\n\n table = saga.host_catalog.load(query=name, include_stats=including_stats)\n table_query = saga.host_catalog.construct_host_query(name)\n sats_table = complete_host_query.filter(sats)\n return table, sats_table", "def sit(self, table):\n self.table = table", "def process(tr, parameters, tableBuilder):\n user = parameters.get(\"user\")\n if not user == None:\n tr.setUserId(user)\n\n sampleID = parameters.get(\"identifier\")\n sample = tr.getSampleForUpdate(sampleID)\n\n properties = parameters.get(\"properties\")\n \n for prop in properties.keySet():\n \tsample.setPropertyValue(prop, properties.get(prop))", "def query_to_table(self, sql, table_path, write_disposition=\"WRITE_TRUNCATE\", **inputs) -> None:\n dataset, table = table_path.split('.')\n dataset_ref = self.client.dataset(dataset)\n table_ref = dataset_ref.table(table)\n job_config = bgq.QueryJobConfig()\n job_config.write_disposition = write_disposition\n job_config.destination = table_ref\n\n if os.path.isfile(sql):\n with open(sql, 'r') as f:\n sql = f.read()\n\n if inputs is not None:\n sql = sql.format(**inputs)\n\n query_job = 
self.client.query(sql, job_config=job_config)\n\n query_job.result()", "def table_data(api, start_date: datetime.datetime, end_date: datetime.datetime, path: str, column_names: dict = None):\n if column_names is None:\n column_names = {\n \"path\": \"URL\",\n \"method\": \"Method\",\n \"status_code\": \"Status\",\n \"response_time\": \"Duration in [s]\",\n \"date\": \"Date [UTC]\",\n \"remote_address\": \"IP Address\",\n \"user_country_name\": \"Location\",\n \"platform\": \"Operating System\",\n \"browser\": \"Browser\"\n }\n\n requests = api.get_requests_for_path(path, start_date, end_date)\n\n # performance issues?\n # Purpose: Only use specified column_names\n return [\n [vars(request).get(column_name) for column_name in column_names]\n for request in requests\n ], column_names.values()", "def to_Table(self, **kwargs):\n mean_transmit, transmit_ = self.transmit\n data_ = {'WAVELENGTH': self._wavelength,\n 'THROUGHPUT': mean_transmit}\n for num, filterk in enumerate(transmit_, 1):\n data_['THROUGHPUT_{0:d}'.format(num)] = filterk\n data = SimpleTable(data_)\n\n if self.wavelength_unit is not None:\n data.header['WAVELENGTH_UNIT'] = self.wavelength_unit\n data.header['DETECTOR'] = self.dtype\n data.header['COMPNAME'] = self.name\n data.header['NAME'] = self.name\n data.set_comment('THROUGHPUT', 'filter throughput definition')\n data.set_comment('WAVELENGTH', 'filter wavelength definition')\n for num in range(1, len(transmit_) + 1):\n data.set_comment('THROUGHPUT_{0:d}'.format(num),\n 'filter throughput sample')\n data.set_comment('WAVELENGTH', self.wavelength_unit or 'AA')\n return data", "def generate_data_set(x, az_table, az_sink_tbl, maria_table):\n\n keyVault = KeyVault(conf)\n keyVault_pass = keyVault.fetch_secret(\n conf.get(\"source.secret_name\"), conf.get(\"source.secret_version\")\n ).value\n\n columns = get_columns(conf.get('source.url'), conf.get('source.user'), keyVault_pass, \"circ-hack-source\",\n \"dbo\", x)\n\n az_table['name'] = 'AzSource_' + x\n az_table['properties']['typeProperties']['tableName'] = \"[dbo].[\" + x + \"]\"\n az_table['properties']['schema'] = columns\n az_table['properties'].pop('structure', None)\n\n columns = get_columns(conf.get(\"sink.url\"), conf.get(\"sink.user\"), keyVault_pass, \"circ-hack-sink\",\n \"dbo\", x)\n\n az_sink_tbl['name'] = 'AzSink_' + x\n az_sink_tbl['properties']['linkedServiceName']['referenceName'] = \"AzureCircSink\"\n az_sink_tbl['properties']['typeProperties']['tableName'] = \"[dbo].[\" + x + \"]\"\n az_sink_tbl['properties']['folder']['name'] = 'azure_sink'\n az_sink_tbl['properties']['schema'] = columns\n az_sink_tbl['properties'].pop('structure', None)\n\n maria_table['name'] = 'MariaSource_' + x\n maria_table['properties']['typeProperties']['tableName'] = \"`\" + x + \"`\"\n\n return az_table, az_sink_tbl, maria_table", "def config_source(tbl, source):\r\n \r\n # Stupidly using source as a variable name twice\r\n source_ra = np.rad2deg(source._ra)\r\n source_dec = np.rad2deg(source._dec)\r\n source_name = source.name\r\n \r\n print('Source is: %s'%source.name)\r\n \r\n source = tbl.data[0]\r\n \r\n source['SOURCE_ID'] = 1\r\n source['SOURCE'] = source_name\r\n source['VELDEF'] = 'RADIO'\r\n source['VELTYP'] = 'GEOCENTR'\r\n source['FREQID'] = 1\r\n source['RAEPO'] = source_ra\r\n source['DECEPO'] = source_dec\r\n source['EQUINOX'] = 'J2000'\r\n \r\n # Things I'm just making up\r\n source['IFLUX'] = 0\r\n source['QFLUX'] = 0\r\n source['UFLUX'] = 0\r\n source['VFLUX'] = 0\r\n source['ALPHA'] = 0\r\n source['FREQOFF'] = 
0\r\n \r\n tbl.data[0] = source\r\n \r\n return tbl", "def __init__(self, *args):\n _snap.TTable_swiginit(self, _snap.new_TTable(*args))", "def test_tableinit(self, source, url):\n\n # mock source.listColumns() to return some dummy columns\n source.listColumns.return_value = [\n self.makeColumn('foo', 'Foo'),\n self.makeColumn('bar', 'Bar')\n ]\n\n # mock source() properties to return True\n source.editable = True\n source.sortable = True\n source.queryable = True\n\n # mock url() property to return some url -> not really important which\n url.return_value = 'http://foo'\n\n # get the tableinit result dict\n widget = self.makeTableWidget()\n result = widget.tableinit()\n self.maxDiff = None\n # test the bleeep out of it\n expected = u\"\"\"\\\n(function($) { $(function() {\n var datatable = new collective.table.Table(\n $('#table-table-datagrid'),\n 'http://foo', [{\"sTitle\": \"Foo\", \"sName\": \"foo\", \"mDataProp\": \"foo\"}, {\"sTitle\": \"Bar\", \"sName\": \"bar\", \"mDataProp\": \"bar\"}], true, true, true);\n}); })(jQuery);\n\"\"\"\n self.assertEquals(expected, result)", "def _retrieve_transaction_table_input(self, execution_arn: str) -> Dict:\n response = self.client.get_execution_history(executionArn=execution_arn,maxResults=1000)\n events = response[\"events\"]\n record_purchase_entered_events = [\n event\n for event in events\n if event[\"type\"] == \"TaskStateEntered\" and event[\"stateEnteredEventDetails\"][\"name\"] == \"InsertPurchase\"\n ]\n\n record_refund_entered_events = [\n event\n for event in events\n if event[\"type\"] == \"TaskStateEntered\" and event[\"stateEnteredEventDetails\"][\"name\"] == \"InsertRefund\"\n ]\n\n record_error_entered_events = [\n event\n for event in events\n if event[\"type\"] == \"TaskStateEntered\" and event[\"stateEnteredEventDetails\"][\"name\"] == \"InsertError\"\n ]\n \n self.assertTrue(\n record_purchase_entered_events,\n \"Cannot find InsertPurchase TaskStateEntered event\",\n )\n self.assertTrue(\n record_refund_entered_events,\n \"Cannot find InsertPurchase TaskStateEntered event\",\n )\n self.assertTrue(\n record_error_entered_events,\n \"Cannot find InsertPurchase TaskStateEntered event\",\n )\n purchase_table_input=[] #PurchaseTable inputs\n refund_table_input=[] # RefundTable inputs\n error_table_input=[] # ErrorTable inputs\n for transaction in record_purchase_entered_events:\n transaction_input = json.loads(transaction[\"stateEnteredEventDetails\"][\"input\"])\n\n purchase_table_input.append(transaction_input)\n self.inserted_purchase_record_id.append(transaction_input[\"TransactionId\"]) # save this ID for cleaning up PurchaseTable\n\n for transaction in record_refund_entered_events:\n transaction_input = json.loads(transaction[\"stateEnteredEventDetails\"][\"input\"])\n\n refund_table_input.append(transaction_input)\n self.inserted_refund_record_id.append(transaction_input[\"TransactionId\"]) # save this ID for cleaning up RefundTable\n\n for transaction in record_error_entered_events:\n transaction_input = json.loads(transaction[\"stateEnteredEventDetails\"][\"input\"])\n\n error_table_input.append(transaction_input)\n self.inserted_error_record_id.append(transaction_input[\"TransactionId\"]) # save this ID for cleaning up ErrorTable\n\n return purchase_table_input, refund_table_input, error_table_input", "def do_set_table(tpath, comment):\n content = sys.stdin.read()\n runs = run.split('-')\n if len(runs) == 1:\n runs.append(ccdb.INFINITE_RUN)\n ass = provider.create_assignment([[content]], tpath, \n runs[0], runs[1],\n 
var, comment)", "def load_table(opts, stats):\n print(\"--------------------------------------\")\n print(\"Loading table %s\" % (opts.table_name,))\n print(\"--------------------------------------\")\n print(timestamp())\n # Example invocation:\n # spark-submit --class org.apache.kudu.spark.tools.DistributedDataGenerator \\\n # kudu-spark2-tools_2.11-1.8.0-SNAPSHOT.jar \\\n # --type random \\\n # --num-rows 10000000 \\\n # --num-tasks 20 \\\n # impala::default.foo_test3 m123.example.com\n CLASS_NAME = 'org.apache.kudu.spark.tools.DistributedDataGenerator'\n # TODO: Non-string columns are assumed to be 8 bytes.\n row_size_bytes = opts.num_string_columns * opts.string_field_len + \\\n (opts.columns - opts.num_string_columns) * 8\n num_rows = opts.table_data_size_mb * 1024 * 1024 / row_size_bytes\n print(\"INFO: Inserting %d rows of %d bytes each\" % (num_rows, row_size_bytes))\n stats['row_size_bytes'] = row_size_bytes\n stats['num_rows'] = num_rows\n cmd = \"%s --class %s %s --type %s --num-rows %d --num-tasks %d %s %s\" % \\\n (opts.spark_submit_command, CLASS_NAME, opts.kudu_spark_tools_jar,\n opts.load_policy, num_rows, opts.load_num_tasks, opts.table_prefix + opts.table_name,\n opts.master_addresses)\n run_command(opts, cmd)", "def get_table(self, **kwargs):\r\n options = {}\r\n table_class = self.get_table_class()\r\n table = table_class(self.get_table_data(), **kwargs)\r\n paginate = self.get_table_pagination() # pylint: disable=E1102\r\n if paginate is not None:\r\n options['paginate'] = paginate\r\n RequestConfig(self.request, **options).configure(table)\r\n return table", "def table(self, table):\n self._table = table", "def table(self, table):\n self._table = table", "def test_jdbc_tables_header(sdc_builder, sdc_executor, database):\n\n table_name1 = get_random_string(string.ascii_lowercase, 20)\n table_name2 = get_random_string(string.ascii_lowercase, 20)\n if database.type == 'Oracle':\n # When not quoted, Oracle automatically converts names to upper case. Quoting is inconsistent between\n # databases, so it is preferable to avoid it in SQL below. 
And to get a compatible result during creation,\n # we omit quotes here also.\n create_quotes_names = False\n else:\n create_quotes_names = True\n\n logger.info('Creating two identical tables in %s database...', database.type)\n table1 = _create_table(table_name1, database, quote=create_quotes_names)\n table2 = _create_table(table_name2, database, quote=create_quotes_names)\n\n connection = database.engine.connect()\n try:\n logger.info('Adding %s rows into each table...', len(ROWS_IN_DATABASE))\n connection.execute(table1.insert(), ROWS_IN_DATABASE)\n connection.execute(table2.insert(), ROWS_IN_DATABASE)\n\n builder = sdc_builder.get_pipeline_builder()\n\n sql_query = \"SELECT t1.id, t2.name \" \\\n f\"FROM {table_name1} t1 \" \\\n f\" JOIN {table_name2} t2 \" \\\n \" ON t1.name = t2.name \" \\\n \"WHERE t1.id > ${OFFSET} \" \\\n \"ORDER BY t1.id\"\n origin = builder.add_stage('JDBC Query Consumer')\n origin.sql_query = sql_query\n origin.offset_column = 'id'\n origin.incremental_mode = True\n origin.on_unknown_type = 'STOP_PIPELINE'\n\n wiretap = builder.add_wiretap()\n\n origin >> wiretap.destination\n\n pipeline = builder.build().configure_for_environment(database)\n sdc_executor.add_pipeline(pipeline)\n\n sdc_executor.start_pipeline(pipeline)\n sdc_executor.wait_for_pipeline_metric(pipeline, 'input_record_count', 3)\n sdc_executor.stop_pipeline(pipeline)\n\n # Check jdbc.tables header.\n tables_header = wiretap.output_records[0].header['values']['jdbc.tables']\n logger.debug('%s=\"%s\"', \"header['values']['jdbc.tables']\", tables_header)\n logger.debug('%s=\"%s\"', \"database.type\", database.type)\n # According to documentation some JDBC drivers may not provide this information:\n # https://docs.streamsets.com/platform-datacollector/latest/datacollector/UserGuide/Origins/JDBCConsumer.html\n if database.type == 'Oracle':\n # Oracle does not seem to populate this field\n assert tables_header == \"\"\n elif database.type == 'SQLServer':\n # SQLServer does not seem to populate this field\n assert tables_header == \"\"\n else:\n # MySQL, PostgreSQL and MiriaDB all return source table names as a coma-delimited list.\n # Ordering of the list is not known for PostgreSQL and MiriaDB, but For MySQL it is predictably random.\n # The logic below asserts that both names are reported in any order (and case is ignored, though this\n # should not be necessary):\n tables_list = tables_header.split(',')\n tables_normalized_map = map(lambda x:x.lower(), tables_list)\n assert set(tables_normalized_map) == {table_name1, table_name2}\n\n finally:\n try:\n logger.info('Dropping table %s in %s database ...', table_name1, database.type)\n connection.execute(f\"DROP TABLE {table_name1}\")\n logger.info('Dropping table %s in %s database ...', table_name2, database.type)\n connection.execute(f\"DROP TABLE {table_name2}\")\n except Exception as ex:\n logger.warning('Error during cleanup', exc_info=ex)", "def post(self):\n args = parser.parse_args()\n table = TableDetails(args.get('table_size'))\n db.session.add(table)\n db.session.commit()\n return table, 201", "def table_route(sources, destinations, router, max_table_size=2_000, threads=10, annotations='duration', pbar=True, cache_name=None, executor='process', extra_params=None):\n\timport re\n\tif router not in CONFIG['routers'] and not re.match(r'^https?\\://.*', router):\n\t\traise ValueError(f'router must be a key in erde config routers section, or a URL. 
got: \\'{router}\\'')\n\n\tsources_indices = {i: v for i, v in enumerate(_index(sources))}\n\tdestinations_indices = {i: v for i, v in enumerate(_index(destinations))}\n\tsources = _tolist(sources, 'sources')\n\tdestinations = _tolist(destinations, 'destinations')\n\n\tann_set = set(annotations.split(','))\n\tif ann_set & {'duration', 'distance'} != ann_set:\n\t\traise ValueError(\"annotations must be one of these: 'duration', 'distance', or 'duration,distance' (order does not matter)\")\n\n\tmts = max_table_size\n\thost_url = CONFIG['routers'].get(router, router)\n\n\ttotal_rows, total_cols = rows, cols = len(sources), len(destinations)\n\tif cols * rows > mts:\n\t\tif rows < cols:\n\t\t\t# split by sources\n\t\t\trows = max(mts // cols, 1) # max(,1) beacuse if 1 row does not fit, then at least split by 1 row\n\t\t\tcols = min(mts, cols)\n\t\telse:\n\t\t\tcols = max(mts // rows, 1)\n\t\t\trows = min(mts, rows)\n\n\t_route_partial = partial(_route_chunk, host_url=host_url, annotations=annotations, extra_params=extra_params)\n\n\twith tqdm(total=total_rows * total_cols, desc='Table routing', disable=(not pbar)) as t, ThreadPoolExecutor(max_workers=threads) as tpe:\n\t\tcombos = list(product(range(0, total_rows, rows), range(0, total_cols, cols)))\n\t\tslices = ((sources[s:s + rows], destinations[d:d + cols], s, d) for s, d in combos)\n\n\t\t# process/thread/an instance of executor\n\t\tfor df in tpe.map(_route_partial, slices):\n\t\t\tdf['source'] = df['source'].map(sources_indices)\n\t\t\tdf['destination'] = df['destination'].map(destinations_indices)\n\t\t\tyield df\n\t\t\tt.update(len(df))", "def query_request():\n query_data = request.get_json()\n print(query_data)\n example_response = []\n \n # First we need to check if the request is for table or time series data\n if query_data and query_data == 'table':\n # send back columns and rows\n pass\n elif query_data:\n # send back value/clock pairs for timeseries charts\n example_response = generate_fake_timeseries(query_data.get('range', {}).get('from'),\n query_data.get('range', {}).get('to'),\n interval=query_data.get('intervalMs', 60000),\n create=4)\n return make_response(jsonify(example_response))", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n column_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n column_wildcard: Optional[pulumi.Input[pulumi.InputType['DataCellsFilterColumnWildcardArgs']]] = None,\n database_name: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n row_filter: Optional[pulumi.Input[pulumi.InputType['DataCellsFilterRowFilterArgs']]] = None,\n table_catalog_id: Optional[pulumi.Input[str]] = None,\n table_name: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def __init__(self,\n redshift_conn_id='',\n table='',\n sql='',\n *args, **kwargs):\n\n super(LoadFactOperator, self).__init__(*args, **kwargs)\n self.redshift_conn_id = redshift_conn_id\n self.table = table\n self.sql = sql", "def policy_tables_generator_inputs(wildcards):\n\n # Which policies go into the table?\n # This isn't set in stone; could add or remove if you want.\n if wildcards.table_type == \"expected_fee\":\n audit_policies = [\"uniform\", \"target_x\", \"target_e_low\", \"target_e_high\"]\n\n elif wildcards.table_type in (\"outcome_dwl\", \"outcome_emis\"):\n audit_policies = [\"uniform\", \"target_x\", \"target_e_low\", \"target_e_high\", \"remote_low\", \"remote_high\"]\n else:\n raise ValueError(\"Unknown table_type: \", 
wildcards.table_type)\n\n if wildcards.audit_tau == \"all-\":\n tau_list = [\"low-\", \"med-\", \"high-\"]\n else:\n # Here audit_tau includes the trailing \"-\"\n assert wildcards.audit_tau.endswith(\"-\")\n tau_list = [wildcards.audit_tau]\n\n input_file_part = []\n for pol in audit_policies:\n if pol in [\"none\", \"remote_low\", \"remote_high\"]:\n amt = \"0pct\"\n else:\n amt = wildcards.audit_amount\n for tau in tau_list:\n tauT = f\"{tau}{wildcards.audit_T}\"\n input_file_part.append(\n f\"audit_outcome_summary_rule={pol}_frac={amt}_tauT={tauT}.parquet\"\n )\n model_dir = STAN_FITS / \"08_twopart_lognormal_heterog_alpha-bootstrap-period_8760_hours\"\n input_files = {\n \"outcome_summaries\": [model_dir / f for f in input_file_part],\n }\n return input_files", "def dump(self, packet):\n # TODO\n packet['type'] = \"table\"\n src = packet['src']\n packet['src'] = packet['dst']\n packet['dst'] = src\n\n table_list = []\n\n # TODO fill out table string with routing table\n table_string = \"\"\n # TODO asking for int indexes instead of string for route?\n for ip in self.updates.keys():\n # TODO have to fill ip address of peer\n\n entry = {'network' : self.updates[ip][MESG][NTWK], 'netmask' : self.updates[ip][MESG][NMSK], 'peer' : ip}\n table_list.append(entry)\n packet[MESG] = table_list\n msg = json.dumps(packet)\n #print(json.dumps(packet, sort_keys=True, indent=4))\n\n sock = self.sockets[src]\n sock.sendall(msg.encode())\n return True", "def install_sample(self, datapath, table_id):\n parser = datapath.ofproto_parser\n ofproto = datapath.ofproto\n # Incoming port 1.\n in_port = 1;\n for timeout in range(60, 1 ,-1):\n # Incoming Ethernet destination\n match = self.create_match(parser,\n {ofproto.OXM_OF_METADATA: timeout})\n # Output to port 2.\n output = parser.OFPActionOutput(2, 0)\n write = parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,\n [output])\n instructions = [write]\n flow_mod = self.create_flow_add(datapath, 100, timeout,\n table_id, match, instructions)\n datapath.send_msg(flow_mod)\n\n print \"sent flow_mod\"", "def exec_before_job( trans, inp_data, out_data, param_dict, tool=None):\n data_name = param_dict.get( 'name', 'Biomart query' )\n data_type = param_dict.get( 'type', 'text' )\n \n name, data = out_data.items()[0]\n data = datatypes.change_datatype(data, data_type)\n data.name = data_name\n out_data[name] = data", "def get_table_output(dataset_id: Optional[pulumi.Input[str]] = None,\n project: Optional[pulumi.Input[Optional[str]]] = None,\n selected_fields: Optional[pulumi.Input[Optional[str]]] = None,\n table_id: Optional[pulumi.Input[str]] = None,\n view: Optional[pulumi.Input[Optional[str]]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetTableResult]:\n ...", "def load_raw_to_bq(event, context):\n\n import os\n\n\n print(f\"Processing .....\")\n\n file = event\n project = os.environ.get('ENV_PROJECT')\n dataset = os.environ.get('ENV_DATASET')\n bucket = file.get(\"bucket\")\n tableCsv = file.get(\"name\")\n tableDestList = tableCsv.split(\".\")\n tableDest = tableDestList[0]\n table_id = f'{project}.{dataset}.{tableDest}'\n\n from Configuration import Configuration\n\n Configuration(tableCsv,bucket,table_id)\n\n\n print(f\"End Process.\")", "def prepare(self, request):\n pass", "def pre_interface_route_table_create(self, resource_dict):\n pass", "def table_dump_query(table_name, path, rows_per_dump):\n return\"\"\"\n DEFINE TEMP-TABLE tt NO-UNDO LIKE %(table_name)s\n FIELD rec_id AS RECID\n FIELD epoch_time AS INT64.\n\n 
DEFINE VARIABLE epoch AS DATETIME NO-UNDO.\n DEFINE VARIABLE unixTime AS INT64 NO-UNDO.\n DEFINE VARIABLE htt AS HANDLE NO-UNDO.\n DEFINE VARIABLE cFileName AS CHARACTER NO-UNDO FORMAT \"x(60)\".\n DEFINE VARIABLE rowCount as INT64 NO-UNDO.\n\n epoch = DATETIME(1,1,1970,0,0,0,0).\n rowCount = 0.\n\n htt = TEMP-TABLE tt:HANDLE.\n\n FOR EACH platte.%(table_name)s NO-LOCK:\n IF rowCount = %(rows_per_dump)s THEN DO: \n unixTime = interval(NOW, epoch, \"milliseconds\").\n cFileName = \"%(path)s/t__%(table_name)s__e__\" + STRING(unixTime) + \"__insert.json\".\n htt:WRITE-JSON(\"FILE\", cFileName + \"_partial\", TRUE).\n OS-RENAME VALUE(cFileName + \"_partial\") VALUE(cFileName).\n rowCount = 0.\n EMPTY TEMP-TABLE tt.\n END.\n rowCount = rowCount + 1.\n CREATE tt.\n BUFFER-COPY %(table_name)s TO tt.\n tt.rec_id = RECID(%(table_name)s).\n unixTime = interval(NOW, epoch, \"milliseconds\").\n tt.epoch_time = unixTime.\n END.\n unixTime = interval(NOW, epoch, \"milliseconds\").\n cFileName = \"%(path)s/t__%(table_name)s__e__\" + STRING(unixTime) + \"__insert.json\".\n htt:WRITE-JSON(\"FILE\", cFileName + \"_partial\", TRUE).\n OS-RENAME VALUE(cFileName + \"_partial\") VALUE(cFileName)\n \n\"\"\" % {'path': path, 'table_name': table_name, 'rows_per_dump': rows_per_dump}", "def RequestData(self, request, inInfo, outInfo):\n # Get output:\n output = self.GetOutputData(outInfo, 0)\n # Get requested time index\n i = _helpers.get_requested_time(self, outInfo)\n if self.need_to_read():\n self._read_up_front()\n # Get the data which has already been loaded\n data = self._get_raw_data(idx=i) # these should just be XYZ+attribute\n # in either a numpy array or a pandas dataframe where first three\n # columns are the XYZ arrays\n output.DeepCopy(interface.points_to_poly_data(data))\n return 1 # NOTE: ALWAYS return 1 on pipeline methods", "def _model_table(self, create_table_query, insert_query, parameters):\n try:\n self.session.execute(create_table_query)\n self._insert_data(insert_query, parameters)\n except Exception as e:\n print('Error on creating table for query. 
' + str(e))", "def _get_table(self, cursor):\n raise NotImplementedError", "def prep(self):\n sq1 = 'create table TCVR ( ID, T, C, V, R , primary key ( ID ) ) ;'\n sq2 = 'create table IDX ( ID , A , primary key(A) ) ; '\n self.sq.SQX(sq1)\n self.sq.SQX(sq2)\n sq3 = \"insert into IDX VALUES ( 1 , 'A' ) ; \"\n self.sq.SQX(sq3)", "def __init__(self, table, ioloop, iex_source, **kwargs):\n data_cleaner = kwargs.pop(\"data_cleaner\")\n super(IEXStaticDataSource, self).__init__(\n table, ioloop, data_cleaner=data_cleaner\n )\n self._iex_source = iex_source\n self._iex_source_kwargs = kwargs", "def on_get(self, req, resp, table):\n user = req.context['user']\n columns = req.params['column'] if 'column' in req.params else None # columns to query\n start = req.params['start'] if 'start' in req.params else None # pagination: start id\n limit = req.params['limit'] if 'limit' in req.params else None # pagination: row limit\n where = base64.b64decode(req.params['where']) if 'where' in req.params else None # query filters\n\n engine = user_db_engine(user)\n key = _make_key(engine, table, columns, start, limit)\n resp.context['cache_key'] = key\n if config.use_cache() and cache.contains_query(key):\n resp.context['cache_hit'] = True\n resp.status = falcon.HTTP_200\n else:\n result, count = _select(engine, table, columns=columns, start=start, limit=limit, where=where)\n\n if config.use_cache():\n resp.context['cache_miss'] = True\n resp.context['result'] = { 'result': 'ok', 'data': result, 'total': count }\n resp.status = falcon.HTTP_200\n\n pagi = \" start from id {} limit {}\".format(start, limit) if start and limit else \"\"\n log.info(\"user [{}]: get table({}) [{}]{}\".format(user['user'], columns if columns else \"*\", table, pagi))", "def copy_string_to_table(self, schema, table, s, separator=\",\"):\n #fields = (separator + \" \").join(self.schemas[schema][table][0])\n fields = s.split(\"\\n\")[0].replace(separator, \",\")\n sql = f'set role {self.write_role}; ' \\\n f'COPY {schema}.{table}( {fields} ) FROM stdin WITH DELIMITER \\'{separator}\\' CSV header;'\n return sql, StringIO(s)", "def _create_filter_session(self,table_name):\n # import pdb;pdb.set_trace()\n if not self.HEADER_NAME in session:\n session[self.HEADER_NAME] = {}\n if not table_name in session[self.HEADER_NAME]:\n session[self.HEADER_NAME][table_name] = {self.FILTERS_NAME:{},self.ORDERS_NAME:[]}\n \n return session[self.HEADER_NAME][table_name]", "def setup_table(self):\n\n self.setup.create_basic_table_in_dev()\n self.setup.insert_random_records_into_dev()", "def __init__(self, table_name='casbin_rule', **kwargs):\n self.table_name = table_name\n self.dynamodb = boto3.client('dynamodb', **kwargs)\n try:\n\n self.dynamodb.create_table(\n TableName=self.table_name,\n\n AttributeDefinitions=[\n {\n 'AttributeName': 'id',\n 'AttributeType': 'S'\n }\n ],\n KeySchema=[\n {\n 'AttributeName': 'id',\n 'KeyType': 'HASH'\n },\n ],\n ProvisionedThroughput={\n 'ReadCapacityUnits': 10,\n 'WriteCapacityUnits': 10\n }\n )\n except self.dynamodb.exceptions.ResourceInUseException:\n pass", "def build_input_table(cls, name='inputTableName', input_name='input'):\n obj = cls(name)\n obj.exporter = 'get_input_table_name'\n obj.input_name = input_name\n return obj", "def create_table_execute(self):\n self.execute(query=self.default_template.format(self.table_name), data=None)", "def create_buffer(self, context, connection, table, *, engine):\n buffer = yield Queue()\n for row in self.commit(table, connection, buffer, force=True):\n 
context.send(row)", "def __gen_datatable__(self):\n # | - __generate_data_table\n rows_list = []\n for Job_i in self.Job_list:\n # | - FOR LOOP BODY\n entry_param_dict = {}\n for prop, value in Job_i.job_params.items():\n entry_param_dict[prop] = value\n\n entry_param_dict[\"Job\"] = Job_i\n entry_param_dict[\"path\"] = Job_i.full_path\n entry_param_dict[\"max_revision\"] = Job_i.max_revision\n entry_param_dict[\"revision_number\"] = Job_i.revision_number\n\n rows_list.append(entry_param_dict)\n # __|\n\n data_frame = pd.DataFrame(rows_list)\n\n return(data_frame)\n # __|", "def start_table(self):\n raise NotImplementedError", "def _seek_to_table(self, table):\n\n self.stream.seek(self.table_pointers[table])" ]
[ "0.5877059", "0.54798776", "0.53474665", "0.53415745", "0.53346676", "0.5334067", "0.5324117", "0.53053784", "0.5297837", "0.5297153", "0.52415204", "0.521387", "0.52090687", "0.51502633", "0.5119904", "0.51124084", "0.5107619", "0.5088214", "0.5088214", "0.50813437", "0.5080283", "0.5069527", "0.50639933", "0.5050355", "0.50459665", "0.50412464", "0.50114733", "0.5008059", "0.49965745", "0.49963608", "0.49824083", "0.4981648", "0.49719244", "0.4954799", "0.49544752", "0.4952323", "0.4952323", "0.4952323", "0.4952323", "0.4952323", "0.4952323", "0.4952323", "0.4952323", "0.4952323", "0.4952323", "0.4952323", "0.49518347", "0.49491188", "0.49474648", "0.4944835", "0.49382523", "0.4918508", "0.49181443", "0.49036977", "0.49014884", "0.48933572", "0.48925", "0.48814082", "0.4870558", "0.48694822", "0.48647615", "0.48632163", "0.4860718", "0.48582864", "0.4857456", "0.4851853", "0.48491603", "0.48472404", "0.48472404", "0.4825812", "0.48248327", "0.48120102", "0.48063818", "0.48008162", "0.48002067", "0.47984236", "0.47967243", "0.47837356", "0.4782044", "0.47764456", "0.4769552", "0.476743", "0.4752315", "0.47426233", "0.47423765", "0.47387558", "0.47379", "0.47294325", "0.47284436", "0.47241434", "0.47179836", "0.47143027", "0.47131035", "0.47112563", "0.47072914", "0.4700879", "0.4699274", "0.469381", "0.4685817", "0.46798208" ]
0.5579226
1
Used a timed-bound function to test that the engine can properly reach the source
def test_connection(self) -> None:
    test_connection_fn = get_test_connection_fn(self.service_connection)
    test_connection_fn(self.engine)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_check_source_3(self):\n self.eval_flags[\"check_host_typo\"] = False\n import_genome.check_source(self.src1, self.eval_flags,\n host_genus=\"Mycobacterium\")\n self.assertEqual(len(self.src1.evaluations), 1)", "def test_check_source_9(self):\n self.src1._host_host_genus = \"Gordonia\"\n import_genome.check_source(self.src1, self.eval_flags,\n host_genus=\"Mycobacterium\")\n count = count_status(self.src1, \"error\", \"warning\")\n self.assertEqual(count, 1)", "def test_check_source_10(self):\n self.src1._lab_host_host_genus = \"Gordonia\"\n import_genome.check_source(self.src1, self.eval_flags,\n host_genus=\"Mycobacterium\")\n count = count_status(self.src1, \"error\", \"warning\")\n self.assertEqual(count, 1)", "def test_check_source_5(self):\n self.src1.host = \"\"\n import_genome.check_source(self.src1, self.eval_flags,\n host_genus=\"Mycobacterium\")\n self.assertEqual(len(self.src1.evaluations), 3)", "def test_missing_data_sources(self):", "def test_data_source_soaps_get(self):\n pass", "def check(self, runtime):", "def test_get_source_log(self):\n pass", "def test_check_source_2(self):\n self.eval_flags[\"check_id_typo\"] = False\n import_genome.check_source(self.src1, self.eval_flags,\n host_genus=\"Mycobacterium\")\n self.assertEqual(len(self.src1.evaluations), 3)", "def check():", "def test_data_source_soaps_post(self):\n pass", "def test_check_source_11(self):\n self.src1._organism_host_genus = \"Mycobacterio\"\n import_genome.check_source(self.src1, self.eval_flags,\n host_genus=\"Mycobacterium\")\n count = count_status(self.src1, \"error\")\n self.assertEqual(count, 0)", "def probe(self):", "def test_check_source_6(self):\n self.src1.lab_host = \"\"\n import_genome.check_source(self.src1, self.eval_flags,\n host_genus=\"Mycobacterium\")\n self.assertEqual(len(self.src1.evaluations), 3)", "def test_data_source_soaps_change_stream_get(self):\n pass", "def test_check_source_1(self):\n import_genome.check_source(self.src1, self.eval_flags,\n host_genus=\"Mycobacterium\")\n self.assertEqual(len(self.src1.evaluations), 4)", "def test_watch_no_source():\n with pytest.raises(ValueError):\n uflash.watch_file(None, lambda: \"should never be called!\")", "def test_client_twrr_performance(self):\n pass", "def test_point_onsource(self):\n cb_name = \"point_onsource_cb\"\n cb_updates_name = \"point_onsource_cb_updates\"\n client = self.__class__.client\n\n test_src = TAMS_Source(\n name=\"0537-441\",\n ra=1.478465645926414,\n dec=-0.7694426542639248\n )\n\n client.proxy.point_onsource(test_src.toDict(), cb_info={\n \"cb_handler\":client,\n \"cb\":cb_name,\n \"cb_updates\": cb_updates_name\n })\n\n def updates_check():\n \"\"\"\n Check to see if the updates_cb is returning the correct information\n \"\"\"\n data = wait_for_callback(client, cb_updates_name)\n self.assertTrue(isinstance(data, dict))\n\n result = wait_for_callback(client, cb_name, secondary_cb=updates_check)\n self.assertTrue(isinstance(result, dict))", "def test_check_source_8(self):\n self.src1._organism_host_genus = \"Gordonia\"\n import_genome.check_source(self.src1, self.eval_flags,\n host_genus=\"Mycobacterium\")\n count = count_status(self.src1, \"error\", \"warning\")\n self.assertEqual(count, 1)", "def test_full_house_flush_ind(self):", "def test_static_static(self):\n source = procedural.WhiteNoise(0.5)\n source = media.StaticSource(source)\n source = media.StaticSource(source)\n player = media.Player()\n player.queue(source)\n player.play()", "def test_get_run(self):\n pass", "def test_emirp_check():\r\n 
pass", "def test_ctcpQuery_SOURCE(self):\n self.client.sourceURL = \"url\"\n self.client.ctcpQuery_SOURCE(self.user, self.channel, \"data\")\n self.assertEqual(\n self.client.methods,\n [(\"ctcpMakeReply\", (\"Wolf\", [(\"SOURCE\", \"url\"), (\"SOURCE\", None)]))],\n )", "def runTest(self):\n return True", "def test_log_track_alternate_source(self):\n track = self.app.log_track(self.track_obj('silence.mp3'), source='car')\n self.assertEqual(self.get_track_count(), 1)\n track_row = self.get_track_by_id(track.pk)\n self.assertNotEqual(track_row, None)\n self.assertEqual(track_row['artist'], 'Artist')\n self.assertEqual(track_row['album'], 'Album')\n self.assertEqual(track_row['title'], 'Track')\n self.assertEqual(track_row['source'], 'car')\n self.assertEqual(track_row['album_id'], 0)\n self.assertEqual(track_row['tracknum'], 1)\n self.assertEqual(track_row['seconds'], 2)\n self.assertEqual(track_row['lasttransform'], 0)\n \n # This is a bit fuzzy, since in a worst-case scenario we may have\n # timestamps differing by a second or so. To be extra-cautious,\n # we'll just make sure the timestamp is +/- ten seconds of\n # what we think it should be.\n timestamp = track_row['timestamp'].timestamp()\n now_ts = datetime.datetime.now().timestamp()\n self.assertGreater(timestamp, now_ts-10)\n self.assertLess(timestamp, now_ts+10)", "def test_select_ttl_failure(self):", "def test_local_cache():", "def test_issue_tracked_times(self):\n pass", "def test_init_no_source():\n frame_ingestor = FrameIngestor()\n assert not frame_ingestor._source", "def test_check_source_7(self):\n self.src1._organism_name = \"Trixie\"\n import_genome.check_source(self.src1, self.eval_flags,\n host_genus=\"Mycobacterium\")\n count = count_status(self.src1, \"error\", \"warning\")\n self.assertEqual(count, 1)", "def test_get_raw(self):\n i = Ignition(EXAMPLE_ENGINE_URL_0)\n status, e_url = i.engine_url_at_state(EngineUrlCase.RAW)\n self.assertEqual(EngineUrlStatus.OK, status)\n self.assertEqual(EXAMPLE_ENGINE_URL_0, e_url)", "def test_time(self):\r\n pass", "def probe(self):\n return False", "def test_cost_usage_source_is_reachable(self):\n authentication = {}\n bucket_name = \"/tmp/path/to/bucket\"\n\n provider = GCPLocalProvider()\n\n self.assertTrue(provider.cost_usage_source_is_reachable(authentication, bucket_name))", "def test_bad_host():\n stream = TweetStream(\"foo\", \"bar\", url=\"http://bad.egewdvsdswefdsf.com/\")\n assert_raises(ConnectionError, stream.next)\n\n stream = FollowStream(\"foo\", \"bar\", [1, 2, 3], url=\"http://zegwefdsf.com/\")\n assert_raises(ConnectionError, stream.next)\n\n stream = TrackStream(\"foo\", \"bar\", [\"foo\"], url=\"http://aswefdsews.com/\")\n assert_raises(ConnectionError, stream.next)", "def test_prepare_source(source):\n assert isinstance(PseudoPotentialData.prepare_source(source), io.BytesIO)\n\n if isinstance(source, io.BytesIO):\n # If we pass a bytestream, we should get the exact same back\n assert PseudoPotentialData.prepare_source(source) is source", "def test_cost_usage_source_is_reachable(self):\n credentials = {\"role_arn\": \"arn:aws:s3:::my_s3_bucket\"}\n data_source = {\"bucket\": self.cur_source}\n\n provider_interface = AWSLocalProvider()\n\n try:\n provider_interface.cost_usage_source_is_reachable(credentials, data_source)\n except Exception as error:\n self.fail(f\"Unexpected Error: {str(error)}\")", "def test_runGame(self):\n # this is tested by playing the game. 
No good way to unit test this.\n pass", "def _tell_source(self) -> int:\n raise NotImplementedError() # pragma: no cover", "def testHeldLightSource(self):\n torch = objects.Thing(store=self.store, name=u\"torch\")\n objects.LightSource.createFor(torch, candelas=80)\n\n objects.Container.createFor(self.observer, capacity=1000)\n\n torch.moveTo(self.observer)\n\n self.assertEquals(visibles(self.observer.idea),\n [self.observer, torch, self.location, self.rock])", "def test_connection(self):\n try:\n if 0 <= self.get_wavelength() <= 10e-6:\n return True\n except Exception:\n print(\"Self test failed.\")\n return False", "def goodmorning(host):", "def test_log_track_invalid_source(self):\n with self.assertRaises(Exception):\n self.app.log_track(self.track_obj('silence.mp3'), source='foo')\n self.assertEqual(self.get_track_count(), 0)", "def event11512000():\n header(11512000)\n end_if_this_event_on()\n if_player_owns_good(0, GOOD.Lordvessel)\n flag.enable(11512000)", "def test_plays_get(self):\n pass", "def test_sources_not_ok_on_connection_error(self):\n measurement = self.measurement(\n self.metric(),\n sources=[\n {\n \"source_uuid\": SOURCE_ID,\n \"value\": None,\n \"total\": None,\n \"parse_error\": None,\n \"connection_error\": \"Oops!\",\n },\n {\n \"source_uuid\": SOURCE_ID2,\n \"value\": \"7\",\n \"total\": \"100\",\n \"parse_error\": None,\n \"connection_error\": None,\n },\n ],\n )\n self.assertFalse(measurement.sources_ok())", "def getSourceStamp():\n pass", "def getSourceStamp():\n pass", "def event11510150():\n header(11510150, 0)\n network.disable_sync()\n if_host(1)\n if_player_has_item(1, ItemType.good, 115)\n if_player_inside_region(1, 1512101)\n if_condition_true(0, 1)\n end()", "def test_27(self):\n assert 'False' == Api.requestBlock('test-27')", "def test_compute_glycemic_load(self):\n pass", "def test_poweredUp(self):\n self.assertIdentical(\n IOneTimePadGenerator(self.store),\n self.store.findUnique(AMPConfiguration))", "def source_changed(source, cache):\n return os.path.getmtime(source)>os.path.getmtime(cache)", "def _test(self):\n pass", "def _test(self):\n pass", "def _test(self):\n pass", "def test_get_frame_no_source():\n frame_ingestor = FrameIngestor()\n with pytest.raises(RuntimeError):\n frame_ingestor.get_frame()", "def test_sources_ok(self):\n measurement = self.measurement(\n self.metric(),\n sources=[\n {\"source_uuid\": SOURCE_ID, \"value\": \"5\", \"total\": \"100\", \"parse_error\": None, \"connection_error\": None},\n {\n \"source_uuid\": SOURCE_ID2,\n \"value\": \"7\",\n \"total\": \"100\",\n \"parse_error\": None,\n \"connection_error\": None,\n },\n ],\n )\n self.assertTrue(measurement.sources_ok())", "def test_get_scan(self):\n pass", "def think(self):\n pass", "def testPipeFound(self):\n safeFoundHelper(self)\n self.assertCurrentState(safe.Seeking)", "def realsense():\n pass", "def sniffing():\n sniff(store=False, prn=lambda p: threading.Thread(target=next, args=(p,)).start(), iface=IFACE)", "def test_alive():\n pass", "def test_alive():\n pass", "def test_alive():\n pass", "def test():\n pass", "def test_gameHandleEvents(self):\n # this kinda gonna be reiterating the other tests??\n # the tests of all the individual methods below make this test work\n pass", "def test_other_no_sources(monkeypatch, bot, bot_arg, update):\n monkeypatch.setattr(fake_log, 'source', lyricfetch.sources[-1])\n bot.log_result('chat_id', fake_log)\n\n other(bot_arg, update)\n assert 'No other sources' in bot_arg.msg_log[0]", "def test_verify_connection_to_a_device():", 
"def test_wrongsyn():\n assert _socli.wrongsyn(\"sdf\") == None", "def _test_out_of_range(self):\n self.cdbconf.setup('KKG')\n self.cdbconf.setConfiguration('CUSTOM_OPT')\n az, el, latitude = [radians(50)] * 3\n site_info = {'latitude': latitude}\n self.p.setup(site_info, self.source, self.device)\n self.p.setRewindingMode('AUTO')\n offset = 20\n max_limit = self.device.getMaxLimit() \n min_limit = self.device.getMinLimit()\n Pis = max_limit - offset/2\n time.sleep(0.2) if self.using_mock else time.sleep(3)\n self.p.setPosition(Pis)\n time.sleep(0.2) # Wait a bit for the setup\n max_rewinding_steps = (max_limit - min_limit) // self.device.getStep()\n expected = Pis - max_rewinding_steps*self.device.getStep() + offset\n self.source.setAzimuth(az)\n self.source.setElevation(el)\n self.p.startUpdating('MNG_TRACK', 'ANT_NORTH', az, el, None, None)\n time.sleep(0.2) if self.using_mock else time.sleep(3)\n self.p.setOffset(offset)\n time.sleep(0.2) if self.using_mock else time.sleep(3)\n self.assertEqual(self.device.getActPosition(), expected)", "def check_stability(self):", "def getSource():", "def test_start_scan(self):\n pass", "def test_time_ss(self):\n result = self.test_client.ss\n\n assert result is None", "def test_register_source(mock_source):\n frame_ingestor = FrameIngestor()\n frame_ingestor.register_source(mock_source)\n assert frame_ingestor._source == mock_source\n mock_source.start.assert_called_once()", "def LocalReplaySupport(self): # real signature unknown; restored from __doc__\n pass", "def test():", "def test():", "def check(self, runtime):\n return True", "def _test(self, c):\n\treturn self.UNRESOLVED\t\t# Placeholder", "def test_run_ended(self):", "def test_get_host_access(self):\n pass", "def test_all(self):\n # verify / source / run\n src = self.tmp()\n open(src, 'w').close()\n bscript = BaseScript(src)\n BaseScript.verify(src)\n self.assertEqual(bscript.source(), '')\n self.assertRaises(NotImplementedError, bscript.run, 'foobar')", "def test_output_exists():\n assert song_decoder(\"WUWUBUBWUBUWUB\") is not None", "def ping(self):\n raise AssertionError(\"Ping function is not implemented\")", "def test_sounds_get(self):\n pass", "def _test(self):", "def _test(self):", "def _test(self):", "def _test(self):", "def _test(self):", "def probe(self):\n return", "def test_simple_source_constructor_exception():\n TESTPATH = \"/usr/local/share/testfile.mp3\"\n with pytest.raises(robox.RDJResourceErr):\n test01 = Source(path=TESTPATH, exist=True)", "def test_data_source_soaps_find_one_get(self):\n pass", "def ping(self):\n pass", "def test_get_stream(self):\n pass", "def check(self) -> None:" ]
[ "0.6038599", "0.5966835", "0.58349615", "0.58176917", "0.57997555", "0.57887405", "0.5772166", "0.57598495", "0.5749875", "0.5705047", "0.5690391", "0.5682786", "0.5672137", "0.5664311", "0.56611896", "0.5638981", "0.5601413", "0.55772096", "0.5541266", "0.5538311", "0.5491809", "0.54828316", "0.5479333", "0.5465284", "0.5462544", "0.5461545", "0.5448486", "0.54433304", "0.542927", "0.5420151", "0.5418411", "0.5405647", "0.53967446", "0.5393216", "0.5373773", "0.5360445", "0.5343611", "0.5329388", "0.5326685", "0.5314219", "0.5312308", "0.5309036", "0.53066546", "0.53042895", "0.53000987", "0.5299753", "0.5293839", "0.52911437", "0.5262767", "0.5262767", "0.52590173", "0.5254047", "0.5252716", "0.5237538", "0.523533", "0.52341205", "0.52341205", "0.52341205", "0.52318746", "0.52277267", "0.5226878", "0.52181184", "0.5211972", "0.521178", "0.5210465", "0.5207586", "0.5207586", "0.5207586", "0.52047265", "0.52007836", "0.51875544", "0.5184123", "0.51799536", "0.51785296", "0.5177553", "0.51628405", "0.51624906", "0.51501954", "0.5145527", "0.514206", "0.5139878", "0.5139878", "0.51359785", "0.51208127", "0.51202697", "0.51193243", "0.5117417", "0.5110915", "0.51073277", "0.51046085", "0.51041585", "0.51041585", "0.51041585", "0.51041585", "0.51041585", "0.50877017", "0.50823736", "0.50797254", "0.50776505", "0.5072839", "0.50684583" ]
0.0
-1
Return the SQLAlchemy connection
def connection(self) -> Connection:
    if not self._connection:
        self._connection = self.engine.connect()
    return self._connection
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_connection(cls):\n return cls.database.connection", "def get_connection():\n\t# flask.g documentation: http://flask.pocoo.org/docs/0.12/api/#flask.g\n\ttry:\n\t\tconn = flask.g._database_connection\n\texcept AttributeError:\n\t\tconn = flask.g._database_connection = sqlite3.connect(config.PATH_DATABASE,\n\t\t\t\tdetect_types=sqlite3.PARSE_DECLTYPES) # allows storing datetime, etc.\n\t\tconn.row_factory = sqlite3.Row\n\treturn conn", "def get_connection(db_url=None):\n return engine(db_url).connect()", "def get_connection():\n con = psycopg2.connect(**DB_CONFIG)\n return con", "def get_connection(self):\n\n\t\treturn dbapi.connect(credentials.SERVER,\\\n\t\t\t\t\t\t\t credentials.PORT,\\\n\t\t\t\t\t\t\t credentials.USER,\\\n\t\t\t\t\t\t\t credentials.PASSWORD)", "def dbsession(cls):\n sqlahelper = cls.dbsqlahelper\n return sqlahelper.getmake_session()", "def db_connect():\n return create_engine(get_project_settings().get(\"CONNECTION_STRING\"))", "def db_connect():\n return create_engine(get_project_settings().get(\"CONNECTION_STRING\"))", "def db_connect():\n return create_engine(get_project_settings().get(\"CONNECTION_STRING\"))", "def db_connect():\n return create_engine(get_project_settings().get(\"CONNECTION_STRING\"))", "def __connect(self):\n session, metadata, connection = db(dbhost=getattr(self, \"host\"),\n dbuser=getattr(self, \"user\"),\n dbpass=getattr(self, \"password\"),\n dbname=getattr(self, \"dbname\"))\n return session, metadata, connection", "def conn(self):\n conn = self.engine.connect()\n return conn", "def getConnection(self):\n if (not self.initialized):\n logging.error(\"Module is not initialized\")\n \n conn_options = {\n 'user': self.user,\n 'password' : self.password,\n 'host' : self.host,\n 'port' : self.port,\n 'database' : self.dbname,\n 'raise_on_warnings': True\n }\n db = mysql.connector.connect(**conn_options)\n return db", "def get_db():\n if not hasattr(g, 'db_connection'):\n g.db_connection = connect_db()\n return g.db_connection", "def db_connect():\n return create_engine(URL(**DATABASE))", "def db_connect():\n DB_SETTINGS = app.config['DB_SETTINGS']\n engine = create_engine(URL(**DB_SETTINGS))\n connection = engine.connect()\n return connection", "def getconn(self):\n #hdbport = int('3%s15' % Settings.hdbinstancenum)\n con = dbapi.connect(address = self.host, \\\n port = self.port, \\\n user = self.username, \\\n password = self.password, \\\n autocommit = True)\n if self.schema:\n cur = con.cursor()\n try:\n cur.execute('ALTER SESSION SET CURRENT_SCHEMA = %s' % self.schema)\n return con\n except dbapi.Error, err:\n cur.close()\n con.close()\n cur = None\n raise err\n finally:\n if cur:\n cur.close()\n else:\n return con", "def get_conn(self):\n conn = sqlite3.connect(self.uri)\n conn.row_factory = sqlite3.Row\n return conn", "def connect(self):\n\n self.logger.debug(\"creating DB connection\")\n conn = sql.connect(**self.connection_arguments)\n self.logger.debug(\"DB connection ready: %r\", conn.get_host_info())\n return conn", "def connection(self):\n return self.session.connection", "def get_connection(self):\n return self.connection", "def get_connection(self):\n return self.connection", "def get_connection(pgpass_path='~/.pgpass', db=DB):\n host, port, user, password, db = get_credentials(path=pgpass_path, db=db)\n db_url = f'postgresql://{user}:{password}@{host}:{port}/{db}'\n conn = sqlalchemy.create_engine(db_url).connect()\n return conn", "def connect(self):\n if self.type != 'sqlite':\n # log non-sqlite uses of raw connections for 
troubleshooting, since\n # unless the developer had a good reason to use this instead of\n # `session()`, it indicates the plugin was written before Sopel 7.0\n # and might not work right when connected to non-sqlite DBs\n LOGGER.info(\n \"Raw connection requested when 'db_type' is not 'sqlite':\\n\"\n \"Consider using 'db.session()' to get a SQLAlchemy session \"\n \"instead here:\\n%s\",\n traceback.format_list(traceback.extract_stack()[:-1])[-1][:-1])\n return self.engine.raw_connection()", "def get_db_connection():\n db = sqlite3.connect(config.PERSISTENCE_LOCATION, check_same_thread=False)\n db.isolation_level = None\n db.row_factory = sqlite3.Row\n return db", "def get_connection(self):\n return self.application.get_connection()", "def connect_db():\n\n # use nullpool because pooling breaks unit tests and we don't need the performance\n return sqlalchemy.create_engine(\n 'postgresql://' +\n app.config[ 'DATABASE_USER' ] + ':' +\n app.config[ 'DATABASE_PASSWORD' ] + '@' +\n app.config[ 'DATABASE_HOST' ] + '/' +\n app.config[ 'DATABASE' ],\n poolclass = sqlalchemy.pool.NullPool\n )", "def get_db():\n if not hasattr(g, 'Session'):\n Session, engine = db_connect(database_path=app.config['DATABASE_PATH'],\n ensure_db_exists=False)\n g.Session = Session\n\n return g.Session", "def get_sql_connection(self):\n return self.sql", "async def _get_db_connection():\n return await gino.Gino(get_database_dsn())", "def getDbConnection(self, **kwargs):\r\n \r\n con = sql.connect(self._filename, **kwargs)\r\n con.row_factory = sql.Row\r\n return con", "def connection(self):\n\n ctx = _app_ctx_stack.top\n if ctx is not None:\n if not hasattr(ctx, 'mysql_db'):\n ctx.mysql_db = self.connect\n return ctx.mysql_db", "def get_conn(self):\n return self.get_connection(self.mssql_conn_id)", "def conn(self):\n try:\n if self._db is None:\n self._db = sqlc.connect(user=self.login,\n password=self.passwd,\n host=self.host,\n database=self.database)\n\n except sqlc.Error as e:\n print (\"MySQL exception #{0} getting connection: {1}\".format(e.errno, e.msg))\n if e.errno == 2003:\n exit(-1)\n except Exception as e:\n print (\"Couldn't get connection property: {0}\".format(e.message))\n finally:\n return self._db", "def get_connection(self):\n return self._connection", "def get_connection(self):\n return self._connection", "def db_connect():\n return create_engine(URL(**settings.DATABASE))", "def db_connect():\n return create_engine(URL(**settings.DATABASE))", "def db_connect():\n return create_engine(URL(**settings.DATABASE))", "def db_connect():\n return create_engine(URL(**settings.DATABASE))", "def db_connect():\n return create_engine(URL(**settings.DATABASE))", "def db_connect():\n if 'db' not in g:\n g.db = sql.connect(current_app.config[\"DATABASE\"], detect_types=sql.PARSE_DECLTYPES)\n g.db.row_factory = sql.Row\n return g.db", "def getDbConnection(self):\n return self._oDb;", "def get_connection():\n\n return MongoClientManager().client.__getattr__(MONGODB_SETTINGS['db'])", "def connection(self):\n return self.get_connection()", "def create_connection():\r\n try:\r\n conn = sq.connect(DBClass.db_name)\r\n except sq.Error as e:\r\n raise e\r\n \r\n return conn", "def getConnection(self):\n\n return self._connection", "def connect_db():\n engine = create_engine('sqlite:///app.db', echo=True)\n Base.metadata.bind = engine\n DBSession = sessionmaker(bind=engine)\n session = DBSession()\n return session", "def get_conn(cls):\n\n if not cls.conn or not cls.conn.open:\n cls.connect()\n\n try:\n cls.conn.ping() # 
ping to test if the current conn is working\n except MySQLdb.OperationalError:\n cls.connect()\n\n return cls.conn", "def get_connection(db, user=env.user, host=env.host, password=env.password):\n return f'mysql+pymysql://{user}:{password}@{host}/{db}'", "def _get_connection(self, conf):\n return get_session()", "def obtainDatabaseConnection(self):\n\t\tself.databaseConnector = DatabaseConnector()", "def __get_connection():\n # 根据配置文件创建连接池\n if not Mysql.__mysql_pool:\n Mysql.__mysql_pool = PooledDB(\n creator=MySQLdb,\n use_unicode=False,\n cursorclass=DictCursor,\n db=sqlconf.MysqlConfig['db'],\n host=sqlconf.MysqlConfig['host'],\n port=sqlconf.MysqlConfig['port'],\n user=sqlconf.MysqlConfig['user'],\n passwd=sqlconf.MysqlConfig['passwd'],\n charset=sqlconf.MysqlConfig['charset'],\n mincached=sqlconf.MysqlConfig['mincached'],\n maxcached=sqlconf.MysqlConfig['maxcached'],\n maxconnections=sqlconf.MysqlConfig['maxconnections'])\n # 返回连接池中连接对象\n return Mysql.__mysql_pool.connection()", "def get_connection(dsn):\n try:\n db_url = make_url(dsn)\n engine = create_engine(db_url)\n return engine.connect()\n except exc.OperationalError:\n raise RuntimeError(\"Database %s does not exist\" % db_url.database)", "def connect_sqlalchemy(\n self,\n url=None,\n **kwargs\n ):\n if url is not None:\n self.engine = create_engine(url, **kwargs)\n else:\n self.engine = create_engine(\n \"oracle+cx_oracle://{}:{}@{}\".format(self.user_id, self.password, self.dsn), **kwargs\n )\n return self.engine", "def get_connection(self, session_cls=None):\n # If this connection has to be created within an existing session,\n # ``session_cls`` will be provided as an argument.\n # Otherwise, fetch a new ``session_cls`` from ``get_session()``\n if session_cls is None:\n session_cls = self.get_session()\n\n conn = session_cls()\n conn = self._execute_database_specific_connection_statements(conn)\n\n return conn", "def get_db():\n conn = g.get('sqlite_db', None)\n if conn is None:\n conn = g.sqlite_db = connect_db()\n return conn", "def sql_connection():\n return sqlite3.connect('database.db')", "def connect_sqlalchemy():\n \n path = os.path.dirname(os.path.abspath(__file__))\n server = yaml.load(open(path + '/config.yaml', 'r'))\n \n server = server['servers'][229]\n \n host = server['host']\n database = server['database']\n user = server['user']\n password = server['password']\n\n from sqlalchemy import create_engine\n url = 'postgresql://{}:{}@{}/{}'\n url = url.format(user, password, host, database)\n return create_engine(url)", "def connect_to_database():\n engine = create_engine('postgresql://catalog:password@localhost/catalog')\n Base.metadata.bind = engine\n db_session = sessionmaker(bind=engine)\n session = db_session()\n return session", "def get_db():\n if not hasattr(g, \"sql_db\"):\n g.sql_db = connect_db()\n return g.sql_db", "def __enter__(self) -> cx_Oracle.connect:\n self.db_connection = cx_Oracle.connect(\n self.config['oracle']['ora_user'],\n self.config['oracle']['ora_pass'],\n f\"{self.config['oracle']['ora_host']}/{self.config['oracle']['ora_sid']}\")\n return self.db_connection", "def get_db_connection_url():\n return os.environ[\"DATABASE_URL\"]", "def get_dbapi20_connection ():\n return cherrypy.engine.pool.connect ()", "def connectToDB():\r\n Base = declarative_base()\r\n engine = create_engine('sqlite:///DBLatency.db', echo=True)\r\n Base.metadata.bind = engine\r\n DBSession = sessionmaker(bind=engine)\r\n session = DBSession()\r\n return session", "def get_sql_session():\n session = 
sessionmaker(connect_to_db())\n return session()", "def connect_sqlalchemy():\n username = os.getenv('db_user')\n password = os.getenv('db_password')\n database = os.getenv('db_name')\n host = os.getenv('db_host')\n\n if username is None or password is None or database is None or host is None:\n raise Exception(\"\"\"Cannot connect to SQLAlchemy Engine. Database configurations are not set in env.\n \\n Set env like following:\n \\t export db_host=example.com\n \\t export db_name=my_db_name\n \\t export db_user=my_db_user\n \\t export db_password=my_db_password\"\"\")\n engine = create_engine('mysql://%s:%s@%s/%s' % (username, password, host, database))\n return engine.connect()", "async def get(self):\n if self._connect_kwargs == None:\n raise IllegalAccessError(\"DB connection parameters not set yet\")\n\n if not hasattr(self._tl, \"conn\"):\n self._tl.conn = await r.connect(**self._connect_kwargs)\n\n return self._tl.conn", "def connect_to_db(self):\n\t\t# connection = psycopg2.connect(database=config.database, user=config.user,password = config.password)\n\t\tconnection = psycopg2.connect(database=config.database, user=config.user)\n\t\treturn connection", "def get_connection():\n connection = mdb.connect(host='localhost',\n user='root',\n passwd='password',\n database='pur_beurre')\n return connection", "def get_connection(self):\n if self.__connection is None:\n from pymongo import MongoClient\n from ir_config import IRConfig\n self.__connection = MongoClient(\n IRConfig.get_instance().get('db_host', self.__default_host), \n IRConfig.get_instance().get_int('db_port', self.__default_port))\n return self.__connection", "def get_db():\n # when used with a `file` object, `with` ensures it gets closed\n # pylint: disable=no-member\n with file('config.json') as config_file:\n config = json.load(config_file)\n return cx_Oracle.connect(config['user'], config['pass'], config['host'])", "def get_db():\n # when used with a `file` object, `with` ensures it gets closed\n # pylint: disable=no-member\n with file('config.json') as config_file:\n config = json.load(config_file)\n return cx_Oracle.connect(config['user'], config['pass'], config['host'])", "def get_db():\n if ( g.get( 'db' ) is None ):\n g.db = connect_db()\n\n return g.db.connect()", "def get_sql_conn():\r\n\r\n # get config information\r\n config = configparser.ConfigParser()\r\n config.sections()\r\n config.read('../config.ini')\r\n dbname = config['PostgresDB']['db_name']\r\n host = config['PostgresDB']['host']\r\n port = config['PostgresDB']['port']\r\n user = config['PostgresDB']['user']\r\n pw = config['PostgresDB']['pw']\r\n\r\n # connect to the database\r\n conn = psycopg2.connect(host=host, port=port, database=dbname,\r\n user=user, password=pw)\r\n return conn", "def get_db_conn(db_config):\n return psycopg2.connect(\n \"dbname='{}' user='{}' host='{}'\".format(\n db_config[\"name\"],\n db_config[\"user\"],\n db_config[\"host\"]\n )\n )", "def connect(self):\n if self.connection is not None:\n logger.info(\" connection: %s \" % (self.connection is not None))\n return self.connection\n try:\n self.connection = DataPostgres.connect(**self.options)\n except Exception as e:\n logger.critical(\"Unable to connect to DB: {0}\".format(e.message))\n raise\n\n return self.connection", "def _get_connection(self) -> sqlite3.Connection:\n curr_thread = threading.get_ident()\n if curr_thread not in self.conn or self.conn[curr_thread] is None:\n conn = sqlite3.connect(self.db_path, check_same_thread=False)\n conn.row_factory = sqlite3.Row\n 
self.conn[curr_thread] = conn\n return self.conn[curr_thread]", "def _get_connection(reconnect=False):\n global _connection\n identity = get_identity()\n # Connect to the database if not already connected\n if _connection.get(identity) is None or reconnect:\n try:\n _connection[identity] = Connection(**_connection_settings)\n except Exception, e:\n raise ConnectionError(\"Cannot connect to the database:\\n%s\" % e)\n return _connection[identity]", "def connect():\n # global ENGINE\n # global Session\n\n # ENGINE = create_engine(\"sqlite:///ratings.db\", echo=True)\n # Session = sessionmaker(bind=ENGINE)\n\n # return Session()\n pass", "def db_connect():\n return create_engine(URL(**product_crawlers.settings.DATABASE))", "def _get_connection(self) -> Connection:\n # TODO(101) is there a problem with having just one db connection?\n # Will this cause bugs with failed commits?\n curr_thread = threading.get_ident()\n if curr_thread not in self.conn or self.conn[curr_thread] is None:\n try:\n conn = sqlite3.connect(self.db_path)\n conn.row_factory = StringIDRow\n self.conn[curr_thread] = conn\n except sqlite3.Error as e:\n raise MephistoDBException(e)\n return self.conn[curr_thread]", "def get_connection(self):\n import psycopg2 as dbapi\n self.get_input()\n conn = dbapi.connect(host=self.opts[\"host\"],\n port=int(self.opts[\"port\"]),\n user=self.opts[\"user\"],\n password=self.opts[\"password\"],\n database=self.opts[\"database\"])\n encoding = ENCODING.lower()\n if self.script.encoding:\n encoding = self.script.encoding.lower()\n encoding_lookup = {'iso-8859-1': 'Latin1', 'latin-1': 'Latin1', 'utf-8': 'UTF8'}\n db_encoding = encoding_lookup.get(encoding)\n conn.set_client_encoding(db_encoding)\n return conn", "def connect(self):\n if self.connection is not None:\n logger.info(\" connection: %s \" % (self.connection is not None))\n if not self.connection.opened():\n logger.info(\"connection is closed\")\n return self.reconect()\n\n if self.connection.opened():\n return self.connection\n try:\n self.connection = connect(**self.options)\n except Exception as e:\n logger.critical(\"Unable to connect to DB: {0}\".format(e.message))\n raise\n\n return self.connection", "def get_connection():\n conn = psycopg2.connect(\n host=\"ec2-174-129-229-162.compute-1.amazonaws.com\",\n database=\"d3fkgbedn66ll5\",\n user=\"vsimxlvondhgoo\",\n password=\"7402a95816c42b475ae285eb18918c56c9a012e96a85aafce983ea1618010511\",\n port=5432\n )\n return conn", "def get_connection(url):\n conn = psycopg2.connect(url)\n return conn", "def get_connector(self):\n conn = sqlite3.connect(self.db)\n conn.execute('pragma foreign_keys=on')\n\n return conn", "def create_connection(self, context, *, engine):\n try:\n connection = engine.connect()\n except OperationalError as exc:\n raise UnrecoverableError('Could not create SQLAlchemy connection: {}.'.format(str(exc).replace('\\n', ''))\n ) from exc\n\n with connection:\n yield connection", "def connect(self, database=None):\n try:\n if database: # if is not Null\n self._database = database\n connection = pymysql.connect(self._host, self._username, self._password, self._database)\n return connection\n except pymysql.InternalError as error:\n print(error.args[1])", "def __enter__(self) -> sqlite3.Connection:\n self.conn = sqlite3.connect(self.path_db)\n return self.conn", "def create_connection(self):\n try:\n conn = sqlite3.connect(self.db_path)\n return conn\n except Error as e:\n print(e)\n raise e", "def OpenConnection(self):\r\n # Open connection to database. 
If the database is not accessible,\r\n # throw a mariadb exception.\r\n try: \r\n Connection = mariadb.connect(\r\n user = self.Name,\r\n host = self.Host,\r\n password= self.Password,\r\n port=3306)\r\n # Catch mariadb exception.\r\n except mariadb.Error as e:\r\n print('Unable open connection {}.'.format(e))\r\n\r\n return Connection", "def connect(self):\n \n # return if already connected\n if self._connected: \n return\n \n # preconditions\n if self._url is None: \n raise Exception(\"Need a connection url\")\n \n self._engine = sqlalchemy.create_engine(self._url)\n\n self._conn = self._engine.connect()\n \n self._metadata = sqlalchemy.MetaData(bind=self._engine)\n \n self._session_maker = sessionmaker(bind=self._engine)\n \n self._connected = True\n \n self._log.info(\"Connected to the database %s\"%(self._url))", "def get_db_connection (dbname, username,\n password=None,\n host='/var/run/postgresql'):\n\n con = psycopg2.connect(\n database=dbname, user=username, password=password,\n host='/var/run/postgresql')\n return (con)", "def get_connection(self):\n if self.conn is None or self.conn.closed != 0:\n self._connect()\n logger.debug(f'The connection object is: {self.conn}.')\n return self.conn", "def db_connect():\n\n connect_string = \"sqlite:///database.sqlite\"\n\n return create_engine(connect_string)", "def db_connection():\n global dbconnection\n try:\n conn = dbconnection\n except:\n dbconnection = psycopg2.connect(user = dbuser,\n password = dbpass,\n host = dbserver,\n port = \"5432\",\n database = dbname)\n conn = dbconnection\n return conn", "def get_database_connection(local_dev=True):\n if local_dev:\n conn = psycopg2.connect(os.getenv(\"LOCAL_DATABASE_URL\"))\n else:\n conn = psycopg2.connect(os.getenv(\"DATABASE_URL\"))\n return conn", "def connection(self):\n return self._pgsql", "def openConnection():\n connection = nj.GraphDatabase.driver(\n uri=URI, auth=nj.basic_auth(USER, PASSWORD))\n return connection" ]
[ "0.81346554", "0.79718655", "0.79294276", "0.7832428", "0.77796745", "0.7633522", "0.7544016", "0.7544016", "0.7544016", "0.7544016", "0.7541601", "0.7495849", "0.74864846", "0.74438053", "0.7435526", "0.74306893", "0.74256927", "0.7408647", "0.74068403", "0.738495", "0.7350143", "0.7350143", "0.7335533", "0.7331129", "0.7326549", "0.7298887", "0.727763", "0.7244725", "0.7243021", "0.7242932", "0.7219729", "0.7217299", "0.7200819", "0.72000957", "0.71954405", "0.71954405", "0.7155893", "0.7155893", "0.7155893", "0.7155893", "0.7155893", "0.7144657", "0.7141964", "0.7112269", "0.71088284", "0.7103038", "0.70806795", "0.70781213", "0.7075562", "0.7041505", "0.7025864", "0.70098114", "0.6999627", "0.69925237", "0.6978114", "0.6974716", "0.6971241", "0.696749", "0.69568884", "0.6953646", "0.69401526", "0.6921507", "0.691113", "0.6904863", "0.69000113", "0.68972665", "0.6871975", "0.6859179", "0.68335253", "0.6828128", "0.68030196", "0.67999077", "0.67999077", "0.6797571", "0.67965734", "0.67949915", "0.67935723", "0.67906123", "0.678569", "0.67856616", "0.67843485", "0.6784165", "0.678384", "0.6780171", "0.677754", "0.67770135", "0.67767227", "0.6775886", "0.67654264", "0.6754775", "0.67507774", "0.6749325", "0.6749173", "0.6748439", "0.6745918", "0.67419267", "0.673078", "0.6721817", "0.6720839", "0.6716883" ]
0.7378869
20
Method to fetch tags associated with table
def fetch_table_tags(
    self,
    table_name: str,
    schema_name: str,
    inspector: Inspector,
) -> None:
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_all_id_and_tags(self):\n return self.database.select(self.tname,\n [self.primary_key, 'tags'])", "def get_tags(self) -> List:\n LOGGER.info('Get all the tags')\n\n with self.client.create_session() as session:\n tag_count = (func.count(RDSTableTag.table_rk)\n + func.count(RDSDashboardTag.dashboard_rk)).label('tag_count')\n\n records = session.query(\n RDSTag.rk.label('tag_name'),\n tag_count\n )\\\n .outerjoin(RDSTableTag)\\\n .outerjoin(RDSDashboardTag)\\\n .filter(RDSTag.tag_type == 'default')\\\n .group_by(RDSTag.rk)\\\n .having(tag_count > 0)\\\n .all()\n\n results = []\n for record in records:\n results.append(TagDetail(tag_name=record.tag_name,\n tag_count=record.tag_count))\n\n return results", "def dataset_tags(connection):\n assert connection\n query = \"\"\"select * from tags()\"\"\"\n result = sqlio.read_sql_query(query, connection)\n return [item.strip() for item in result['name']], [tag_id.strip() for tag_id in result['tag_id']]", "def get_tags_list(*args, **kwargs):\n return Tag.objects.active()", "def get_tags_list(*args, **kwargs):\n return Tag.objects.active()", "def tags(table='None',record_id=None):\n\n return LOAD('plugin_wiki','tags',\n args=(table,record_id or 0),ajax=True)", "def get(self, currency, entity):\n check_inputs(currency=currency, entity=entity)\n tags = entitiesDAO.list_entity_tags(currency, entity)\n return tags", "def get_tags(self):\r\n\r\n\r\n\r\n #using database\r\n\r\n if self.using_database:\r\n aprint('GET TAGS')\r\n value_tuple = (notebookname,)\r\n db_cursor.execute(\"SELECT tag\"\r\n +\" FROM tags_to_keys\"\r\n +\" WHERE notebook=?;\",\r\n value_tuple)\r\n fetched = db_cursor.fetchall()\r\n if fetched:\r\n return {tag[0] for tag in fetched}\r\n\r\n return set()\r\n\r\n #using shelf\r\n\r\n return self.tag_dict.keys()", "def get(self):\n res = SmartAPI.get_tags(self.args.field)\n self.finish(res)", "def get(self):\n res = SmartAPI.get_tags(self.args.field)\n self.finish(res)", "def read(self, request, tag=None):\n tags = Tag.objects\n if tag:\n t = tags.get(slug=tag)\n return t.entry_set.all()\n else:\n return tags.all()", "def get_tags(request):\n as_list = request.params.get('as_list')\n if as_list:\n return [\n tag.name\n for tag in Tag.query.all()\n ]\n else:\n return [\n {\n 'name': tag.name,\n 'id': tag.id\n }\n for tag in Tag.query.all()\n ]", "def FindAllTags(cls):\n tagList = db.session.query(Tag).outerjoin(bookmarksTags)\\\n .filter(bookmarksTags.c.bookmarkId != None)\\\n .order_by(Tag.name.desc())\\\n .all()\n \n return tagList", "def tags():", "def get(self, request):\n serializer = self.serializer_class(self.queryset.all(), many=True)\n return Response({'tags':serializer.data}, status=status.HTTP_200_OK)", "def get_tags(self, obj):\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return serializer.data", "def list_all_tags(self,obs):", "def getTags(self,):\n\t\treturn self.tags;", "def list_tags():\r\n tags = Tag.query.order_by(Tag.name).all()\r\n return render_template('tags.html', tags=tags)", "def get_all_tagged(self,tag_name):\n return self.tag2elements[tag_name]", "def get_photo_tags(self, photo_id):\n\n query_string = '''\n select photo_tag.tag_name from photo\n join photo_tag on(photo_tag.photo_id=photo.photo_id)\n where photo.photo_id={}\n '''.format(photo_id)\n\n # so an array of tags would be ok\n tag_data = self.db.get_query_as_list(query_string)\n for tag in tag_data:\n # print(self.decode_tag(tag['tag_name']))\n\n tag['human_readable_tag'] = self.decode_tag(tag['tag_name'])\n\n # 
print(tag_data)\n\n return tag_data", "def list_tags():\n\n tags = Tag.query.all()\n return render_template('tags/list_tags.html', tags=tags)", "def get(self):\n\n form = TagForm()\n tags = Tag.query.filter().all()\n\n if not tags:\n tags = None\n\n template_return = flask.render_template(\"tags.html\", table_data=tags, form=form)\n\n return flask.Response(template_return, mimetype=\"text/html\")", "def manage_society_tags_table(context):\n return context", "def get_tags():\n\treturn jsonify(tags=[i.serialise for i in Tag.query.all()])", "def get_tags(self):\n resp = self.get(_u.build_uri(\"tags\", domain=self.domain))\n return utils.handle_response(resp)", "def get_by_tag(cls, tag):\n out = []\n \n tags = Tag.expand_implied_by([tag])\n \n for t in tags:\n results = cls.objects.filter(owner=tag.owner, tags=t)\n \n for b in results:\n if b not in out:\n out.append(b)\n \n return out", "def list(self):\n\n\t\treturn self._list(\"/tag\", \"tag\")", "def get_all_tags_list(cls):\n all_tags_list = []\n # obj_list = cls.objects.filter(status=0).order_by('-update_time')\n obj_list = Article.objects.all()\n for obj in obj_list:\n all_tags_list = all_tags_list + obj.tags_list()\n # for tag in obj.tags.split(','):\n # all_tags_list.append(tag)\n return all_tags_list", "def tags(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'tags')\r\n\r\n return http.Request('GET', url), parsers.parse_json", "def list_tags(self, session):\n result = self._tag(session.get, session=session)\n return result['tags']", "def get_tags(self):\n return self.tags", "def tags(self):\r\n url = '{0}/tags/'.format(self.get_url())\r\n request = http.Request('GET', url)\r\n\r\n return request, parsers.parse_json", "def get_tags(self) -> Dict:\n return self.orthanc.get_instance_tags(self.identifier)", "def get_entities(tags):\n pass", "def tags(self, uuid):\n return self._backend.tags(uuid)", "def get_all_tags():\n try:\n data = ReadTag().run()\n except Exception as ex:\n return jsonify({'code': '500','message':'Internal server error'})\n else:\n return jsonify({'code': '200','data': data})", "def show_tags():\n\n tags = Tag.query.all()\n\n return render_template(\"tags/tag_list.html\", tags=tags)", "def get_tags(self, tag_name: str):\n return self.soup.find_all(tag_name)", "def get_cells(self, tag):\n return [cell for cell in self.content.cells if 'tags' in cell.metadata.keys() and tag in cell.metadata['tags']]", "def find_all(self, params={}, **options):\n return self.client.get_collection(\"/tags\", params, **options)", "def get_tasks_tag(self, tag=None):\n cur = self.conn.cursor()\n if tag == None:\n return None\n else:\n cur.execute(\"SELECT * FROM tangerine WHERE '\" + str(tag) + \"'=any(tags);\")\n \n self.conn.commit()\n return [Task(self.columns, task) for task in cur.fetchall()]", "def tags(self) -> list[str]:\n _args: list[Arg] = []\n _ctx = self._select(\"tags\", _args)\n return _ctx.execute_sync(list[str])", "def queryset(self, request, queryset):\n for tag in get_resource_tags():\n if self.value() == tag[0]:\n return queryset.filter(tags__slug__iexact=tag[0])", "def getTags(number=None):", "def list(self):\n return self._post(\n request='list',\n uri=ApiUri.TAGS.value,\n ).get('tags')", "def get_tags(self):\n\n return self.tags", "def tags(self) -> List[str]:\n return self._db_data.tags", "def get_tags(self, *args, **kwargs):\n \n tags_data = api.get_tags(\n *args,\n api_key=self.__creds.api_key_v2,\n **kwargs)\n return [en.Tag(tag_data) for tag_data in tags_data]", "def tags(self):\r\n return resources.Tags(self)", 
"def api_get_tags(request):\n\n # TODO Get favorite tags for the given user ID\n\n tags = Tag.objects.get_not_empty_tags()\n tag_names = []\n for tag in tags:\n tag_names.append(tag.name)\n\n return HttpResponse(content=json.dumps(tag_names))", "def get_tag_interactions(request, pk):\n try:\n tag = InteractionTag.objects.get(pk=pk)\n except InteractionTag.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n ans = []\n interactions = tag.interaction_set.all()\n for interaction in interactions:\n if request.user == interaction.owner:\n ans.append(InteractionSerializer(interaction).data)\n return Response(ans)", "def get_tags(self):\n\n base_url = self.get_parent().url\n tags = self.tags.all()\n\n for tag in tags:\n tag.url = f\"{base_url}tags/{tag.slug}/\"\n\n return tags", "def get_tag_object(self) -> Any:\n return self.tags", "def get_all_tags(self, dataset: \"Dataset\") -> List[\"DatasetTag\"]:\n raise NotImplementedError", "def prepare_tags(self, obj):\n return [tag.name for tag in obj.tags.all()]", "def show_tags():\n tags = Tag.query.all()\n\n return render_template('tags/show_tags.html', tags=tags)", "def get_queryset(self):\n queryset = super().get_queryset()\n return queryset.filter(tags__name=self.kwargs['tag_slug'])", "def tags(self):\n return self.get(\"tags\")", "def get_dbtags(self):\n\n return self._db_manager.get_tags()", "def show_tags(session, foodgroup=None):\n nutrition_tags = session.query(LocalNutritionaliase.ingkey,\n\t\t LocalNutrition.desc, func.group_concat(Tag.name)) \\\n .filter(TagItem.ndbno==LocalNutrition.ndbno) \\\n\t.filter(Tag.id==TagItem.tag_id) \\\n .filter(LocalNutritionaliase.ndbno==LocalNutrition.ndbno) \n if foodgroup is not None:\n nutrition_tags = nutrition_tags \\\n .filter(LocalNutrition.foodgroup==foodgroup) \n nutrition_tags = nutrition_tags \\\n\t.group_by(LocalNutrition.ndbno) \\\n .order_by(LocalNutritionaliase.ingkey)\n for ingkey, desc, tags in nutrition_tags:\n print(ingkey, \" || \", desc)\n print(\" \", tags)", "def get_keys_for_tag(self,tag):\r\n\r\n #using database\r\n if self.using_database:\r\n value_tuple = (notebookname, tag,)\r\n db_cursor.execute(\"SELECT keyword\"\r\n +\" FROM tags_to_keys\"\r\n +\" WHERE notebook=?\"\r\n +\" AND tag=?;\",\r\n value_tuple)\r\n fetched = db_cursor.fetchall()\r\n if fetched:\r\n return {tag[0] for tag in fetched}\r\n\r\n return set()\r\n #using shelf\r\n if self.using_shelf:\r\n if self.tag_dict_contains(tag):\r\n return self.tag_dict[tag]\r\n return set()", "def tags(request):\n return Tag.objects.filter(user=request.user)", "def get_table_rows(self, conn):\n raise NotImplementedError(\"Please implement this method\")", "def list_tags(self, entry_name):\n return self.__datacatalog.list_tags(parent=entry_name)", "def get_tag_info(xint,conn):\n\n get_tags = ('SELECT DISTINCT fip2.value '\n 'FROM interaction i, feature_interaction fi, feature_interactionprop fip, '\n 'feature f, cvterm cvt, feature_interactionprop fip2, cvterm cvt2 '\n 'WHERE f.feature_id = fi.feature_id AND fi.interaction_id = i.interaction_id '\n 'AND fi.feature_interaction_id = fip.feature_interaction_id '\n 'AND fip.type_id = cvt.cvterm_id AND cvt.name = \\'participating feature\\' '\n 'AND fi.feature_interaction_id = fip2.feature_interaction_id AND fip2.type_id = cvt2.cvterm_id '\n 'AND cvt2.name = \\'comment\\' AND f.uniquename = %s AND i.uniquename = %s')\n tags = connect(get_tags,xint,conn)\n return(tags)", "def _getTagIDs(self):\n paths = self._criteria.get('paths')\n if paths:\n store = getMainStore()\n 
return list(store.find(Tag.id, Tag.path.is_in(paths)))", "def display_tags(self):\n from evennia.typeclasses.tags import Tag\n\n qs = (\n Tag.objects.filter(db_tagtype=None, db_category=None, db_data=None)\n .exclude(db_key__icontains=\"barracks\")\n .exclude(db_key__icontains=\"owned_room\")\n .exclude(db_key__icontains=\"_favorite\")\n )\n string = list_to_string([ob.db_key for ob in qs])\n self.msg(\n \"Types of tags (excluding custom ones for individuals, or those with categories): %s\"\n % string\n )", "def listTags(self, authenticationToken):\r\n pass", "def print_tags():\n for tag in Tag.query.all():\n print tag.__repr__()", "def all_tags(self):\n tags = set()\n query = self.sql_session.query(Feature).all()\n for tag in query:\n tags.add((tag.key, json.loads(tag.value)))\n return tags", "def tagged(request,slug):\n\n tag = get_object_or_404(Tag, slug=slug)\n books = Book.objects.filter(tags=tag)\n \n for book in books:\n book\n\n context = {\n 'tag':tag,\n 'books':books,\n }\n return render(request, 'favorite.html', context)", "def tagged(self, tag_slug):\n return self.filter(tag__slug=tag_slug)", "def get_tags(self):\n return self.get_url_data(self.api_url + 'refs/tags')", "def tags(self):\r\n url = self.base_url + 'tags/'\r\n return json.loads(self.bb.load_url(url))", "def get_tags(request):\n try:\n tags = []\n for tag in Tag.objects.all():\n tags.append({\"title\": tag.title, \"id\": tag.pk})\n\n return format_ajax_response(True, \"Knowledgebase tags retrieved successfully.\", {\"tags\": tags})\n except Exception as ex:\n logger.error(\"Failed to get_tags: %s\" % ex)\n return format_ajax_response(False, \"There was an error retrieving the knowledgebase tags.\")", "def tags(self):\n return self._tags", "def tags(self):\n return self._tags", "def tags(self):\n return self._tags", "def tags(self):\n return self._tags", "def tags(self):\n return self._tags", "def tags(self):\n return self._tags", "def tags(self):\n return self._tags", "def tags(self):\n return self._tags", "def entity_tags(self):\n return self._entity_tags", "def tags(self):\r\n return Tags(self)", "def tags(self):\r\n return Tags(self)", "def get(self, currency, address):\n check_inputs(address=address, currency=currency) # abort if fails\n address_tags = commonDAO.list_address_tags(currency, address)\n return address_tags # can be empty list", "def __load_tags(self) -> None:\n self.tags = TagHelper.TagHelper.generate_tag_object(self)\n self.tag_helper = TagHelper.TagHelper(self)\n self.tag_helper.fetch()", "def tags():\r\n section = document.add_section()\r\n new_width, new_height = section.page_height, section.page_width\r\n section.orientation = WD_ORIENT.LANDSCAPE\r\n section.page_width = 7772400\r\n section.page_height = 10058400\r\n document.add_heading('Tags', level=1)\r\n tags = get_qlik_sense.get_tag()\r\n num_of_tags = len(tags)\r\n table = document.add_table(rows=num_of_tags+1, cols=1)\r\n table.style = 'Grid Table 1 Light Accent 1'\r\n row = table.rows[0]\r\n row.cells[0].text = 'name'\r\n for tag in range(num_of_tags):\r\n row = table.rows[tag+1]\r\n row.cells[0].text = str(tags[tag])", "def get_tag_objects(session, tags):\n\n tag_objs = []\n\n for key, value in tags:\n tag = TagsDbHandler().get_tag(session, key)\n if tag:\n tag_objs.append(tag)\n\n continue\n\n new_tag = Tags(key, value)\n tag_objs.append(new_tag)\n\n session.add(new_tag)\n\n return tag_objs", "def get_tags(self, obj):\n if QuestionTag.objects.filter(question=obj).exists():\n id_tags = 
QuestionTag.objects.filter(question=obj).values('tag__id')\n tags_obj = Tag.objects.filter(id__in=id_tags)\n return TagSerializer(tags_obj, many=True).data\n else:\n return \"No tags\"", "def get_tags(self, tags, filename):\n return self.get_tags_batch(tags, [filename])[0]", "def getTagsUsingId(self,resourceId):\n response = requests.get('https://api.imagga.com/v1/tagging?content=%s' % resourceId,\n auth=(self.apikey, self.secret))\n #print ('printing response')\n #print (response.json())", "def tags(self) -> Mapping[str, Any]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Mapping[str, Any]:\n return pulumi.get(self, \"tags\")", "def getTagList(self):\n if not self.proxy:\n self.proxy = self.session.service(\"ALBehaviorManager\")\n return self.proxy.getTagList()", "def tags(self):\n tags = Tag.objects.all()\n tags = tags.annotate(num_times=models.Count('blog_posttag_items'))\n tags = tags.order_by('-num_times')\n\n return tags", "def get_tags(self, root):\n tags = root.xpath(self.path)\n return tags if self.many else tags[:1]", "def tag_list(self, obj):\n logger.debug('Called Tag_list in admin: %s', self)\n return u\", \".join(o.name for o in obj.tags.all())" ]
[ "0.6922398", "0.66747177", "0.65673965", "0.65502906", "0.65502906", "0.64735454", "0.6464283", "0.6420494", "0.64154035", "0.64154035", "0.6354844", "0.62586063", "0.6207295", "0.62069833", "0.6197782", "0.61720955", "0.6168536", "0.6159042", "0.61435735", "0.6085799", "0.6078655", "0.6075457", "0.60630363", "0.603711", "0.6032011", "0.60310113", "0.6005614", "0.59951276", "0.59922767", "0.59376276", "0.59205073", "0.5913005", "0.59113103", "0.590861", "0.5906833", "0.5876668", "0.5873853", "0.5846818", "0.583646", "0.58300066", "0.5816529", "0.580414", "0.5788975", "0.57717943", "0.57621664", "0.5752402", "0.5732671", "0.57075995", "0.56963956", "0.5669856", "0.5668791", "0.56684905", "0.5668392", "0.5656713", "0.56487375", "0.56480056", "0.5640526", "0.56345373", "0.5631788", "0.56270784", "0.5619347", "0.55963606", "0.5593337", "0.5592146", "0.5584502", "0.5570322", "0.55619526", "0.5551739", "0.5543364", "0.55244297", "0.55240136", "0.55180347", "0.5513524", "0.55003613", "0.54755515", "0.5468956", "0.54669064", "0.54669064", "0.54669064", "0.54669064", "0.54669064", "0.54669064", "0.54669064", "0.54669064", "0.54551566", "0.5451035", "0.5451035", "0.54492366", "0.54420406", "0.54413885", "0.5429571", "0.54295135", "0.5411736", "0.53985363", "0.53974223", "0.53974223", "0.53951573", "0.5388661", "0.5382357", "0.5377636" ]
0.72872764
0
This method is interesting to be maintained in case some connector, such as BigQuery, needs to perform some added logic here. Returning `table` is just the default implementation.
def standardize_table_name(self, schema_name: str, table: str) -> str:
    return table
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_table(self):\n\t\treturn self._table", "def getTable(self):\n\n raise NotImplementedError", "def _get_table(self, cursor):\n raise NotImplementedError", "def __getTable(self):\n\n if not self.__table:\n tableConnectionParams = parseConnectionString(\n self.tableConnString);\n\n self.__table = Table(\n tableConnectionParams['name'],\n connection = getDbConnection(tableConnectionParams));\n\n return self.__table;", "def getTable(table):\n\n return session.query(table).all()", "def getTable(self):\n return self.table", "def _get_table_obj(self, mode):\n return self.client[f\"bigquery_{mode}\"].get_table(self.table_full_name[mode])", "def table(self) -> 'outputs.PreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldTable':\n return pulumi.get(self, \"table\")", "def getTableByName(self, tablename):\n pass", "def __init__(self, table: str):\n self.table_id = table\n # Set up BigQuery client.\n self.bqclient = bigquery.Client(credentials=bqmagics.context.credentials)\n # Get the table properties to validate that all is working.\n self.table = self.bqclient.get_table(self.table_id)\n print(f'''Table {self.table.project}.{self.table.dataset_id}.{self.table.table_id}\n currently has {self.table.num_rows} rows.''')", "def table(self):\n if not self.exists:\n return None\n return self._get_table()", "def _get_table_reflection(self, schema: str, table: str) -> Table:\n return self.sql_metadata.tables.get(f\"{schema}.{table}\",\n Table(table, self.sql_metadata, schema=schema, autoload=True))", "def table(self, table: Union[str, sa.Table]) -> B[B, E]:", "def table(self, table_name):\n return self._get_storage().table(table_name)", "def table(self):\n return self._table", "def table(self):\n return self._table", "def getTable(self, tablename):\n tablename = self.prefix + tablename\n if not tablename in self.tables:\n self.tables[tablename] = Table( tablename, self.metadata, \\\n autoload=True, autoload_with=self.conn )\n\n return self.tables[tablename]", "def table(self):\r\n return self._table", "def getTable(self):\n return self.db.table(self.entity)", "def _select_table(self):\n\n return self.postgres.execute(f\"SELECT * FROM {self.table_name};\")", "def read_table(self, table):\n return READ_TABLE(table, db=self.db)", "def table(self):\n return self.snowflake_options.table", "def table(entity) -> sa.Table:\n return entity.__table__", "def getTableDefForTable(self, tableName):\n\t\tif not \".\" in tableName:\n\t\t\ttableName = \"public.\"+tableName\n\t\t\n\t\tfor row in self.readerConnection.queryToDicts(\n\t\t\t\t\"select sourcerd, tablename from dc.tablemeta where\"\n\t\t\t\t\" lower(tableName)=%(tableName)s\",\n\t\t\t\t{\"tableName\": tableName.lower()}):\n\t\t\tbreak\n\t\telse:\n\t\t\traise base.ui.logOldExc(\n\t\t\t\tbase.NotFoundError(tableName, \"table\", \"dc_tables\"))\n\n\t\treturn base.caches.getRD(row[\"sourcerd\"]\n\t\t\t).getById(row[\"tablename\"].split(\".\")[-1])", "def get_table(self):\n return copy.deepcopy(self._table)", "def table(self):\n return self.reference.table", "def get_table(table_id: int) -> Table:\n table = Table.query.filter_by(id=table_id).first()\n return table", "def table(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"table\")", "def table(self, name):\r\n if name in self._tables:\r\n return _tables[name]\r\n\r\n table = Table(name, self._storage)", "def get_table_instance(\n project_id: str, instance_id: str, table_name: str\n) -> happybase.Table:\n start = time.time()\n client = bigtable.Client(project=project_id, admin=True)\n 
instance = client.instance(instance_id)\n connection = happybase.Connection(instance=instance)\n table = connection.table(table_name)\n end = time.time()\n print(\"Elapsed time for getting table instance: {}s\".format(end - start))\n return table", "def table(self):\n return self.generator.table", "def table(self):\n if self._table is None:\n self._table = list(self._iter_rows())\n\n return self._table", "def table(self):\n return self.t", "def getTable(self, name: str):\n query = f\"SELECT * FROM '{name}';\"\n result = sql.executeAndReadQuery(self.connection, query)\n return result", "def db_table(self):", "def table(cls):\n return cls.__name__", "def get_table(table_type):\n if table_type == 'chaining':\n return chaining()\n elif table_type == 'probing':\n return probing()\n elif table_type == 'probing2':\n return probing(probe=2)\n else:\n return robinhood()", "def table(self, name: str) -> ir.TableExpr:\n qualified_name = self._qualify(name)\n return self.client.table(qualified_name, self.name)", "def get_table_byname(self, aTable):\n if aTable in self._tablesObjects.keys():\n oTable = self._tablesObjects[aTable]\n else:\n oTable = None\n return oTable", "def table(self, table_id):\n return Table(table_id, self)", "def table(self):\n return self._table_name", "def __check_table(input_table):\n\n try:\n table = TABLE_TYPES[input_table]\n return table\n except KeyError:\n raise InvalidTableType(input_table)", "def get_tablename(self):\n return self.ds_table", "def _get_tabletype(cls) -> str:\n raise NotImplementedError", "def _tables(self):\n assert False, \"subclass responsibility\"", "def holoviews_table(self) -> holoviews.Table:\n if self._holoviews_table is None:\n self._holoviews_table = holoviews.Table(self.table)\n return self._holoviews_table", "def basic_table_creation():\n results = {\n 'From pyarrow arrays': pa.table([\n pa.array(['Kakashi', 'Itachi', 'Shisui'], type=pa.string()),\n pa.array(['Hatake', 'Uchiha', 'Uchiha'], type=pa.string())\n ], names=['first_name', 'last_name']),\n 'From List[dict]': pa.Table.from_pylist([\n {'first_name': 'Kakashi', 'last_name': 'Hatake', },\n {'first_name': 'Itachi', 'last_name': 'Uchiha', },\n {'first_name': 'Shisui', 'last_name': 'Uchiha', },\n ]),\n 'From Dict[str, list]': pa.Table.from_pydict({\n 'first_name': ['Kakashi', 'Itachi', 'Shisui'],\n 'last_name': ['Hatake', 'Uchiha', 'Uchiha'],\n }),\n 'From pandas df': pa.Table.from_pandas(pd.DataFrame([\n {'first_name': 'Kakashi', 'last_name': 'Hatake', },\n {'first_name': 'Itachi', 'last_name': 'Uchiha', },\n {'first_name': 'Shisui', 'last_name': 'Uchiha', },\n ])),\n }\n pretty_print_result_map(results)", "def _init_table(self, table: \"Table\"):\n if not self.columns:\n self.columns = table.columns\n self._data = table.data", "def get_table_definition(jwt_payload: dict, schema_name: str, table_name: str):\n DJConnector.set_datajoint_config(jwt_payload)\n\n schema_virtual_module = dj.create_virtual_module(schema_name, schema_name)\n return getattr(schema_virtual_module, table_name).describe()", "def get_table(base, engine):\n class w1_temp_table(base):\n __tablename__ = 'w1_temp'\n __table_args__ = {\"useexisting\": True}\n\n id = sa.Column(sa.types.Integer, primary_key=True)\n logger_id = sa.Column(sa.types.Integer)\n value = sa.Column(sa.types.String)\n datetime = sa.Column(sa.types.DateTime)\n return w1_temp_table", "def table(self, table_name):\n return self._create_table(table_name)", "def query_generic_table(self, table_name):\n\n query = \"select * from {}\"\n try:\n 
self.dbCursor.execute(query.format(table_name))\n except mysql.connector.Error as err:\n ErrorMessageWindow(err)\n finally:\n return self.dbCursor.fetchall()", "def table_info(self, table_path: str, verbose:bool = True) -> Table:\n dataset, table = table_path.split('.')\n dataset_ref = self.client.dataset(dataset)\n table_ref = dataset_ref.table(table)\n info = self.client.get_table(table_ref)\n if verbose:\n pprint({'created': info.created,\n 'description': info.description,\n 'modified': info.modified,\n 'num_bytes': f'{info.num_bytes:,}',\n 'num_rows': f'{info.num_rows:,}',\n 'schema': info.schema})\n return info", "def get_table(self, **kwargs):\r\n options = {}\r\n table_class = self.get_table_class()\r\n table = table_class(self.get_table_data(), **kwargs)\r\n paginate = self.get_table_pagination() # pylint: disable=E1102\r\n if paginate is not None:\r\n options['paginate'] = paginate\r\n RequestConfig(self.request, **options).configure(table)\r\n return table", "def use_table(self):\n connection = self._get_connection()\n cursor = connection.cursor()\n cursor.execute(\n 'select exists(select * from information_schema.tables where table_name=%s)',\n (self.table,),\n )\n if cursor.fetchone()[0]:\n self.logger.info('Using existing table')\n else:\n try:\n cursor.execute(\n f'CREATE TABLE {self.table} ( \\\n ID VARCHAR PRIMARY KEY, \\\n DOC BYTEA);'\n )\n self.logger.info('Successfully created table')\n except (Exception, psycopg2.Error) as error:\n self.logger.error('Error while creating table!')\n connection.commit()\n self._close_connection(connection)", "def table_name() -> str:\n pass", "def makeTableWidget(self):\n from collective.table.browser.table import TableWidget\n context = self.portal.table\n widget = TableWidget(context, None)\n widget.fieldName = 'table'\n return widget", "def getTableByIndex(self, index):\n pass", "def _table_from_ft(ft_schema: dict) -> bigquery.Table:\n # A \"TableSchema\" is just a sequence of SchemaFields https://googleapis.dev/python/bigquery/latest/generated/google.cloud.bigquery.table.Table.html\n schema = list(map(_create_field_schema, ft_schema['columns']))\n table = bigquery.Table(\n bigquery.TableReference(ds, to_safe_name(ft_schema['name'])),\n schema\n )\n table.description = ft_schema.get('description', '')\n return table", "def select_from_table(self, table_name):\n sql_str = \"SELECT * FROM {tb}\".format(tb=table_name)\n cur = self.conn.cursor()\n cur.execute(sql_str)\n names = [description[0] for description in cur.description]\n\n rows = cur.fetchall()\n\n df = pd.DataFrame(rows, columns =names) \n\n return df", "def get_table(dataset_id: Optional[str] = None,\n project: Optional[str] = None,\n selected_fields: Optional[str] = None,\n table_id: Optional[str] = None,\n view: Optional[str] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetTableResult:\n __args__ = dict()\n __args__['datasetId'] = dataset_id\n __args__['project'] = project\n __args__['selectedFields'] = selected_fields\n __args__['tableId'] = table_id\n __args__['view'] = view\n opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)\n __ret__ = pulumi.runtime.invoke('google-native:bigquery/v2:getTable', __args__, opts=opts, typ=GetTableResult).value\n\n return AwaitableGetTableResult(\n clone_definition=pulumi.get(__ret__, 'clone_definition'),\n clustering=pulumi.get(__ret__, 'clustering'),\n creation_time=pulumi.get(__ret__, 'creation_time'),\n default_collation=pulumi.get(__ret__, 'default_collation'),\n 
default_rounding_mode=pulumi.get(__ret__, 'default_rounding_mode'),\n description=pulumi.get(__ret__, 'description'),\n encryption_configuration=pulumi.get(__ret__, 'encryption_configuration'),\n etag=pulumi.get(__ret__, 'etag'),\n expiration_time=pulumi.get(__ret__, 'expiration_time'),\n external_data_configuration=pulumi.get(__ret__, 'external_data_configuration'),\n friendly_name=pulumi.get(__ret__, 'friendly_name'),\n kind=pulumi.get(__ret__, 'kind'),\n labels=pulumi.get(__ret__, 'labels'),\n last_modified_time=pulumi.get(__ret__, 'last_modified_time'),\n location=pulumi.get(__ret__, 'location'),\n materialized_view=pulumi.get(__ret__, 'materialized_view'),\n max_staleness=pulumi.get(__ret__, 'max_staleness'),\n model=pulumi.get(__ret__, 'model'),\n num_active_logical_bytes=pulumi.get(__ret__, 'num_active_logical_bytes'),\n num_active_physical_bytes=pulumi.get(__ret__, 'num_active_physical_bytes'),\n num_bytes=pulumi.get(__ret__, 'num_bytes'),\n num_long_term_bytes=pulumi.get(__ret__, 'num_long_term_bytes'),\n num_long_term_logical_bytes=pulumi.get(__ret__, 'num_long_term_logical_bytes'),\n num_long_term_physical_bytes=pulumi.get(__ret__, 'num_long_term_physical_bytes'),\n num_partitions=pulumi.get(__ret__, 'num_partitions'),\n num_physical_bytes=pulumi.get(__ret__, 'num_physical_bytes'),\n num_rows=pulumi.get(__ret__, 'num_rows'),\n num_time_travel_physical_bytes=pulumi.get(__ret__, 'num_time_travel_physical_bytes'),\n num_total_logical_bytes=pulumi.get(__ret__, 'num_total_logical_bytes'),\n num_total_physical_bytes=pulumi.get(__ret__, 'num_total_physical_bytes'),\n range_partitioning=pulumi.get(__ret__, 'range_partitioning'),\n require_partition_filter=pulumi.get(__ret__, 'require_partition_filter'),\n schema=pulumi.get(__ret__, 'schema'),\n self_link=pulumi.get(__ret__, 'self_link'),\n snapshot_definition=pulumi.get(__ret__, 'snapshot_definition'),\n streaming_buffer=pulumi.get(__ret__, 'streaming_buffer'),\n table_constraints=pulumi.get(__ret__, 'table_constraints'),\n table_reference=pulumi.get(__ret__, 'table_reference'),\n time_partitioning=pulumi.get(__ret__, 'time_partitioning'),\n type=pulumi.get(__ret__, 'type'),\n view=pulumi.get(__ret__, 'view'))", "def table(self, name):\r\n return NamedTable(self.name, name)", "def basic_table_details():\n tbl: pa.table = pa.Table.from_pylist([\n {'first_name': 'Kakashi', 'last_name': 'Hatake', },\n {'first_name': 'Itachi', 'last_name': 'Uchiha', },\n {'first_name': 'Shisui', 'last_name': 'Uchiha', },\n ])\n\n results = {\n 'column_names': tbl.column_names,\n 'columns > map > combine_chunks > to_pylist': [col.combine_chunks().to_pylist() for col in tbl.columns],\n 'nbytes': tbl.nbytes,\n 'num_columns': tbl.num_columns,\n 'num_rows': tbl.num_rows,\n 'schema': tbl.schema,\n 'shape': tbl.shape,\n }\n\n print(results)", "def get_table_match_api(self, table_id=None):\n pass", "def __init__(self, table):\n\n self.table = table\n\n ## Lots of shortcutting\n # \"Connection\"\n self.client = self.table.db.client\n\n # Table object\n self.worksheet = self.table.worksheet\n self.worksheet_id = self.table.worksheet_id\n\n # Addressing\n self.fields = self.table.fields\n\n ## And something to come later\n self.row = None\n self.data = {}", "def table_builder(request):\n kind=request.param\n def _builder(data, columns):\n if kind==\"array\":\n return np.array(data)\n elif kind==\"table\":\n return DataTable(data,columns,transposed=False)\n else:\n return pd.DataFrame(data,columns=columns)\n _builder.kind=kind\n return _builder", "def copy_table(self, 
table: Table) -> Table:\n self._requires_table(table)\n return table.copy()", "def table(self, data: \"Data\" = None) -> \"DeltaGenerator\":\n if _use_arrow():\n return self.dg._arrow_table(data)\n else:\n return self.dg._legacy_table(data)", "def table(self, i):\n return self.__tables[i]", "def table(self, i):\n return self.__tables[i]", "def __tablename__(cls):\n return get_table_name(cls.__name__)", "def isTable(self, tableName):\n url = '%s/_table/%s' % (self.uri, tableName)\n data, resp = self.execute(method='GET', url=url, decode=True)\n return data", "def model_table():\r\n class OccupationTable(tables.Table):\r\n class Meta:\r\n model = Occupation\r\n assert [\"id\", \"name\", \"region\"] == list(OccupationTable.base_columns.keys())\r\n\r\n class OccupationTable2(tables.Table):\r\n extra = tables.Column()\r\n\r\n class Meta:\r\n model = Occupation\r\n assert [\"id\", \"name\", \"region\", \"extra\"] == list(OccupationTable2.base_columns.keys())\r\n\r\n # be aware here, we already have *models* variable, but we're importing\r\n # over the top\r\n from django.db import models\r\n\r\n class ComplexModel(models.Model):\r\n char = models.CharField(max_length=200)\r\n fk = models.ForeignKey(\"self\")\r\n m2m = models.ManyToManyField(\"self\")\r\n\r\n class ComplexTable(tables.Table):\r\n class Meta:\r\n model = ComplexModel\r\n assert [\"id\", \"char\", \"fk\"] == list(ComplexTable.base_columns.keys())", "def __table_cls__(cls, *args, **kwargs):\n # check if a table with this name already exists\n # allows reflected tables to be applied to model by name\n key = _get_table_key(args[0], kwargs.get('schema'))\n\n if key in cls.metadata.tables:\n return sa.Table(*args, **kwargs)\n\n # if a primary key or constraint is found, create a table for\n # joined-table inheritance\n for arg in args:\n is_pk_column = isinstance(arg, sa.Column) and arg.primary_key\n is_pk_constraint = isinstance(arg, sa.PrimaryKeyConstraint)\n if is_pk_column or is_pk_constraint:\n return sa.Table(*args, **kwargs)\n\n # if no base classes define a table, return one\n # ensures the correct error shows up when missing a primary key\n for base in cls.__mro__[1:-1]:\n if '__table__' in base.__dict__:\n break\n else:\n return sa.Table(*args, **kwargs)\n\n # single-table inheritance, use the parent tablename\n if '__tablename__' in cls.__dict__:\n del cls.__tablename__", "def table(self, name: str, database: str | None = None) -> ir.Table:\n alch_table = self._get_sqla_table(name, schema=database)\n node = self.table_class(source=self, sqla_table=alch_table)\n return self.table_expr_class(node)", "def describe_table(self, table_name, timeout):\n _abstract()", "def describe_table(self, table_name, timeout):\n _abstract()", "def get_my_tables(self):\n qnum = self.master('sql', att={'type': 'table'}) # it's a Table._call_() function call\n if self.run():\n return (self.table_factory(self.get_table_info(result[0])) for result in self.results[qnum])\n else:\n print('An error has occurred when initializing the database.')", "def get_table(new_arr, types, titles):\n try:\n table = agate.Table(new_arr, titles, types)\n return table\n except Exception as e:\n print e", "def table_reference(self) -> 'outputs.PreventionJobTriggerInspectJobStorageConfigBigQueryOptionsTableReference':\n return pulumi.get(self, \"table_reference\")", "def get_table_name(self):\n return self._table", "def create_table(self):\n pass", "def start_table(self):\n raise NotImplementedError", "def get_table_meta(self, table_name):\n table = 
self._metadata['tables'].get(table_name)\n if table is None:\n raise ValueError('Table \"{}\" does not exist'.format(table_name))\n\n return copy.deepcopy(table)", "def create_new_table():\n dataset = create_dataset()\n table_id = \"{}.{}.corona_cases_table\".format(client.project, dataset.dataset_id)\n table = bigquery.Table(table_id)\n table = client.create_table(table, exists_ok=True)\n print(\n \"Created table {}.{}.{}\".format(table.project, table.dataset_id, table.table_id)\n )\n return table", "def _create_table_if_not_exists(self) -> bigquery.Table:\n table = self.client.create_table(\n table=bigquery.Table(table_ref=self._table_ref, schema=Schema),\n exists_ok=True,\n )\n logging.info(\"table %s already exists.\", table.full_table_id)\n return table", "def get_table_data(self):\r\n if self.table_data:\r\n return self.table_data\r\n elif hasattr(self, \"get_queryset\"):\r\n return self.get_queryset()\r\n raise ImproperlyConfigured(\"Table data was not specified. Define \"\r\n \"%(cls)s.table_data\"\r\n % {\"cls\": type(self).__name__})", "def GetTable(self, table_id):\n for table in self.tables:\n if table.table_id == table_id:\n return table\n\n return None", "def get_table_class(self):\r\n if self.table_class:\r\n return self.table_class\r\n raise ImproperlyConfigured(\"A table class was not specified. Define \"\r\n \"%(cls)s.table_class\"\r\n % {\"cls\": type(self).__name__})", "def _get_or_create_table(self):\n\n table_schema = self._get_table_schema()\n try:\n table_description = self.client.create_table(**table_schema)\n logging.info('DynamoDB Table %s did not exist, creating.',self.table_name)\n\n # In case we created the table, wait until it becomes available.\n self._wait_for_table_status('ACTIVE')\n logging.info('DynamoDB Table %s is now available.',self.table_name)\n\n self.client.update_time_to_live(\n TableName=self.table_name,\n TimeToLiveSpecification={\n 'Enabled': True,\n 'AttributeName': self._expiry_field.name\n }\n )\n logging.info('DynamoDB Table %s now expires items',self.table_name)\n\n return table_description\n\n except ClientError as e:\n error_code = e.response['Error'].get('Code', 'Unknown')\n # If table exists, do not fail, just return the description.\n if error_code == 'ResourceInUseException':\n return self.client.describe_table(TableName=self.table_name)\n else:\n raise e", "def get_wrapped_table(self):\n assert self.is_table_wrapper\n for child in self.children:\n if isinstance(child, TableBox):\n return child\n else: # pragma: no cover\n raise ValueError('Table wrapper without a table')", "def _TryGetCurrentSchema(dataset_id, table_id, project_id):\n client = GetApiClient()\n service = client.tables\n get_request_type = GetApiMessage('BigqueryTablesGetRequest')\n get_request = get_request_type(datasetId=dataset_id,\n tableId=table_id,\n projectId=project_id)\n try:\n table = service.Get(get_request)\n if not table or table.type != 'TABLE':\n raise SchemaUpdateError('Schema modifications only supported '\n 'on TABLE objects received [{}]'.format(\n table))\n except apitools_exceptions.HttpNotFoundError:\n raise SchemaUpdateError('Table with id [{}:{}:{}] not found.'.format(\n project_id, dataset_id, table_id))\n\n return table.schema", "def get_context_table_name(self, table):\r\n return self.context_table_name or \"table\"", "def table(self, data, dbPath:str=None) -> Type[\"Table\"]:\n return Table(data=data, dbPath=dbPath, s=self)", "def get_table(self, table, format=\"FITS\", verbose=False):\n # make sure the table exists\n try:\n results = 
self.quick(\"select top 0 * from {}\".format(table),context=\"MYDB\")\n except Exception as e:\n # raise ValueError(\"table MyDB.{} not found\".format(table)) from None\n raise_from(ValueError(\"table MyDB.{} not found\".format(table)), None)\n # first try to get it as a quick request, which is much faster if it works\n try:\n return self.quick(\"select * from {}\".format(table),context=\"MYDB\",astropy=True)\n except Exception as e:\n pass\n \n # sigh, have to go through output queue\n t0 = time.time()\n format = format.upper()\n if format not in [\"FITS\",\"CSV\"]:\n # just force a good value\n format = \"FITS\"\n if verbose:\n print(\"Making output request for {}-format data\".format(format))\n job_id = self.request_output(table,format)\n status = self.monitor(job_id)\n if status[0] != 5:\n raise Exception(\"Output request failed.\")\n job_info = self.job_info(jobid=job_id)[0]\n url = job_info[\"OutputLoc\"]\n if format == \"FITS\":\n fh = fits.open(url)\n # TDIM keywords in the Casjobs FITS header are simply wrong\n # Have to delete them to avoid bad problems in astropy.io.fits\n del fh[1].header['TDIM*']\n tab = Table(fh[1].data)\n fh.close()\n else:\n r = requests.get(url)\n r.raise_for_status()\n tab = ascii.read(MastCasJobs.replacenull(r.text),format='csv')\n if verbose:\n print(\"{:.1f} s: Retrieved {} row {} table\".format(time.time()-t0,len(tab),format))\n return tab", "def _get_table(self, key):\n table = getattr(self, key)\n if table is None or isinstance(table, int):\n return table\n return table.tid", "def __init__(self, queries, table_info, storage_type):\n super().__init__(queries, table_info, storage_type)", "def table(self) -> 'outputs.PreventionJobTriggerInspectJobActionSaveFindingsOutputConfigTable':\n return pulumi.get(self, \"table\")", "def test_rt_table(self) -> None:\n expected = Fixtures.next_table()\n expected.description = '\"hello!\" said no one'\n expected.tags.sort()\n\n self.get_proxy().put_table(table=expected)\n actual: Table = self.get_proxy().get_table(table_uri=checkNotNone(expected.key))\n actual.last_updated_timestamp = None\n actual.tags.sort()\n\n self.assertEqual(expected, actual)", "def get_table_def(dict_in, db_in):\n meta = MetaData(db_in)\n \n val_mapping = {\n 'pressure': Integer,\n 'temperature': Float,\n 'humidity': Float,\n 'battery': Integer,\n 'colorTemperature': Integer,\n }\n \n val_type = val_mapping.get(dict_in['name'], String)\n \n\n table_def = Table(dict_in['name'], meta, \n Column('source', String),\n Column('name', String),\n Column('displayName', String),\n Column('value', String),\n Column('unit', String),\n Column('deviceId', Integer),\n Column('hubId', Integer),\n Column('locationId', Integer),\n Column('installedAppId', Integer),\n Column('descriptionText', String),\n Column('timestamp', DateTime),\n )\n return table_def", "def table(self, table):\n self._table = table\n return self" ]
[ "0.7520106", "0.7386781", "0.72861874", "0.7049766", "0.70220125", "0.69553965", "0.69106346", "0.6874991", "0.68543154", "0.6838191", "0.68266505", "0.67859554", "0.67699444", "0.671571", "0.67097473", "0.67097473", "0.6674049", "0.66547084", "0.66416943", "0.6624039", "0.6609071", "0.6595021", "0.6545639", "0.6500418", "0.64889026", "0.64414334", "0.6397164", "0.6359427", "0.63586664", "0.6341421", "0.63175166", "0.63045335", "0.6282519", "0.62751406", "0.6272438", "0.6256033", "0.62197536", "0.6218494", "0.6216846", "0.62149143", "0.61796", "0.617767", "0.61522895", "0.6143521", "0.6104055", "0.6094414", "0.6072861", "0.60570884", "0.6050315", "0.6021708", "0.60139585", "0.60094285", "0.59999543", "0.599161", "0.5989031", "0.5985576", "0.59854466", "0.59596974", "0.59573245", "0.59103256", "0.5908518", "0.59029025", "0.5894301", "0.5886251", "0.58664834", "0.58655053", "0.5860944", "0.58517975", "0.58346665", "0.58346665", "0.58173263", "0.5808934", "0.58038944", "0.58035827", "0.5795618", "0.57914144", "0.57914144", "0.57761997", "0.5771142", "0.5761321", "0.5757187", "0.5755289", "0.57394755", "0.5735174", "0.5734633", "0.5732951", "0.57272226", "0.57224405", "0.5722231", "0.57169384", "0.57117486", "0.5705909", "0.5703246", "0.56898254", "0.56869644", "0.5677166", "0.56759036", "0.5671516", "0.56700146", "0.5669766", "0.56676424" ]
0.0
-1
calculate the power x ^ n
def puissance(x: float, n: int) -> float:
    resultat: float = 1
    signe: int = 1
    if n != 0:
        if n <= 0:
            n = -n
            signe = -1
        for cpt in range(1, n + 1):
            resultat = resultat * x
        if signe < 0:
            resultat = 1 / resultat
    return resultat
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pseudo(x,N) :\n\treturn (x**2+1)%N", "def power(x, n):\n power = 1\n for i in range(abs(n)):\n power = multiply(power, x) \n return power", "def __pow__(self,n):\r\n\t\t\r\n\t\t# take power\r\n\t\tp = self.power(n)\r\n\t\t\r\n\t\treturn p", "def power(x, n):\n value = 1\n for i in range(n):\n value = multiply(value, x)\n return value", "def zn_pow(x, y, n):\n if y < 0:\n y = abs(y)\n x = inverse_in_zn(x, n)\n product = 1\n for i in range(y):\n product = (product * x) % n\n vprint(\"{}^{}={}\".format(x, i, product))\n return product", "def powerize(n, p):\n return sum(int(d)**p for d in str(n))", "def power(x, n):\n if n == 0:\n return 1\n result = power(x, math.floor(n / 2))\n if n % 2 > 0:\n return x * result * result\n else:\n return result * result", "def zernike_num_coeff(n):\n \n\tif not (n>=0):\n\t\tprint('Input parameter must be >= 0')\n\t\traise AssertionError() \n \n\treturn sum(xrange(n+1)) + n+1", "def modpow(a, n, p):\n res = 1\n a = a % p\n while n > 0:\n # if n is odd\n if n & 1:\n res = (res * a) % p\n n = n >> 1 # n = n / 2\n a = (a*a) % p\n\n return res", "def calculateCrypt(asci: int, e: int, n: int) -> int:\n return pow(int(asci),e,n)", "def power(x, m, n):\n a = 1\n while m > 0:\n if m % 2 == 1:\n a=(a*x)%n\n x=(x*x)%n\n m//=2\n return a", "def sol(n):\n p = 1\n res = 0\n \n while n:\n p*=5\n if n&1:\n res+=p\n n=n>>1\n return res%1000000007", "def zzX_pow(f, n):\n if poly_univariate_p(f):\n return zzx_pow(f, n)\n if not n:\n return zzX_const_of(f, 1)\n if n == 1 or zzX_zero_p(f) or zzX_one_p(f):\n return f\n\n g = zzX_const_of(f, 1)\n\n while True:\n n, m = n//2, n\n\n if m & 1:\n g = zzX_mul(g, f)\n\n if n == 0:\n break\n\n f = zzX_sqr(f)\n\n return g", "def compute(n):\n if n == 1:\n return 1\n else:\n i = find_i(n)\n return 2 * compute(n - i) + 2 ** i - 1", "def power2(x, n):\n if n == 0:\n return 1\n else:\n partial = power2(x, n // 2)\n result = partial * partial\n if n % 2 == 1:\n result *= x\n return result", "def power2(x, n):\n if n == 0:\n return 1\n else:\n partial = power2(x, n // 2)\n result = partial * partial\n if n % 2 == 1:\n result *= x\n return result", "def find_invpow(x,n):\n high = 1\n while high ** n <= x:\n high *= 2\n low = high//2\n while low < high:\n mid = (low + high) // 2\n if low < mid and mid**n < x:\n low = mid\n elif high > mid and mid**n > x:\n high = mid\n else:\n return mid\n return mid + 1", "def formula_n(self, n: int, x: np.ndarray) -> np.ndarray:\n\n # express x as z = x/(x-1)\n z = x / (x - 1)\n\n # special case @n=0\n if n == 0:\n kn = 1 - self._vlerchphi(1 / z, n + 1)\n else:\n kn = 1 / n - self._vzlerchphi(1 / z, n + 1)\n\n # return\n return kn", "def psi_prime(n,x):\r\n a = 1/(sqrt((2**n)*fac(n)*sqrt(pi)))\r\n b = (e)**(-1*(x**2)*0.5)\r\n third_factor = (-1*x*H(n,x))+(2*n*H(n-1,x))\r\n return a*b*third_factor", "def PN(self, n):\n if not self.isVaild():\n pass\n if n < self.C:\n return self.P0()*(self.r()**n)/math.factorial(n)\n else:\n return self.P0()*(self.r()**n)/(math.factorial(self.C)*self.C**(n-self.C))", "def dbinom(self, x, n, p):\n f = math.factorial\n C = Decimal(f(n) / (f(x) * f(n-x)))\n return C * p**x * (1-p)**(n-x)", "def modexp(x,y,n):\n\tif y == 0: return 1\n\n\tpartial = modexp(x, y/2, n)\n\n\tif y%2 == 0: return (partial**2) % n\n\telse: return (x*partial**2) % n", "def psi(n,x):\r\n a = 1/(sqrt((2**n)*fac(n)*sqrt(pi)))\r\n b = (e)**(-1*(x**2)*0.5)\r\n H_n = H(n,x)\r\n return a*b*(H_n)", "def power(x, n):\n # Negative and fractional powers are not allowed\n if n < 0:\n raise ValueError('n cannot be 
negative')\n elif 0 < n < 1.0:\n raise ValueError('n cannot be fractional')\n\n result = 1\n for _ in range(n):\n result = multiply(result, x)\n return result", "def ne(n):\n return 4*n*n - 2*n + 1", "def square_and_multiply(x, exponent, n):\n result = 1\n while exponent > 0:\n if exponent % 2:\n result = (result * x) % n\n x = (x * x) % n\n exponent = exponent // 2\n return result", "def power(x, n, id, sqr, mul):\n\n ## First, we need to find a 2^{2^i} > n.\n i = 1\n while (n >> i) > 0: i = 2*i\n\n ## Refine our estimate, so that 2^{k-1} <= n < 2^k.\n k, m = 1, n\n while m > 1:\n i = i >> 1\n mm = m >> i\n if mm > 0: m, k = mm, k + i\n\n ## Now do the square-and-multiply thing.\n y = id\n for i in xrange(k - 1, -1, -1):\n y = sqr(y)\n if (n >> i)%2: y = mul(x, y)\n\n ## We're done.\n return y", "def solve(n=1000):\r\n return str(sum(x**x for x in range(1, n + 1)))[-10:]", "def __pow__(self, n): \n\n if n > 0:\n pow = self.clone()\n for i in range(1, n):\n pow *= self\n elif n == 0:\n return moeb_id\n else:\n pow = self.clone().inv()\n inv = self.inv().clone()\n for i in range(1, - n):\n pow *= inv\n\n return pow", "def __ipow__(self,n):\r\n\t\t\r\n\t\treturn self.power(n)", "def problem9_naive(n):\n for a in range(4, n, 4):\n for b in range(3, n - a):\n c = n - a - b\n if a ** 2 + b ** 2 == c ** 2:\n return a * b * c\n return None", "def n_root_of_x(n, x):\n if n==0:\n return 1\n \n return 1 if n==0 else x**(1.0/n)", "def nthRoot(x,n):\n return op.pow(x,1/n)", "def pow(f, n):\n return f.per(dmp_pow(f.rep, n, f.lev, f.dom))", "def pow(op, n):\n return compose(* ([op] * n))", "def mod_pow(x,e,p):\n x = x % p\n R = 1\n while e > 0 :\n if (e%2) == 1 :\n R = (R*x) % p\n e = e//2\n x = (x*x) % p \n return(R)", "def next_pow_two(n):\n i = 1\n while i < n:\n i = i << 1\n return i", "def power1(x, n):\n if n == 0:\n return 1\n else:\n return x * power1(x, n - 1)", "def dig_pow(n, p):\n t = sum(pow(int(j), p+i) for i, j in enumerate(str(n)))\n return t/n if t % n == 0 else -1", "def power1(x, n):\n if n == 0:\n return 1\n return x * power1(x, n-1)", "def evansMod(x,n):\n if x%n == 0:\n return 1\n else:\n return 0", "def PBpoly(n, x):\n n = int(n)\n return Bpoly(n, x-math.floor(x))", "def psi(n, x):\n H = h(n, x, orthonormal=True)\n weight = np.exp(-(x ** 2) / 2)\n psi = H * weight\n return psi", "def generarN(self, bits):\n \n p = self.generarPrimo(bits//2)\n while 1:\n q = self.generarPrimo(bits//2)\n if p != q:\n return p * q", "def powAlpha( n ):\n return (1-betaval)*Fib(n) + Fib(n-1)\n #return Fib(n+1) - Fib(n) * betaval", "def zzx_pow(f, n):\n if not n:\n return [INT_ONE]\n if n == 1 or f == [] or f == [1]:\n return f\n\n g = [INT_ONE]\n\n while True:\n n, m = n//2, n\n\n if m & 1:\n g = zzx_mul(g, f)\n\n if n == 0:\n break\n\n f = zzx_sqr(f)\n\n return g", "def f(i):\n return e(2**N-1-i) ^ 2**(N-1)", "def _pow_(self, n):\n assert n > 0\n return generic_power(self, n)", "def int_pow_fixed(y, n, prec):\n if n == 2:\n return (y*y), 0\n bc = bitcount(y)\n exp = 0\n workprec = 2 * (prec + 4*bitcount(n) + 4)\n _, pm, pe, pbc = fone\n while 1:\n if n & 1:\n pm = pm*y\n pe = pe+exp\n pbc += bc - 2\n pbc = pbc + bctable[int(pm >> pbc)]\n if pbc > workprec:\n pm = pm >> (pbc-workprec)\n pe += pbc - workprec\n pbc = workprec\n n -= 1\n if not n:\n break\n y = y*y\n exp = exp+exp\n bc = bc + bc - 2\n bc = bc + bctable[int(y >> bc)]\n if bc > workprec:\n y = y >> (bc-workprec)\n exp += bc - workprec\n bc = workprec\n n = n // 2\n return pm, pe", "def pow(a: float, n: int):\n if n == 0:\n return 1\n elif 
n % 2 == 0: # power n - even\n return pow(a**2, n//2)\n else: # power n - odd\n return pow(a, n-1)*a", "def mask(n):\n if n >= 0:\n return 2**n - 1\n else:\n return 0", "def mask(n):\n if n >= 0:\n return 2**n - 1\n else:\n return 0", "def calculateDeCrypt(asci: int, d: int, n: int) -> int:\n return pow(int(asci),d,n)", "def log_multinomial_coefficient(n, x):\n return gammaln(n + 1) - gammaln(x + 1).sum()", "def fast_exp(a, x, n):\n x_2 = int2bin(x)\n vprint(\"{} = [{}]_2\".format(str(x), x_2))\n powers = [a % n]\n vprint(\"{}^(2^0) = {}^1 = {} \\\\equiv {}\".format(a, a, a, (a % n)))\n i = 1\n while i < len(x_2):\n # This (hilariously ugly) print statement prints the\n # intermediary operations in a format that can be easily\n # exported to LaTeX. TODO: Split it up into sane chunks.\n vprint(\"{}^{{ {}^{} }} = {}^{{ {} }} = {}^{{ {} }} * {}^{{ {} }} = {}*{} = {} \\\\equiv {}\".format(\n a, 2, i,\n a, pow(2, i),\n a, pow(2, i-1),\n a, pow(2, i-1),\n powers[-1], powers[-1],\n powers[-1] * powers[-1],\n (powers[-1] * powers[-1]) % n))\n next_power = (powers[-1] * powers[-1]) % n\n powers.append(next_power)\n i += 1\n\n vprint(\"{}^{{ {} }} = ...\".format(a, x))\n rpowers = list(reversed(powers))\n prod = 1\n i = 0\n while i < len(x_2):\n bit = x_2[i]\n power = rpowers[i]\n if bit == \"1\":\n vprint(\"* {} \\t== {}^{{ 2^{{ {} }} }}\\n\".format(power, a, len(x_2) - i - 1))\n prod *= power\n i += 1\n result = prod % n\n vprint(\"= {} \\\\equiv {}\".format(prod, result))\n return result", "def fn(n):\n if n == 0: return 1\n return sum(fn(i)*fn(n-i-1) for i in range(n))", "def Bpoly(n, x):\n n = int(n)\n out = 0\n for k in xrange(0, n+1):\n out += comb(n,k)*Bnum(n-k)*x**float(k)\n return out", "def nw(n):\n return 4*n*n + 1", "def binomial(n, p):\n sum_ans = 0\n for k in range(n):\n sum_ans = sum_ans + bernoulli(p)\n return sum_ans", "def solution(n,p):\n \n a=pow(n, (p - 1) // 2, p)\n if(a==1):\n return True\n else :\n return False", "def _power(self, a, n, m):\n res = 1\n while n != 0:\n if n % 2 != 0:\n res *= a\n res %= m\n n -= 1\n else:\n a *= a\n a %= m\n n //= 2\n return res", "def ramanujan_hardy_asymptotic(n):\n if(n != int(n)):\n raise ValueError(\n \"n must be integer\"\n )\n \n return int((1/(4*n*math.sqrt(3)))*math.exp(math.sqrt(2*n/3)))", "def fast_expo(a, n, mod):\n if n == 0:\n return 1\n x = fast_expo(a, int(n / 2), mod)\n if n % 2 == 0:\n return pow(x, 2) % mod\n else:\n return a * pow(x, 2) % mod", "def combinations(n) -> float:\r\n c = math.factorial(n) / (math.factorial(2) * math.factorial(n - 2))\r\n return c", "def base_binom_num(x,n0):\n res = stats.binom.pmf(range(n0+1), n0, 1/2.0) \n a = 0 \n for i in range(n0+1):\n if i <= x:\n a = a +res[i]\n return a", "def question_30(x: int) -> int:\n # Base case below:\n if x == 0:\n return 1\n # Recursive function below:\n else:\n result = 1\n while x > 0:\n for i in range(x):\n result *= 2 ** question_30(i)\n x -= 1\n return result", "def pythagorean_triples(n):\n pass", "def binomial_coefficient3(n, k):\n return reduce(lambda a, b: a * (n - b) / (b + 1), xrange(k), 1)", "def phi(n: int) -> int:\n result = 1\n for i in range(2, n):\n if gcd(i, n) == 1:\n result += 1\n return result", "def newton(n):\n x = n\n y = (x + 1) // 2\n while y < x:\n x = y\n y = (x + n // x) // 2\n return x", "def nth_pow(x, n, name=None):\n if not n >= 0:\n raise ValueError(\"n (power) has to be >= 0. 
Currently, n={}\".format(n))\n\n power = to_complex(1.)\n for _ in range(n):\n power *= x\n return power", "def nCWRk(n, r):\n val = 1\n for i in range(1, r+1):\n val *= n + r - i\n val //= i\n return val", "def euler_phi(n):\n\tif n == 1: return 1\n\tif n <= 0: return 0\n\t# For each prime factor p with multiplicity n, a factor of (p**(n-1))*(p-1)\n\treturn functools.reduce(lambda a,x:a*(x[0]**(x[1]-1))*(x[0]-1),factor(n),1)", "def power(a, n):\n result = 1\n exponent_is_negative = n < 0\n\n n = abs(n)\n while n > 0:\n result *= a\n n -= 1\n\n if exponent_is_negative is True:\n result = 1 / result\n\n return result", "def computePow (m,n,e):\n p = m # p will hold m^{2^j}\n r = 1 # r is the result\n while ( e > 0):\n if (e %2 == 1):\n r = (r * p)%n\n p = (p*p) % n\n e = e // 2\n return r %n", "def combin(n, k):\n\tif k > n//2:\n\t\tk = n-k\n\tx = 1\n\ty = 1\n\ti = n-k+1\n\twhile i <= n:\n\t\tx = (x*i)//y\n\t\ty += 1\n\t\ti += 1\n\treturn x", "def multiple_comparisons(p, n):\r\n if p > 1e-6: # if p is large and n small, calculate directly\r\n return 1 - (1 - p) ** n\r\n else:\r\n return one_minus_exp(-n * p)", "def modExp(a, b, n):\n c = 0\n d = 1\n for bi in bin(b)[2:]:\n c = 2 * c\n d = (d * d) % n\n if bi == '1':\n c += 1\n d = (d * a) % n\n return d", "def binomial(n: int, p: float) -> int:\n return sum(bernoulli_trial(p) for _ in range(n))", "def pow2(x: int, p: int) -> int:\n while p > 0:\n x = x * x % q\n p -= 1\n return x", "def modular_exponentiation(x, y, n):\r\n result = 1\r\n while y > 0:\r\n if y & 1 == 1:\r\n result = (result * x) % n\r\n\r\n y = y >> 1\r\n x = (x * x) % n\r\n return result", "def powmod(b,e,n):\r\n\treturn power_mod(b,e,n)", "def binomial(n, k):\n if 0 <= k <= n:\n ntok = 1\n ktok = 1\n for t in range(1, min(k, n - k) + 1):\n ntok *= n\n ktok *= t\n n -= 1\n return (ntok // ktok) % MOD\n else:\n return 0", "def euler_phi(n):\r\n\t# For each prime factor p with multiplicity n, a factor of (p**(n-1))*(p-1)\r\n\treturn reduce(lambda a,x:a*(x[0]**(x[1]-1))*(x[0]-1),factor(n),1)", "def binomial(n, k):\n if 0 <= k <= n:\n ntok = 1\n ktok = 1\n for t in range(1, min(k, n - k) + 1):\n ntok *= n\n ktok *= t\n n -= 1\n return ntok // ktok\n else:\n return 0", "def sumn_pow2(n):\n return (n * (n + 1) * (2 * n + 1)) / 6", "def prevpow2(i):\n n = 1\n while 2*n <= i: n *= 2\n return n", "def binomial_coefficient2(n, k):\n if 0 <= k <= n:\n p = 1\n for t in xrange(min(k, n - k)):\n p = (p * (n - t)) // (t + 1)\n return p\n else:\n return 0", "def my_func(x, y):\n result = 0\n pow_res = 1\n while y:\n pow_res= pow_res*x\n y +=1\n\n result = 1 / pow_res\n\n\n return result", "def solution2(n):\n ones = 0\n while n > 0:\n if n & 1:\n ones += 1\n n = n >> 1\n\n return 0 if ones % 2 == 0 else 1", "def Z(n):\n count5 = 0\n i = 1\n while 1:\n a = pow(5, i)\n if a > n:\n return count5\n else:\n count5 += n/a\n i += 1", "def cheb_poly(x, n):\n if n == 0:\n return anp.array([1 for i in x])\n elif n == 1:\n return x\n else:\n return 2*x*cheb_poly(x, n-1)-cheb_poly(x, n-2)\n\n raise NotImplementedError(\"Problem 6 Incomplete\")", "def nextpow2(i):\n n = 1\n while n < i:\n n *= 2\n return n", "def square_difference(n):\n\n return n*(n+1)*(3*n+2)*(n-1)/12", "def fakultet (n = 1):\n sum = 1\n for i in range(n, 1, -1):\n sum *= i\n return sum", "def P_Tn(self,\n yn:float,\n n:int) -> float:\n return 1 / ((1 + yn * 0.5) **n)", "def probability(n, k, p):\n prob = 0\n power = expotentation_by_squaring((1-p), n)\n count_mult = math.log(n, 2)\n p_fraction = p/(1-p)\n count_mult += 1\n for i in 
range(0, k+1):\n element = newton(n, i)*power\n prob += element\n power *= p_fraction\n count_mult += 2\n return prob, count_mult", "def power(a, n: int):\n # Shortcuts are evaluated here to avoid code duplication\n\n if a == 0:\n if n > 0:\n return 0 # 0^n = 0 for n > 0\n\n if n == 0:\n return 1 # a^0 = 1 (0^0 = 1 is a convention in number theory)\n\n if n == 1:\n return a # a^1 = a\n\n if a == 1:\n return a # 1^n = 1, n integer\n\n def sqr_mul(x, e):\n if e == 1:\n return x\n elif e % 2 == 0:\n return sqr_mul(x * x, e // 2)\n elif e % 2 != 0 and e > 2:\n return x * sqr_mul(x * x, (e - 1) // 2)\n\n return sqr_mul(a, n)", "def powmod(b,e,n):\n\treturn power_mod(b,e,n)", "def binomial_coefficient(n, k):\n try:\n xrange\n except NameError:\n xrange = range\n def log_factorial(num):\n _sum = 0\n for i in xrange(2, num+1):\n _sum += log(i)\n return _sum\n return int(round(exp(log_factorial(n) - log_factorial(k) - log_factorial(n-k)), 0))" ]
[ "0.76149625", "0.73387146", "0.7324877", "0.7267193", "0.72311145", "0.71934336", "0.7170515", "0.707581", "0.69598484", "0.69359", "0.69332993", "0.6889131", "0.6868321", "0.68481904", "0.6832225", "0.6832225", "0.6831433", "0.6818101", "0.67940444", "0.67862517", "0.67858016", "0.67734426", "0.6770455", "0.6766228", "0.6759529", "0.67584383", "0.67534715", "0.67497474", "0.6730681", "0.6719614", "0.67181164", "0.6706268", "0.67020637", "0.6698544", "0.66818374", "0.6649452", "0.66419375", "0.66405976", "0.66398257", "0.6632869", "0.66297024", "0.66214424", "0.661027", "0.6608616", "0.6594819", "0.6594598", "0.6592154", "0.6567282", "0.65647393", "0.6563289", "0.65335095", "0.65335095", "0.6522943", "0.6515849", "0.6511484", "0.65109885", "0.6495003", "0.64800787", "0.6477474", "0.6475635", "0.64432824", "0.64386654", "0.6423399", "0.64216983", "0.64161885", "0.6415574", "0.64082074", "0.64056826", "0.64015454", "0.6400975", "0.6382135", "0.63815606", "0.6372286", "0.6370171", "0.63661504", "0.636481", "0.6351988", "0.6351876", "0.6350306", "0.63486886", "0.63391936", "0.6336937", "0.6336301", "0.63344544", "0.6331612", "0.6329905", "0.63274294", "0.63231856", "0.6321327", "0.6317155", "0.6314659", "0.6309849", "0.63014597", "0.6291349", "0.6290932", "0.62889415", "0.6280973", "0.6280412", "0.6280409", "0.6278258" ]
0.71249187
7
Create an invalid base log entry for step-by-step creation.
def create_base_entry(vin="INVALID", time_unix=None):
    return LogEntry(vin=vin, app_id="INVALID", time_unix=time_unix)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_step(self, step):\n raise NotImplementedError", "def createBaseLine(arguments): \n projectSource, projectName = \"\", \"\"\n projectSource, projectName = checkOS(arguments)\n testTempFile = tempfile.TemporaryFile()\n outputFile_name = \"RUN_\" + projectName + \"-planner_g_rt.\" + projectName + \"-initial-state.nddl.PlannerConfig.xml.output\"\n outputFile_path = search_file(outputFile_name, projectSource)\n if outputFile_path == None:\n sys.stderr.write(\"Error: file does not exist try running make in \" + projectSource)\n sys.exit(1)\n filePath = checkPath(outputFile_path, projectSource) \n parsePlanOutput(filePath, testTempFile)\n baseFile_name = os.path.join(projectSource, projectName + \"_Base.output\")\n baseFile = open(baseFile_name, \"w\")\n testTempFile.seek(0)\n for line in testTempFile.readlines():\n baseFile.write(line)\n baseFile.close()", "def _create_failure_entry(self):\r\n # view task entry for task failure\r\n progress = {'message': TEST_FAILURE_MESSAGE,\r\n 'exception': TEST_FAILURE_EXCEPTION,\r\n }\r\n return self._create_entry(task_state=FAILURE, task_output=progress)", "def test_create_unexpected_problem(self):\n pass", "def test_no_such_step(self):\n with self.assertRaises(Exception):\n self.run_step('FAKE-STEP.no-exists')", "def test_config_step_raises(self):\n\n run_step = self.ConfigStep.create({\n 'name': 'run_step',\n 'job_type': 'run_odoo',\n })\n\n create_step = self.ConfigStep.create({\n 'name': 'test_step',\n 'job_type': 'create_build',\n })\n\n config = self.Config.create({'name': 'test_config'})\n\n # test that the run_odoo step has to be the last one\n with self.assertRaises(UserError):\n config.write({\n 'step_order_ids': [\n (0, 0, {'sequence': 10, 'step_id': run_step.id}),\n (0, 0, {'sequence': 15, 'step_id': create_step.id}),\n ]\n })\n\n # test that the run_odoo step should be preceded by an install step\n with self.assertRaises(UserError):\n config.write({\n 'step_order_ids': [\n (0, 0, {'sequence': 15, 'step_id': run_step.id}),\n (0, 0, {'sequence': 10, 'step_id': create_step.id}),\n ]\n })", "def __init__(self, name: str, createStepName: str, fields: str = \"\"):\n pass", "def creation_error(src_dict: Dict[str, List[Union['Repeater', 'Step']]], e: str) -> str:\n return \"Sequencer error in %s: %s\\n\" % (json.dumps(src_dict), e)", "def test_no_base_date(self):\n data = self._data()\n data.pop('base_date')\n steps = [{'dateTime': '2012-06-07', 'value': '10'}]\n TimeSeriesData.objects.create(\n user=self.user,\n resource_type=TimeSeriesDataType.objects.get(\n category=TimeSeriesDataType.activities, resource='steps'),\n date=steps[0]['dateTime'],\n value=steps[0]['value']\n )\n response = self._mock_utility(response=steps, get_kwargs=data)\n self._check_response(response, 100, steps)", "def InsertLog():", "def build_step(self):\n pass", "def build_step(self):\n pass", "def __init__(self, add_newline=False):\n super().__init__()\n self.failures = []\n\n # If `add_newline` is True then the failures will start one row below,\n # in the logs. 
This is useful for having the failures starting on an\n # empty line, keeping the formatting nice and clean.\n if add_newline:\n self.failures.append(\"\")", "def test_write_to_console_fail(self, _step: PropertyMock):\n _step.return_value = None\n step = exposed.ExposedStep()\n with self.assertRaises(ValueError):\n step.write_to_console('hello')", "def __init__(self, message=\"Undefned AssertionError\"):\n config.log.critical(\"%s\" % (message))", "def creation_error(src_dict: Dict[str, List[str]], e: str):\n return \"LED Group error in %s: %s\\n)\" % (json.dumps(src_dict), e)", "def log_create(sender, instance, created, **kwargs):\n if created:\n changes = model_instance_diff(None, instance)\n\n log_entry = LogEntry.objects.log_create(\n instance,\n action=LogEntry.Action.CREATE,\n changes=json.dumps(changes),\n )\n log_created.send(\n sender=LogEntry,\n old_instance=None,\n new_instance=instance,\n log_instance=log_entry,\n )", "def create_exception(self, msg: str):", "def test_post_add_log_event(self):\n pass", "def _init_log(self):\n if not os_path_exists(self.log_file):\n self._write('', 'w')", "def logStarted(build, step, log):", "def __init__(self):\n s = \"{0}\\n{1:^150}\\n{0}\\n\".format(\"=\"*150, \"N E B I L A N D\")\n self.log(s)\n self.table_log(\"Iteration\", \"Datetime\",\n \"Event\", \"Entity Affected\", \"Extra Info\")\n self.log(\"-\"*150)", "def create_hdf5_logger(self):\n super(Inertial_Logger,self).create_hdf5_logger()\n self.logger.add_attribute(self.trial_info_path, 'mode', 'inertial trajectory')", "def getStep():\n # TODO: can there be non-Step logs?", "def build_step(self):\n\n pass", "def new_custom_log_dir(self) -> str:", "def test_add_hive_partition(self, mock_logging):\n self.client.add_hive_partition(None)\n\n assert_true(mock_logging.error.called)", "def __init__(\n self,\n name: str,\n createStepName: str,\n eventSeriesType: str,\n transformType: str = NONE,\n timeSpan: str = STEP_TIME,\n transformations: str = \"\",\n fileName: str = \"\",\n data: str = \"\",\n ):\n pass", "def _create_target_path(self, path):\n if not os.path.exists(path) and not self._dry_run:\n logging.debug('Creating target path: %s ...', path)\n try:\n os.makedirs(path)\n except OSError:\n raise LetMeError('Unable to create target path: %s' % path)", "def create_log(self):\n from settings import evidence_path\n test_case = self.__class__.__name__\n log_extension = '.log'\n if evidence_path is not None:\n log_path = '{}/{}{}'.format(\n evidence_path, test_case, log_extension\n )\n else:\n log_path = None\n self.log = Log(log_path)\n self.log = self.log.get_logger()\n return self.log", "def test_create_event_model_missing_creator(self):\n with self.assertRaises(ValidationError):\n e = Event(title=self.TITLE)\n e.save()", "def _ConstructStep(self, log_processor_class, logfile,\n factory_properties=None, perf_expectations_path=None):\n factory_properties = factory_properties or {}\n self._log_processor_class = chromium_utils.InitializePartiallyWithArguments(\n log_processor_class, factory_properties=factory_properties,\n report_link=self._report_link, output_dir=self._output_dir,\n perf_name='test-system', test_name='test-name',\n perf_filename=perf_expectations_path)\n step = chromium_step.ProcessLogShellStep(self._log_processor_class)\n log_file = self._LogFile(\n 'stdio', open(os.path.join(test_env.DATA_PATH, logfile)).read())\n self._SetupBuild(step, self._revision, self._webkit_revision, log_file)\n return step", "def _make_log_dir(self, path):\n\n try:\n 
os.makedirs('/'.join([self._logpath, path]))\n except OSError, e:\n # Return True if dir already exists\n if e.args[0] is 17:\n return\n\n # Some other error; raise exception\n raise e\n\n return", "def test_make_output_fail():\n with pytest.raises(ValueError):\n make_output_format('dummy_format', LOG_DIR)", "def test_no_step_defaults(self):\n es = exposed.ExposedStep()\n self.assertIsNone(es._step)", "def create_log(self, num_machines):\n\n # generates a folder for logs if one does not exist\n os.makedirs('logs', exist_ok=True)\n\n # record extra info at the top of the log file\n extra_info = [f'num machines: {num_machines}', f'ticks per second: {self.ticks_per_second}', f'lifetime: {self.lifetime}']\n dummy_info_dict = {k:info for k, info in zip(LogEntry.ENTRY_ORDER, extra_info)}\n\n with open(self.log_filename, mode='a') as log_file:\n writer = csv.DictWriter(log_file, fieldnames=LogEntry.ENTRY_ORDER)\n writer.writerow(dummy_info_dict)\n writer.writeheader()", "def __post_init__(self) -> None:\n self.language = self.pipeline.language or self.hass.config.language\n\n # stt -> intent -> tts\n if PIPELINE_STAGE_ORDER.index(self.end_stage) < PIPELINE_STAGE_ORDER.index(\n self.start_stage\n ):\n raise InvalidPipelineStagesError(self.start_stage, self.end_stage)\n\n pipeline_data: PipelineData = self.hass.data[DOMAIN]\n if self.pipeline.id not in pipeline_data.pipeline_runs:\n pipeline_data.pipeline_runs[self.pipeline.id] = LimitedSizeDict(\n size_limit=STORED_PIPELINE_RUNS\n )\n pipeline_data.pipeline_runs[self.pipeline.id][self.id] = PipelineRunDebug()", "def render_entry_log(self):\n self.render_log(self.selenium_testcase_entry_template)", "def test_create_log(self):\n message = \"Message is {0}\".format(random.random())\n resp = gracedb.writeLog(eventId, message)\n self.assertEqual(resp.status, 201)\n new_log_uri = resp.getheader('Location')\n new_log = resp.json()\n self.assertEqual(new_log_uri, new_log['self'])\n check_new_log = gracedb.get(new_log_uri).json()\n self.assertEqual(check_new_log['comment'], message)", "def record(self, step):", "def test_generate_03_raise_exception(self):\n move = self.get_new_move(3)\n form_wizard = Form(self.env['stock.assign.serial'].with_context(\n default_move_id=move.id,\n default_next_serial_number='code-xxx',\n ))\n wiz = form_wizard.save()\n with self.assertRaises(UserError):\n wiz.generate_serial_numbers()\n\n form_wizard.next_serial_count = 0\n # Must raise an exception because `next_serial_count` must be greater than 0.\n with self.assertRaises(ValidationError):\n form_wizard.save()", "def StepFailure(self):\n return recipe_api.StepFailure", "def start_step(self, _, step, **kwargs):\n if self._cfg.log_layout is not LogLayout.SCENARIO:\n step_content = self._build_step_content(step)\n self._step_id = self._rp.start_test_item(\n name=f\"[{step.keyword}]: {step.name}\",\n start_time=timestamp(),\n item_type=\"STEP\",\n parent_item_id=self._scenario_id,\n code_ref=self._code_ref(step),\n description=step_content,\n has_stats=False\n if self._cfg.log_layout is LogLayout.NESTED\n else True,\n **kwargs,\n )\n self._log_item_id = self._step_id\n if self._cfg.log_layout is LogLayout.NESTED and step_content:\n self.post_log(step_content)", "def log_failure(self, obj, message):\n super().log_failure(obj=obj, message=message)", "def logStep(self, pid, desc, landsatScene):\n\n with self.getConnection() as conn:\n try:\n cur = conn.cursor()\n cur.execute(\"\"\"\\\n insert into process_run ('pUID', 'Desc', 'PATH', 'ROW', 'Acqdate', 'fk_wfid')\n values (?, ?, 
?, ?, ?, ?)\"\"\", (pid, desc, landsatScene.path, landsatScene.row, landsatScene.acqdate, self.wfid))\n cur.close()\n\n except sqlite3.Error as error:\n cur.close()\n raise workflowException('Error accessing database: {0}'.format(repr(error)))\n return", "def _make_child_error(msg, module, name, traceback, log, log_type, context):\n return ChildError(msg, module, name, traceback, log, log_type, context)", "def _test_missing_current_task(self, task_class):\r\n task_entry = self._create_input_entry()\r\n with self.assertRaises(ValueError):\r\n task_class(task_entry.id, self._get_xmodule_instance_args())", "def __init__(self, args, logger: MainLogger, log_start_t=0):\n\n super().__init__(args, logger)\n self.batch_size = self.args.batch_size_run\n assert self.batch_size == 1\n\n self.env = env_REGISTRY[self.args.env](**self.args.env_args)\n # Find id of the first policy team - Only supported for one policy team in the build plan\n teams = args.env_args[\"match_build_plan\"]\n self.policy_team_id = get_policy_team_id(teams)\n if self.args.headless_controls:\n controls = HeadlessControls(env=self.env)\n controls.daemon = True\n controls.start()\n\n self.episode_limit = self.env.episode_limit\n self.t = 0 # current time step within the episode\n self.log_start_t = log_start_t # timestep to start logging from\n self.t_env = 0 # total time steps for this runner in the provided environment across multiple episodes\n self.phi: FeatureFunction = feature_func_REGISTRY[self.args.sfs] if self.args.sfs else None\n self.home_batch = None\n self.home_mac = None\n self.new_batch_fn = None", "def __init__(self, batch_size, log_steps):\n self.batch_size = batch_size\n super(TimeHistory, self).__init__()\n self.log_steps = log_steps\n\n # Logs start of step 0 then end of each step based on log_steps interval.\n self.timestamp_log = []", "def test_fails_with_no_base(self):\n assert self.add_statestream() is False", "def __init__(self):\n self._logger = logging.getLogger(__name__)\n self.step_name = \"OpenFDA\"", "def insertIntoStepLog(self, data: Dict) -> int:\n step_payload = {\n **data,\n **{\n \"step_name\": \"Data Loader\",\n \"step_end_ts\": str(datetime.datetime.now()),\n \"upsert_by\": \"DLoaderMS\",\n \"upsert_ts\": str(datetime.datetime.now()),\n },\n }\n\n insertQuery = \"\"\"\n INSERT INTO file_process_step_log\n (file_process_id,\n step_name,\n step_status,\n step_status_detail,\n step_start_ts,\n step_end_ts,\n upsert_by,\n upsert_ts)\n VALUES ( '{file_process_id}',\n '{step_name}',\n '{step_status}',\n '{step_status_detail}',\n timestamp '{step_start_ts}',\n timestamp '{step_end_ts}',\n '{upsert_by}',\n timestamp '{upsert_ts}' ) \n RETURNING step_id\n \"\"\"\n cursor = self.engine.cursor()\n try:\n cursor.execute(insertQuery.format(**step_payload))\n step_id = cursor.fetchone()[0]\n return step_id\n except Exception as e:\n raise DLoaderException(\n \"Failed while inserting data into audit table {0}\".format(e)\n )\n finally:\n cursor.close()", "def create_log(self, create_log):\n\n self._create_log = create_log", "def __init_log_folder():\n try:\n os.makedirs(Logger.__log_dir)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise e", "def create_log(self, exc):\n return self.formatter.formatException(exc)", "def __init__(self, datatype, stage=\"\", context=\"\"):\n filler = \"unspecified\"\n if isinstance(datatype, str):\n typename = datatype\n else:\n try:\n typename = datatype.__name__\n except AttributeError:\n typename = str(datatype)\n explanation = \"Error creating {dt}; stage: {s}; 
context: {c}\".\\\n format(dt=typename, s=stage or filler, c=context or filler)\n super(ModelConstructionException, self).__init__(explanation)", "def write_line(self, line):\n # TODO(iannucci): have step_runner log the step metadata as a protobuf\n # and/or put it in the Step proto message.\n return self.logging.write_line(line)", "def __init__(self, message=\"\"):\n super(AutomationError, self).__init__(message)", "def append_record_failure():\n\t\tpass", "def add_step_entry(entry_message, data=''):\n return partial(__add_entry,\n event_type='STEP',\n entry_message=entry_message,\n data='')", "def bdev_error_create(client, base_name, uuid=None):\n params = {'base_name': base_name}\n if uuid is not None:\n params['uuid'] = uuid\n return client.call('bdev_error_create', params)", "def test_create_experiment_hit_manual_branch(self):\n new_space = {\"y\": \"uniform(0, 10)\"}\n with OrionState(experiments=[config]) as cfg:\n create_experiment(\n config[\"name\"],\n space=new_space,\n branching={\"enable\": True},\n storage=cfg.storage_config,\n )\n\n with pytest.raises(BranchingEvent) as exc:\n create_experiment(\n config[\"name\"],\n version=1,\n space=new_space,\n branching={\"enable\": True},\n )\n\n assert \"Configuration is different and generates\" in str(exc.value)", "def test_file_creation(data, logging_file_name):\n create_instance(data, logging_file_name)\n log_file_name = create_file_path(logging_file_name)\n print(log_file_name)\n if data is None or len(data) == 0:\n assert not os.path.exists(log_file_name)\n else:\n assert os.path.exists(log_file_name)", "def build_log_entry(\n hostname: str, user: str, date: dt.datetime, wdir: Path, cmd: str\n) -> str:\n return (\n f'[{date.strftime(\"%Y-%m-%d %H:%M:%S\")}] ({user}@{hostname}) '\n f\"{wdir}\\n\\t{cmd}\\n\"\n )", "def __init__(self, level, general_log_path, outputs_folder):\n self.log_level = level\n\n # self.general_log_file = general_log_path.open('w')\n self.general_log_file = GCOpen(general_log_path, 'w')\n self.general_log_file.open()\n\n self.file_outputs_dir = outputs_folder / 'output_files'\n # self.file_outputs_dir.mkdir(exist_ok=True)\n\n exp_name = str(outputs_folder).split('/')[-1]\n\n self.summary_writer = SummaryWriter(log_dir=str(TEMP_FOLDER),\n filename_suffix='.' 
+ exp_name)\n tf_filename = find_tf_event(exp_name)\n self.sw_local_path = Path(TEMP_FOLDER) / tf_filename\n self.sw_gc_path = outputs_folder / tf_filename\n\n self.log(\"Starting new experiment at \" +\n datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"))\n self.log(\"User: \" + getpass.getuser())\n self.log(\"Host: \" + socket.gethostname())\n\n Logger.unique_logger = self", "def init (path = None, filename = None, create = True, path_add = None) :\n if path_add is None:\n path_add=[]\n if path is None :\n path = sys.hal_log_values [\"__log_path\"]\n \n if path == \"###\" :\n if sys.platform.startswith(\"win\") :\n path = \"d:\\\\temp\" if os.path.exists (\"d:\\\\temp\") else \"c:\\\\temp\"\n path = os.path.join (path, \"log_pyquickhelper\")\n else :\n path = \"/tmp\"\n path = os.path.join (path, \"log_pyquickhelper\")\n \n if len (path_add) > 0 : \n if not isinstance (path_add, list) : path_add = [ path_add ]\n temp = []\n for p in path_add :\n spl = os.path.splitext (p)\n temp.append (spl [0])\n path = os.path.join (path, *temp)\n \n if filename is None :\n filename = sys.hal_log_values [\"__log_file_name\"]\n \n if (sys.hal_log_values [\"__log_path\"] != path or sys.hal_log_values [\"__log_file_name\"] != filename) \\\n and sys.hal_log_values [\"__log_file\"] != None :\n sys.hal_log_values [\"__log_file\"].close ()\n sys.hal_log_values [\"__log_file\"] = None\n sys.hal_log_values [\"__log_path\"] = path\n sys.hal_log_values [\"__log_file_name\"] = filename\n \n if create :\n if not os.path.exists (sys.hal_log_values [\"__log_path\"]) :\n os.makedirs (sys.hal_log_values [\"__log_path\"])\n else :\n if not os.path.exists (sys.hal_log_values [\"__log_path\"]) :\n raise PQHException (\"unable to find path \" + sys.hal_log_values [\"__log_path\"])", "def log_step(step: int, message: str, stdout: bool = True) -> None:\n log(f\"Step {step:6d}: {message}\", stdout=stdout)", "def step(self):\n raise TaskError(\"Task %s: subclass should override step() method!\" %\n self)", "def add_step(self, step):\n if not step:\n return\n temp = {Result.__STEP: step.get_name(),\n Result.__STATUS: step.get_status(),\n Result.__MESSAGE: step.get_message()}\n self.__run.append(temp)", "def __init__(self, obj):\n super().__init__(\n \"Directory {} doesn't exist or invalid.\"\n \"Please specify a valid Bitbake build directory\".format(obj)\n )", "def test_create_existing_episode(self):\n episode = self._create_sample_episode()\n with self.assertRaises(ValueError, msg='Episode already exists.'):\n self.storage.create_episode(episode)", "def test_create_error_cleanup(self):\n metadata_dict = {\n 'Sample1': {'physical_location': 'location1',\n 'has_physical_specimen': True,\n 'has_extracted_data': True,\n 'sample_type': 'type1',\n 'required_sample_info_status': 'received',\n 'collection_timestamp':\n datetime(2014, 5, 29, 12, 24, 51),\n 'host_subject_id': 'NotIdentified',\n 'Description': 'Test Sample 1',\n 'group': 'Forcing the creation to fail',\n 'latitude': 42.42,\n 'longitude': 41.41}\n }\n metadata = pd.DataFrame.from_dict(metadata_dict, orient='index')\n with self.assertRaises(QiitaDBExecutionError):\n SampleTemplate.create(metadata, self.new_study)\n\n sql = \"\"\"SELECT EXISTS(\n SELECT * FROM qiita.required_sample_info\n WHERE sample_id=%s)\"\"\"\n sample_id = \"%d.Sample1\" % self.new_study.id\n self.assertFalse(\n self.conn_handler.execute_fetchone(sql, (sample_id,))[0])\n\n sql = \"\"\"SELECT EXISTS(\n SELECT * FROM qiita.study_sample_columns\n WHERE study_id=%s)\"\"\"\n self.assertFalse(\n 
self.conn_handler.execute_fetchone(sql, (self.new_study.id,))[0])\n\n self.assertFalse(\n exists_table(\"sample_%d\" % self.new_study.id, self.conn_handler))", "def test_config_step_create(self):\n\n config_step = self.ConfigStep.create({\n 'name': 'test_step',\n 'job_type': 'create_build',\n 'number_builds': 2,\n 'make_orphan': True,\n })\n\n config = self.Config.create({'name': 'test_config'})\n config_step.create_config_ids = [config.id]\n\n config_step._run_create_build(self.parent_build, '/tmp/essai')\n self.assertEqual(len(self.parent_build.children_ids), 2, 'Two sub-builds should have been generated')\n\n # check that the result will be ignored by parent build\n for child_build in self.parent_build.children_ids:\n self.assertTrue(child_build.orphan_result, 'An orphan result config step should mark the build as orphan_result')\n child_build.local_result = 'ko'\n\n self.assertFalse(self.parent_build.global_result)", "def __init__(self, file_path, print_too=True, override=False):\n self.file_path = file_path\n self.print_too = print_too\n if override:\n if os.path.exists(file_path):\n print('Overriding - deleting previous log...')\n os.remove(file_path)\n os.makedirs(os.path.dirname(file_path), exist_ok=True)", "def __init__(self, *args):\n this = _libsbml.new_XMLErrorLog(*args)\n try: self.this.append(this)\n except: self.this = this", "def make_DBLog(subject, event, badge, detail=''):\n app = create_app()\n with app.app_context():\n DBLog.new(subject=subject, scope=\"nox\", badge=badge, message=event, ip='-', user='-', detail=detail)", "def test_create_episode_missing_study(self):\n _, session_id = self.init_session()\n episode = sample_episode(study_id='missing', session_id=session_id)\n with self.assertRaises(ValueError):\n self.storage.create_episode(episode)", "def make_invalid_output(self):\r\n self.task_output = 'HI MY NAME IS INVALID JSON'\r\n # This should be given the value of 'unknown' if the task output\r\n # can't be properly parsed\r\n self.duration_sec = 'unknown'", "def test_pytest_bdd_with_missing_step_implementation(self):\n self.testdir.makefile(\n \".feature\",\n simple=_SIMPLE_SCENARIO,\n )\n py_file = self.testdir.makepyfile(\n \"\"\"\n from pytest_bdd import scenario, given, then, when\n\n @scenario(\"simple.feature\", \"Simple scenario\")\n def test_simple():\n pass\n \"\"\"\n )\n file_name = os.path.basename(py_file.strpath)\n self.inline_run(\"--ddtrace\", file_name)\n spans = self.pop_spans()\n\n assert len(spans) == 4\n assert spans[0].get_tag(ERROR_MSG)", "def start_device_log(self, log_path):\n return DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)", "def _prepare_support_step(bubble_array: List[dict], index: int, step: str, db_user: User, lang: str) -> None:\n LOG.debug(\"%s: support case -> %s\", index, step)\n single_split_history_step = cleaned_split_history_step(step)\n if len(single_split_history_step) < 3:\n return\n single_split_history_step_enum = wrap_history_onto_enum(single_split_history_step)\n user_uid = single_split_history_step_enum.UID\n system_uid = single_split_history_step_enum.ATTITUDE_TYPE\n\n bubble = _get_bubble_from_support_step(user_uid, system_uid, db_user, lang)\n if bubble and not bubbles_already_last_in_list(bubble_array, bubble):\n bubble_array += bubble", "def __init__(self, message=\"\"):\n super(ValidationError, self).__init__(message)", "def pytest_runtest_makereport(item, call):\n if \"incremental\" in item.keywords:\n if call.excinfo is not None:\n parent = item.parent\n parent._previousfailed = item", "def 
test_createInvalidPortDescription(self):\n store = Store()\n factory = DummyFactory(store=store)\n self.assertFailStatus(\n 1, self._makeConfig(store),\n [\"create\", \"--strport\", \"xyz\",\n \"--factory-identifier\", str(factory.storeID)])\n self.assertEqual(\n \"'xyz' is not a valid port description.\\n\", sys.stdout.getvalue())", "def on_saga_failure(self, failed_step: BaseStep, initial_failure_payload: dict):\n logger.info(f'Saga {self.saga_id} failed on \"{failed_step.name}\" step. \\n'\n f'Failure details: {initial_failure_payload}')", "def __init__(self, **extra):\n log_msg = self.message\n if extra:\n log_msg += \" Extra info: {0}\".format(extra)\n logger.error(log_msg)\n super().__init__(extra)", "def __init__(self, additional_log_data=None):\n # Set the optional Additional Log Data that is not shown to the user\n self.additional_log_data = additional_log_data", "def __init__(self, reason, stage=\"\"):\n super(ProjectConstructionException, self).__init__(\n datatype=\"Project\", stage=stage, context=reason)", "def _CreateADictOfFailedSteps(self, build_info):\n failed_steps = dict()\n for step_name in build_info.failed_steps:\n failed_steps[step_name] = {\n 'current_failure': build_info.build_number,\n 'first_failure': build_info.build_number,\n }\n\n return failed_steps", "def _create_logfile(self):\r\n if not self.console_redirect:\r\n return None\r\n\r\n # PCU_logs.robot need a timestamp for console logs as can be run several times\r\n if self.name == self.log_test.replace('.robot', ''):\r\n return open('{0}\\{1}_console_log_{2}'.format(\r\n self.output_dir_path, self.name, datetime.now().strftime(\"%m%d%H%M\")), \"w+\")\r\n else:\r\n return open('{0}\\{1}_console_log'.format(self.output_dir_path, self.name), \"w+\")", "def test_new_invalid(self) -> None:\n with pytest.raises(TypeError) as excinfo:\n RunwayTestDefinition({}) # type: ignore\n assert str(excinfo.value).startswith(\"expected data of type\")", "def add_entry(name, title, duration, notes):\n clear()\n print('Entry added to work log!')\n return Entry.create(\n employee_name=name,\n task_title=title,\n time_spent=duration,\n task_notes=notes\n )", "def WriteFlowLogEntry(self, entry: rdf_flow_objects.FlowLogEntry) -> None:\n key = (entry.client_id, entry.flow_id)\n\n if key not in self.flows:\n raise db.UnknownFlowError(entry.client_id, entry.flow_id)\n\n entry = entry.Copy()\n entry.timestamp = rdfvalue.RDFDatetime.Now()\n\n self.flow_log_entries.setdefault(key, []).append(entry)", "def test_log_file_created(self, mock_parsing_handler, mock_api_handler, mock_progress):\n\n directory = path.join(path_to_module, \"fake_ngs_data\")\n directory_status = DirectoryStatus(directory)\n log_file = path.join(directory, \"irida-uploader.log\")\n # Check that log file does not exist before starting\n self.assertFalse(path.exists(log_file))\n\n cli_entry._validate_and_upload(directory_status, False)\n\n # Make sure log file is created\n self.assertTrue(path.exists(log_file))", "def get_base_logfile():\n return \"baseLog\" + get_day() + \".log\"", "def create_step(self, seq_descr: str, step_id: int, name: str, brigthnesses: List[Union[str, int]], smooth: int,\n wait: int) -> Tuple[Optional['Step'], str]:\n seq_name: str = Sequencer.get_name(seq_descr)\n new_step: Step = Step(Name=name, Brightness=brigthnesses, Wait=wait, Smooth=smooth)\n verified_step: Optional[Step] = Step.verify_step(new_step)\n if not verified_step:\n return None, \"wrong_step_name\"\n current_seq: Optional[Sequencer] = self.get_seq_by_name(seq_name)\n if 
current_seq is None:\n return None, \"no_seq_nme\"\n is_unique: bool = AuxEffects.check_unique(self, verified_step, \"Step\", current_seq)\n if not is_unique:\n return None, \"step_exists\"\n if step_id == -1:\n current_seq.Sequence.append(verified_step)\n else:\n current_seq.Sequence.insert(step_id + 1, verified_step)\n return verified_step, \"\"", "def log_create(action, *args, **kw):\n from olympia.activity.models import ActivityLog\n\n return ActivityLog.create(action, *args, **kw)", "def test_harvester_new_file_exception(self):\n\n # create the file so that it is unreadable\n self.create_sample_data_set_dir(\"node59p1_step1.dat\", TELEM_DIR, \"node59p1.dat\",\n mode=000)\n\n # Start sampling and watch for an exception\n self.driver.start_sampling()\n\n self.assert_exception(ValueError)\n\n # At this point the harvester thread is dead. The agent\n # exception handle should handle this case.", "def create_telemetry_file():\n loginfo(\"Creating telem file if it doesn't exist...\")\n with open(HAB_TELEM_FILE, \"w\"):\n pass", "def __init__(self, *args):\n this = _libsbml.new_SBMLErrorLog(*args)\n try: self.this.append(this)\n except: self.this = this" ]
[ "0.6236393", "0.55243593", "0.54124093", "0.5270877", "0.518995", "0.5183555", "0.5180253", "0.5169412", "0.5137834", "0.5133438", "0.51106143", "0.51106143", "0.5109858", "0.5108779", "0.5071119", "0.50596637", "0.5050473", "0.5044926", "0.50438124", "0.5026648", "0.50259256", "0.5009778", "0.50025195", "0.4977957", "0.49579075", "0.4957752", "0.49563813", "0.49298856", "0.4904389", "0.48965108", "0.48948178", "0.48883164", "0.4885899", "0.48650318", "0.4860141", "0.48574194", "0.48364282", "0.4834244", "0.4824798", "0.48247302", "0.4824444", "0.48207685", "0.481688", "0.48151797", "0.48131862", "0.48108038", "0.48028958", "0.47972873", "0.4793429", "0.4790836", "0.47899812", "0.4789341", "0.4782031", "0.4781734", "0.47784972", "0.47779304", "0.4776739", "0.47682944", "0.47558585", "0.47456038", "0.47324058", "0.47273564", "0.47249174", "0.47214326", "0.47194278", "0.47140756", "0.47134006", "0.47095677", "0.47075638", "0.4697006", "0.46915874", "0.46912986", "0.46910357", "0.46823382", "0.46816707", "0.4679986", "0.46770063", "0.46769938", "0.4669975", "0.46693408", "0.46643782", "0.46610403", "0.46578038", "0.46563643", "0.46560243", "0.46556658", "0.46544304", "0.46519777", "0.4645359", "0.46417558", "0.46413654", "0.4628108", "0.46268317", "0.46230987", "0.46202794", "0.46168664", "0.46165717", "0.46160415", "0.46130055", "0.4611999" ]
0.60312283
1
Complete this entry from an invalid base entry to a full log entry.
def complete(self, app_id, vin=None, time_unix=None, level=None, log_message=None,
             gps_position=None, log_id=None, intrusion=None):
    self.set_any(vin=vin, app_id=app_id, level=level, log_message=log_message,
                 gps_position=gps_position, time_unix=time_unix, log_id=log_id,
                 intrusion=intrusion)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def handleAppendEntry(self, leader_id, leader_term, leader_prev_log_idx,\n leader_prev_log_term, entries, leader_commit_idx):\n _, my_prev_log_idx = self.getLatest()\n if self.current_term > leader_term:\n success = False\n else:\n self.current_term = max(self.current_term, leader_term)\n self.stepDown(leader_id)\n if my_prev_log_idx < leader_prev_log_idx \\\n or self.log[leader_prev_log_idx].term != leader_prev_log_term:\n logging.debug('log inconsistent with leader at {}'\n .format(leader_prev_log_idx))\n success = False\n\n else:\n # remove all entries going after leader's last entry\n if my_prev_log_idx > leader_prev_log_idx:\n self.log = self.log[:leader_prev_log_idx+1]\n logging.debug('remove redundent logs after {}'\n .format(leader_prev_log_idx))\n # Append any new entries not already in the log\n for entry in entries:\n logging.debug('adding {} to log'.format(entry))\n self.log.append(entry)\n # check the leader's committed idx\n if leader_commit_idx > self.commit_idx:\n old_commit_idx = self.commit_idx\n self.commit_idx = leader_commit_idx\n map(self.commitEntry,\n self.log[old_commit_idx+1:leader_commit_idx+1])\n logging.debug('comitting upto {}'.format(leader_commit_idx))\n success = True\n dictobj = {'current_term': self.current_term, 'voted_for': self.voted_for, 'log': self.log}\n filename = \"./state\"+self.datacenter_id+'.pkl'\n fileobj = open(filename, 'wb')\n pickle.dump(dictobj, fileobj)\n fileobj.close()\n # reply along with the lastest log entry\n # so that the leader will know how much to update the\n # nextIndices record\n # if failed, reply index of highest possible match:\n # leader_prev_log_idx-1\n self.server.appendEntryReply(leader_id, self.current_term, success,\n self.getLatest()[1] if success\n else leader_prev_log_idx-1)", "def fixup(self):\n raise Exception(\"Fixup not implemented yet!\")", "def _insert_into_clean(self, entry):\n i = entry.hash\n new_entry = self.table[i]\n while new_entry.key is not None:\n i += self.second_hash(new_entry.key)\n new_entry = self.table[i]\n new_entry.key = entry.key\n new_entry.value = entry.value\n new_entry.hash = entry.hash\n self.used += 1\n self.filled += 1", "def handleAppendEntryReply(self, follower_id, follower_term, success,\n follower_last_index):\n if follower_term > self.current_term:\n self.current_term = follower_term\n dictobj = {'current_term': self.current_term, 'voted_for': self.voted_for, 'log': self.log}\n filename = \"./state\"+self.datacenter_id+'.pkl'\n fileobj = open(filename, 'wb')\n pickle.dump(dictobj, fileobj)\n fileobj.close()\n self.stepDown()\n return\n # if I am no longer the leader, ignore the message\n if not self.isLeader(): return\n # if the leader is still in it's term\n # adjust nextIndices for follower\n if self.nextIndices[follower_id] != follower_last_index + 1:\n self.nextIndices[follower_id] = follower_last_index + 1\n logging.debug('update nextIndex of {} to {}'\n .format(follower_id, follower_last_index + 1))\n if not success:\n self.sendAppendEntry(follower_id)\n return\n # check if there is any log entry committed\n # to do that, we need to keep tabs on the successfully\n # committed entries\n self.loggedIndices[follower_id] = follower_last_index\n # find out the index most followers have reached\n majority_idx = self.maxQualifiedIndex(self.loggedIndices)\n logging.debug('the index logged by majority is {0}'\n .format(majority_idx))\n # commit entries only when at least one entry in current term\n # has reached majority\n if self.log[majority_idx].term != 
self.current_term:\n return\n # if we have something to commit\n # if majority_idx < self.commit_idx, do nothing\n if majority_idx != self.commit_idx:\n logging.info('log committed upto {}'.format(majority_idx))\n old_commit_idx = self.commit_idx\n self.commit_idx = max(self.commit_idx, majority_idx)\n map(self.commitEntry, self.log[old_commit_idx+1:majority_idx+1])", "def unmoving_update_log(self):\n self.log.append(self.log[-1])", "def flatten_log(self, log):\n pass", "def fix_nonerrors(self):\n if not self.only_error:\n return\n self.line = None\n self.filename = None", "def append_record_failure():\n\t\tpass", "def __exit__(self, _type, value, traceback):\n try:\n if not self.record_finish:\n return\n print >>sys.stderr, 'record', self.record\n dutset = {'last_finish_time':time()}\n if not self.record:\n return\n upd = {'end_time': time(), 'modification_time':time()}\n\n if value: # i.e. , if test failed:\n upd['failure'] = repr(value)\n upd['exception'] = value.__class__.__name__\n if not isinstance(value, KeyboardInterrupt):\n print 'HEADLINE: exception', upd['exception'], value\n for clause in format_exception(_type, value, traceback):\n for line in clause.split('\\n'):\n print 'CRASH:', line\n else:\n upd['infrastructure_problem'] = True\n upd['whiteboard'] = '[infrastructure] test interrupted'\n if self.reinstall_on_failure:\n dutset['test_failed'] = True\n tnext = time() + 300\n print 'INFO: test failed, so will reinstall machine at', \\\n asctime(localtime(tnext))\n\n if self.failed: #some test suite failed\n upd['failure'] = 'test failed'\n\n self.mdb.results.update({'_id':self.result_id}, {'$set':upd})\n classify = process_result(self.mdb.results.find_one({'_id':self.result_id}))\n print 'HEADLINE:', classify, self.full_description()\n\n get_track().updates.save({'result_id':self.result_id,\n 'action':'experiment finished'})\n\n if self.dut_id:\n self.mdb.duts.update({'_id':self.dut_id}, \n {'$unset': {'control_pid':1, 'result_id':1,\n 'control_command_line':1},\n '$set': dutset})\n if self.build:\n recount(self.build)\n if classify == 'infrastructure_problems':\n pass\n else:\n col = 'green' if classify == 'passes' else 'red'\n finally:\n if self.record_queue:\n self.record_queue.put('finish')\n self.record_queue.close()\n self.record_queue.join_thread()\n if self.stream_process:\n self.stream_process.join()\n if self.stdout_filter:\n self.stdout_filter.del_callback(self)", "def complete(self):\n pass", "def sync_entry(self, entry):", "def mark_datarun_complete(self, datarun_id):\n datarun = self.get_datarun(datarun_id)\n datarun.status = RunStatus.COMPLETE\n datarun.end_time = datetime.now()", "def redo(self):\n pass", "def clearEntry(self, entry):\n if entry.isDirty():\n self.writes_to_disk += 1\n entry.clear()", "def finish_hour(self):\n\t\tassert len(self.values) >= 4, 'A fully formed update date is needed.'\n\t\tself.values = self.values[:4]", "def reset(self):\n self.last_line_was_empty = True", "def fix_ale_entries(entries):\n if entries[0].fields[2] != \"gene\":\n print str(entries[0]).strip()\n raise Exception, \"Entries do not start with gene.\"\n if entries[-1].fields[2] == \"gene\":\n raise Exception, \" Entries set spills over to next gene at: %s\" %(str(entries[0]))", "def flush(self):\n for k, l in self.logs.items():\n self.full_logs[k].extend(l)\n self.logs = dict()", "def complete_write_transaction(self) -> None:\n self.batch.__exit__(*sys.exc_info())\n self.batch = self.genes.batch_writer()", "def commitEntry(self, entry):\n logging.info('entry committing! 
{}'.format(entry))\n # check if the entry is a buy ticket entry\n # if so, and if I am the leader, send a message to client about\n # the operation being successful\n if entry.command and 'ticket_count' in entry.command:\n ticket = entry.command['ticket_count']\n if self.isLeader():\n self.server.sendMessage(\n {'port': entry.command['client_port']},\n ('Here is your tickets, remaining tickets %d' % (self.total_ticket - ticket))\n if self.total_ticket >= ticket else 'Sorry, not enough tickets left')\n if self.total_ticket >= ticket:\n self.total_ticket -= ticket\n logging.info('{0} ticket sold to {1}'.format(\n ticket, entry.command['client_id']))\n elif entry.command and 'config' in entry.command:\n if entry.command['config'] == 'joint':\n # when the joint command is committed, the leader should\n # add a new config into log entry and broadcast it to all\n # datacenteres\n # for none leander, it doesn't change anything\n if self.isLeader():\n self.log.append(LogEntry(self.current_term, len(self.log),\n {'config': 'single',\n 'data': entry.command['data'][1]}))\n # send the updated message to all servers, including\n # the ones that are in the old configuration\n self.sendHeartbeat()\n else:\n if self.isLeader():\n self.sendHeartbeat(ignore_last=True)\n # when a single config is committed, the datacenter should\n # check whether it is in the new config\n # if not, it need to retire itself\n # print('---!!!!', self.getAllCenterID())\n if self.datacenter_id not in self.getAllCenterID():\n logging.info('retire itself')\n exit(1)", "def process_entry(self,\n log_entry: str):\n elem = ET.fromstring(log_entry)\n rev = elem.attrib['revision']\n values = {}\n for sub in ['author', 'date', 'msg']:\n try:\n values[sub] = elem.find(f'./{sub}').text\n except (AttributeError, SyntaxError):\n log.warning('failed to retrieve %s in %s', sub, log_entry)\n values[sub] = None\n if values['msg']:\n values['msg'] = values['msg'].replace('\\n', ' ')\n rel_url_slash = self.relative_url + '/'\n for path_elem in elem.findall('*/path'):\n other = {}\n for sub in ['text-mods', 'kind', 'action', 'prop-mods',\n 'copyfrom-rev', 'copyfrom-path']:\n try:\n other[sub] = path_elem.attrib[sub]\n except (AttributeError, SyntaxError, KeyError):\n other[sub] = np.nan\n try:\n path = path_elem.text.replace(rel_url_slash, '')\n except (AttributeError, SyntaxError, ValueError) as err:\n log.warning(f'{err} processing rev {rev}')\n path = None\n entry = scm.LogEntry(rev, values['author'], to_date(values['date']),\n path=path, message=values['msg'],\n textmods=to_bool(other['text-mods']),\n kind=other['kind'], action=other['action'],\n propmods=to_bool(other['prop-mods']),\n copyfromrev=other['copyfrom-rev'],\n copyfrompath=other['copyfrom-path'],\n added=np.nan, removed=np.nan)\n yield entry", "def completeMerge(self):\n #--Remove lists that aren't the sum of at least two esps.\n srcMods = self.srcMods\n for levls in (self.levcs,self.levis):\n for listId in levls.keys():\n if len(srcMods[listId]) < 2 or levls[listId].isDeleted:\n self.records.remove(levls[listId])\n del levls[listId]\n del srcMods[listId]\n #--Log\n log = self.log\n for label, levls in (('Creature',self.levcs), ('Item',self.levis)):\n if not len(levls): continue\n log.setHeader(_('Merged %s Lists:') % (label,))\n for listId in sorted(levls.keys(),key=lambda a: a.lower() ):\n log(listId)\n for mod in srcMods[listId]:\n log(' '+mod)", "def parse_log_entry(self, logstring):\n\n splitLogInfo = logstring.partition(self.LOGFILE_PREFIX)\n if len(splitLogInfo[1]) == 0:\n 
raise errorhandler.LogDatabaseError(\"separator {} not found in log entry\".format(self.LOGFILE_PREFIX))\n str2 = splitLogInfo[2]\n\n entrytype = None\n for k, v in self.validpostfixes.items():\n if splitLogInfo[2][0:len(k)] == k:\n entrytype = v\n break\n if entrytype is None:\n raise errorhandler.LogDatabaseError(\"Invalid log type: {}\".format(splitLogInfo[2][0:10]))\n\n try:\n timestringtrimmed = logstring.partition(\".\")[0]\n timestamp = datetime.datetime(*time.strptime(timestringtrimmed, \"%Y-%m-%dT%H:%M:%S\")[:6])\n except ValueError:\n raise errorhandler.LogDatabaseError(\"Value error parsing timestamp out of log entry\")\n\n mactokens = {\n \"MAC source\": \"MAC source = \",\n \"MAC dest\": \"MAC dest = \",\n }\n indices = []\n lastidx = 0\n for k, v in mactokens.items():\n nextidx = str2.find(v, lastidx)\n if nextidx < 0:\n raise errorhandler.LogDatabaseError(\"{} not found in log entry\".format(k))\n indices.append(nextidx + len(v))\n lastidx = nextidx\n srcMAC = str2[indices[0] : indices[0] + 17]\n dstMAC = str2[indices[1] : indices[1] + 17]\n\n iptokens = {\n \"IP source\": \"IP SRC=\",\n \"IP dest\": \"IP DST=\",\n \"IP source port\": \"SPT=\",\n \"IP dest port\": \"DPT=\"\n }\n if entrytype == LogEntryType.UNKNOWN_IP or entrytype == LogEntryType.IP_TRAFFIC_IN \\\n or entrytype == LogEntryType.IP_TRAFFIC_OUT or entrytype == LogEntryType.DROP:\n for k, v in iptokens.items():\n nextidx = str2.find(v, lastidx)\n if nextidx < 0:\n raise errorhandler.LogDatabaseError(\"{} not found in log entry\".format(k))\n indices.append(nextidx + len(v))\n lastidx = nextidx\n\n srcIP = extract_ip(str2, indices[2])\n dstIP = extract_ip(str2, indices[3])\n srcPort = str2[indices[4]:].partition(\" \")[0]\n dstPort = str2[indices[5]:]\n else:\n srcIP = \"\"\n dstIP = \"\"\n srcPort = \"\"\n dstPort = \"\"\n\n logdataentry = LogDataEntry(entry_type=entrytype, timestamp=timestamp, srcMAC=srcMAC, dstMAC=dstMAC, srcIP=srcIP, dstIP=dstIP,\n srcPort=srcPort, dstPort=dstPort)\n return logdataentry", "def test_message_truncated_correctly_commit_log_entry(self):\n commit = collection_models.CollectionCommitLogEntryModel.create(\n 'b', 0, 'committer_id', 'a', 'a' * 400, [{}],\n constants.ACTIVITY_STATUS_PUBLIC, False)\n commit.collection_id = 'b'\n commit.update_timestamps()\n commit.put()\n self._run_one_off_job()\n self.assertEqual(\n len(\n collection_models.CollectionCommitLogEntryModel.get_by_id(\n commit.id).commit_message),\n 375)\n\n # Ensure nothing happens to messages of proper length.\n self._run_one_off_job()\n self.assertEqual(\n len(\n collection_models.CollectionCommitLogEntryModel.get_by_id(\n commit.id).commit_message),\n 375)", "def set_main_from_log(self, *args):\n if self.logarithmic:\n self.adjustment.props.value = \\\n self.smart_unlog(self.scale.props.adjustment.props.value)", "def __init__(self, entry):\n \n self.lastChangedDate = entry.time\n self.size = entry.size\n self.kind = entry.kind\n self.logMessage = None", "def forwardCompleteMessage(self, msgRepr):\r\n self._incompleteMsgs.remove(msgRepr)\r\n self._protocol.processCompleteMessage(msgRepr.msg)", "def set_base_length_entry(self, base_length):\n self.entries[\"ent_base_length\"].delete(0, END)\n self.entries[\"ent_base_length\"].insert(\n 0, str(base_length))", "def emit(self, record):\n try:\n if self.check_base_filename(record):\n self.build_base_filename()\n logging.FileHandler.emit(self, record)\n except (KeyboardInterrupt, SystemExit):\n raise\n except Exception:\n self.handleError(record)", "def 
set_complete_address(self, complete_address):\n self.complete_address = complete_address", "def finish(self):\n if self.state == STATE_FINISH_ERROR:\n self.on_error('Something went wrong. :( Please see log.')\n else:\n self.on_finish()\n self.log_file.close()\n self.state = STATE_TERMINAL", "def end(self):\n self._log.debug('doing ..')\n super().end()\n\n self._log.debug('done')", "def complete(self, item, line_reference, status):\n self.job.complete(item, line_reference, status)", "def complete(self):\n\t\tres = getattr(self._client, \"complete_\" + self.method)(self)\n\t\tself._create_fields(res)", "def preprocess_entry(self, entry):\r\n raise NotImplementedError('BaseDataSource::preprocess_entry not implemented.')", "def _register_entry(self, entry_txt: str, old: bool) -> None:\n try:\n entry = LogEntry(entry_txt)\n for task in self.tasks:\n if old:\n task.register_old_entry(entry)\n else:\n task.register_entry(entry)\n except RuntimeError:\n print(\"error: invalid log entry:\", entry_txt, file=sys.stderr, flush=True)", "def refund_unmatched_entry(self, entry):\n\n # This entry should be one that was not matched, make sure it is not in a contest.\n if entry.contest is not None:\n raise UnmatchedEntryIsInContest(entry)\n\n # Refund the entry.\n return self.__refund_entry(entry)", "def finalize(self):\n self.clear()\n sys.stderr.write(f\"{self._message} finished after {(time.time()-self._startTime):.1f}s \"\n \"at \"+time.strftime(\"%H:%M:%S\", time.localtime())+\" \\n\")", "def _fill_result(self):\n # To be overrided in child\n raise Exception(\"Must override in child.\")", "def clear(self) -> None:\n self._last_err = 0", "def _get_cleaned_logs(self, log, logstart, logend):\n start = log.find(logstart) + len(logstart)\n normal_log = log[start:].replace(logend, '')\n if normal_log.strip() != '' or self.session.run_counter == 1:\n return normal_log\n lastlogend = self.LOGEND%(self.session.uuid, self.session.run_counter - 1)\n start = log.find(lastlogend) + len(lastlogend)\n return log[start:].replace(logstart, '').replace(logend, '')", "def adjust_entry_ldap(entry, base_path):\n if 'mail' not in entry:\n return\n mail = entry['mail'][0]\n\n # remove attribute if exist\n rm_keys = ['modifytimestamp', 'birthyear', 'birthday']\n for rm_key in rm_keys:\n if rm_key in entry:\n del entry[rm_key]\n\n # append attribute\n if 'cn' not in entry:\n entry['cn'] = [mail]\n\n if 'sn' not in entry:\n entry['sn'] = [mail]\n\n # replace attribute\n if 'dn' in entry:\n entry['dn'] = [f'mail={mail},{base_path}']", "def _finish_element(self):\n assert self.currentelem.indexend is True\n self.currentelem.indexend = self._parser.CurrentByteIndex + self.baseposition\n self.currentelem = self.currentelem.parent", "def copy(log_entry):\n\n\t\tassert(isinstance(log_entry, LogEntry))\n\t\treturn LogEntry.from_data(log_entry.data, log_entry.intrusion)", "async def autoformat_responsible(self,\n log_message: discord.Message,\n targeted: discord.Member,\n action: str,\n format_to: '(entry: discord.AuditLogEntry) -> str' = None, *,\n departure: bool = False,\n departure_extra: str = None,\n departure_emoji: str = None):\n if not log_message:\n # no log message to edit...\n return\n\n audit_log_entry = await self.get_responsible(log_message.guild, action, target=targeted)\n\n if not audit_log_entry:\n # couldn't find audit log entry...\n return\n\n if departure:\n # formatting using departure\n\n # [banned] by [user (user id)] [with no attached reason|with reason blah blah...]\n verb = f'{departure_extra} by 
{describe(audit_log_entry.user)} {self.format_reason(audit_log_entry)}'\n fmt = self.format_member_departure(targeted, verb=verb, emoji=departure_emoji)\n await log_message.edit(content=self.modlog_msg(fmt))\n elif format_to:\n await log_message.edit(content=self.modlog_msg(format_to(audit_log_entry)))", "def clear_entry(event):\n\tbackground = event.widget.config()['background'][-1];\n\tif(background==ERROR_COLOR):\n\t\tevent.widget.delete(0, \"end\")\n\t\tevent.widget.config(bg=WHITE)\n\tfeedback.config(text=\"\", fg=ERROR_COLOR);", "def malformed(self, malformed):\n\n self._malformed = malformed", "def complete(self):\n self._is_complete = True", "def end(self):\n self.set_initial_offset(1e6)", "def _log_process(self, log_req):\n rq_size = log_req.multipart_size\n with self._lock:\n if self._payload_size + rq_size >= self.max_payload_size:\n if len(self._batch) > 0:\n self._send_batch()\n self._batch.append(log_req)\n self._payload_size += rq_size\n if len(self._batch) >= self.max_entry_number:\n self._send_batch()", "def set_complete(self):\n self._current = self._max", "def _auto_clear_log(self):\n if self.log_size() > self.MAX_LOGSIZE:\n self.clear()", "def parse_raw_entry(raw_entry):\n entry_start = raw_entry[0]\n\n # get the timestamp\n ts_len = 23\n ts = entry_start[:ts_len]\n # get the IP, if there is one\n idx = entry_start.find(' ', ts_len+1)\n ip = entry_start[ts_len+1:idx]\n # get the database, if there is one\n consumed = idx\n idx = entry_start.find(' ', consumed+1)\n db = entry_start[consumed+1:idx]\n # get the log type\n consumed = idx\n idx = entry_start.find(' ', consumed+1)\n type = entry_start[consumed+1:idx]\n # finally, combined the message\n consumed = idx\n remaining = entry_start[consumed+1:]\n foo = [remaining]\n foo.extend(raw_entry[1:])\n msg = ''.join(foo).strip()\n\n return Entry(ts, ip, db, type, msg)", "def _add_line(self, key, info):\n info = copy.deepcopy(info)\n anticipated_bus = self._get_df_with_new_elements(\"bus\")\n new_lines = []\n required = {\"from_bus_id\", \"to_bus_id\"}\n xor_sets = {(\"capacity\", \"Pmax\"), (\"capacity\", \"Pmin\")}\n optional = {\"Pmin\"}\n for i, line in enumerate(info):\n self._check_entry_keys(line, i, key, required, xor_sets, optional)\n start = line[\"from_bus_id\"]\n end = line[\"to_bus_id\"]\n if start not in anticipated_bus.index:\n raise ValueError(\n \"No bus with the following id for line #%d: %d\" % (i + 1, start)\n )\n if end not in anticipated_bus.index:\n raise ValueError(\n \"No bus with the following id for line #%d: %d\" % (i + 1, end)\n )\n if start == end:\n raise ValueError(f\"to/from buses of line #{i + 1} must be different\")\n if \"capacity\" in line:\n if not isinstance(line[\"capacity\"], (int, float)):\n raise ValueError(\"'capacity' must be a number (int/float)\")\n if line[\"capacity\"] < 0:\n raise ValueError(\"capacity of line #%d must be positive\" % (i + 1))\n # Everything looks good, let's translate this to Pmin/Pmax\n line[\"Pmax\"] = line[\"capacity\"]\n line[\"Pmin\"] = -1 * line[\"capacity\"]\n del line[\"capacity\"]\n elif {\"Pmin\", \"Pmax\"} < set(line.keys()):\n if key == \"new_branch\":\n err_msg = \"Can't independently set Pmin & Pmax for AC branches\"\n raise ValueError(err_msg)\n for p in {\"Pmin\", \"Pmax\"}:\n if not isinstance(line[p], (int, float)):\n raise ValueError(f\"'{p}' must be a number (int/float)\")\n if line[\"Pmin\"] > line[\"Pmax\"]:\n raise ValueError(\"Pmin cannot be greater than Pmax\")\n else:\n raise ValueError(\"Must specify either 'capacity' or Pmin and 
Pmax\")\n if (\n key == \"new_branch\"\n and anticipated_bus.interconnect[start]\n != anticipated_bus.interconnect[end]\n ):\n raise ValueError(\n \"Buses of line #%d must be in same interconnect\" % (i + 1)\n )\n elif (\n anticipated_bus.lat[start] == anticipated_bus.lat[end]\n and anticipated_bus.lon[start] == anticipated_bus.lon[end]\n ):\n raise ValueError(\"Distance between buses of line #%d is 0\" % (i + 1))\n new_lines.append(line)\n\n if key not in self.ct:\n self.ct[key] = []\n self.ct[key] += new_lines", "def _fill_feedback(self):\n # To be overrided in child\n raise Exception(\"Must override in child.\")", "def abort_due_to_other(self) -> None:\n global failure_aborts_build # pylint: disable=invalid-name\n if Logger.errors and failure_aborts_build.value:\n self.abort(\"Aborting due to previous error\")", "def complete_todo(self, todo: Todo):\n todo.completed = True\n self.todo_client.put_todo(todo)", "def error_false(self):\n self.errors = self.errors[0:-1]\n if not self.errors:\n self.update_info()", "def completeStep(self):\n self.chunk_percentage[self.current_step - 1] = self.current_chunk_size\n self.progress_updated.emit(self.percentage)\n if self.total_steps == self.current_step:\n self.initialized = False", "def reBuild(self): # redefine the rebuild method for loss function (polymorphism)\n self.updateRange()\n self.buildLine()\n self.normalize() # normalize loss function to have total area of 1 ", "def full_clean(self):\n self._errors = ErrorDict()\n if not self.is_bound: # Stop further processing.\n return\n self.cleaned_data = {}\n if self.empty_permitted and not self.has_changed():\n self.cleaned_data = None\n return\n for name, field in self.fields.items():\n self.clean_field(name, field)\n try:\n self.cleaned_data = self.clean()\n except ValidationError, e:\n self._errors[NON_FIELD_ERRORS] = e.messages\n if self._errors:\n delattr(self, 'cleaned_data')", "def complete(self, x):\n self._check_input(x)\n self._check_missing_value_mask(isnull(x))\n x, missing_mask = self.prepare_input_data(x)\n\n x_zero_replaced = self.fill(x.copy(),missing_mask,'zero')\n if self.normalizer is not None:\n normalizer = NORMALIZERS[self.normalizer]\n x_zero_replaced, min_record, max_record = normalizer(x_zero_replaced)\n\n x_filled = self.solve(x_zero_replaced, missing_mask)\n revocer = RECOVER[self.normalizer]\n x_filled = revocer(x_filled, min_record, max_record)\n return x_filled", "def end(self, e):\n if e is not None:\n if self.charMap[e[0]][e[1]].c in [\"1\", \"3\"]: # wall or start\n print(\"[Error] Invalid end position.\", file=sys.stderr)\n raise UserInputException\n try:\n self.charMap[e[0]][e[1]] = CharMapCell(4)\n except IndexError:\n print(\"[Error] Invalid end position.\", file=sys.stderr)\n raise UserInputException\n self.__end = e", "def addFailure(self, result):\n result.addFailure(self, (Exception, Exception(), None))\n # Since TAP will not provide assertion data, clean up the assertion\n # section so it is not so spaced out.\n test, err = result.failures[-1]\n result.failures[-1] = (test, \"\")", "def complete(self, view, cursor_pos, show_errors):\n raise NotImplementedError(\"calling abstract method\")", "def remove_record_failure():\n\t\tpass", "def __exit__(self, exc_type, exc_value, tb):\n self.logger.handlers = self.old_handlers\n self.logger.propagate = self.old_propagate\n self.logger.setLevel(self.old_level)\n if exc_type is not None:\n # let unexpected exceptions pass through\n return False\n for record in self.watcher.records:\n self._raiseFailure(\n 
\"Something was logged in the logger %s by %s:%i\" %\n (record.name, record.pathname, record.lineno))", "def mark_failed(self):\n self.status = self.FAILED\n self.traceback = self._format_traceback()\n self.save(update_fields={'status', 'traceback', 'updated_at'})", "def mark_succeed(self):\n self.status = self.SUCCEED\n self.traceback = None\n self.save(update_fields={'status', 'traceback', 'updated_at'})", "def process_contents(self):\n if self.append_exit:\n #filtering possible duplicate exit entries\n for entry in self.contents:\n if len(entry) > 1 and entry[1] == 'exit':\n self.contents.remove(entry)\n self.contents.append(self.exit_entry)\n logging.debug(\"{}: contents processed\".format(self.name))", "def end_logging(self):\n self.append_to_logfile()", "def writeCompletelog(self, locallog, remotelog):\n\n # pause the bot from parsing, because we don't\n # want to parse the log from the beginning\n if self.console._paused is False:\n self.console.pause()\n self.debug('Pausing')\n # Remove last line if not complete\n i = remotelog.rfind ('\\r\\n')\n remotelog = remotelog[:i + 2]\n # remove any blank lines\n while remotelog[-4:-2] == '\\r\\n':\n remotelog = remotelog[:-2]\n \n # use Just a baka's lazy cursor\n self.lastlines = remotelog[-1000:]\n\n # create or open the local log file\n if self._logAppend:\n output = open(locallog, 'ab')\n else:\n output = open(locallog, 'wb')\n\n output.write('\\r\\n')\n output.write('B3 has restarted writing the log file\\r\\n')\n output.write('\\r\\n')\n output.close()\n\n self.info('remote log downloaded successfully')\n\n # we can now start parsing again\n if self.console._paused:\n self.console.unpause()\n self.debug('unpausing')", "def on_entrustAccount_editingFinished(self):\n # TODO: not implemented yet\n # raise NotImplementedError\n # raise NotImplementedError\n address = self.entrustAccount.text()\n match, valid_address = MATRIXCmd.checkAddressValid(address)\n\n if match:\n # 使用Match获得分组信息\n print(\"entrust A1 account Ok.\")\n self.WalletAddressLabel.setText('A1账户正常')\n self.a1_Address = valid_address\n depoly_msg = '抵押的账户为:' + valid_address\n print(f\"{depoly_msg}\")\n else:\n self.WalletAddressLabel.setText('抵押A1账户不正常,格式为MAN.XXXXX')\n print(\"entrust a1 account Error.\")\n\n self.entrustAccount.clear()\n # self.entrustAccount.setFocus()\n return", "def inner(exc, output, key, value, **kwargs):\n recid = output.get('recid', None) or output['legacy_recid']\n cli_logger.error(\n '#RECID: #{0} - {1} MARC FIELD: *{2}*, input value: {3}, -> {4}, '\n .format(recid, exc.message, key, value, output)\n )\n logger.add_log(exc, key, value, output, **kwargs)", "def error(self, obj) -> None:\n if isinstance(obj, str) and obj in self:\n self.__err.extend(self.pop(obj))\n else:\n self.__err.append(obj)", "def adv_path_finish(self):\n path_slice = slice(self.path_start_idx, self.ptr)\n ep_rews = self.rew_buf[path_slice]\n ep_vals = self.val_buf[path_slice]\n ep_ret = utils.reward_to_go(ep_rews)\n self.ret_buf[path_slice] = ep_ret\n self.adv_buf[path_slice] = ep_ret - ep_vals", "def update(self, idx, new_error):\n self.buffer.update(idx, self.priority(new_error))", "def _diagnose_error(self, handle, data_record):\n line = handle.readline()\n\n while line:\n # 'Searchingdone' instead of 'Searching......done' seems\n # to indicate a failure to perform the BLAST due to\n # low quality sequence\n if line.startswith('Searchingdone'):\n raise LowQualityBlastError(\"Blast failure occured on query: \",\n data_record.query)\n line = handle.readline()", "def 
load(self):\n self.out.delete('1.0', END)\n self.err.delete('1.0', END)\n self.out.insert('1.0', '\\n'.join(errorlog.out.lines))\n self.err.insert('1.0', '\\n'.join(errorlog.err.lines))", "def backfill (self):\n mods = self.get_mods_instance()\n # if self.verbose:\n # print 'BEFORE'\n # mods.show_notes()\n try:\n mods.do_back_fill(self.award_ids)\n # print mods\n # print 'AFTER'\n # mods.show_notes()\n except:\n print 'ERROR: {}'.format(sys.exc_info()[1])\n traceback.print_exc()", "def set_from_entry(self, entry):\n self.type_cls = type(entry)\n\n self.description = entry.description\n self.updated = entry.updated\n self.notes = entry.notes\n for field in entry.entry_fields:\n self._update_property(field, entry.properties[field])", "def _trunc_lines(self):\n\t\tif self._appendMessages:\n\t\t\tself._trunc_lines_append()\n\t\telse:\n\t\t\tself._trunc_lines_prepend()", "def __exit__(self, exec_type, value, traceback): # suppress(no-self-use)\n IndentedLogger.__exit__(exec_type, value, traceback)\n Task.nest_level -= 1", "def after_init(self):\n if self.options.format.appended:\n self.error_format = self.options.format.appended[0]", "def trailing_stop_loss_on_fill(self, trailing_stop_loss_on_fill):\n\n self._trailing_stop_loss_on_fill = trailing_stop_loss_on_fill", "def trailing_stop_loss_on_fill(self, trailing_stop_loss_on_fill):\n\n self._trailing_stop_loss_on_fill = trailing_stop_loss_on_fill", "def _deduplicate(self):\n if self._clean:\n return\n\n sorted_entries = sorted(\n self._entries, key=lambda entry: (entry.depth, -len(entry.tail))\n )\n\n self._entries = []\n for entry in sorted_entries:\n if any(entry.startswith(p) for p in self._entries):\n continue\n self._entries.append(entry)\n self._clean = True", "def _post_clean(self):\r\n super(NgModelFormMixin, self)._post_clean()\r\n if self._errors and self.prefix:\r\n self._errors = ErrorDict((self.add_prefix(name), value) for name, value in self._errors.items())", "async def autoformat_responsible_nontarget(self, log_message: discord.Message, *, action: str, base: str):\n action = await self.get_responsible(log_message.guild, action=action)\n\n if action:\n await log_message.edit(content=self.modlog_msg(f'{base} by {describe(action.user)}'))", "def update(self, line):", "def flush(self):\n self.old_stderr.flush()", "def complete_job(self, command_dict):\n job_uuid = command_dict['job_uuid']\n try:\n job = Job[job_uuid]\n except KeyError as e:\n # Job not found is not worth re-raising\n logger.warn(e)\n logger.warn(\"Job {} missing\".format(job_uuid))\n return\n\n logger.info(\"job {} finished with status of {}\".format(job.uuid,\n job.status))\n # Get the job log from the worker\n logger.info(\"retrieving log for job {}\".format(job.uuid))\n job_data_dir = os.path.join(self.data_dir, job.uuid)\n if(not os.path.exists(job_data_dir)):\n os.mkdir(job_data_dir)\n\n fetch_file_from_url(job.log_url(), job_data_dir)\n\n # Now get the job output data from the worker\n if(job.status == Job.STATUS_PROCESSED):\n\n logger.info(\"retrieving output for job {}\".format(job.uuid))\n fetch_file_from_url(job.download_url(), job_data_dir)\n job.status = Job.STATUS_COMPLETE\n\n job.on_primary = True\n # save job\n Job[job.uuid] = job", "def _log_append(self, msg):\n\t\tp = self._edit.get_buffer()\n\t\tstart,end = p.get_bounds()\n\t\tp.insert(end, msg)\n\t\tself._trunc_lines()\n\t\tself._edit.scroll_to_iter(p.get_end_iter(), 0.0)", "def update_log(self, log_name, log):\n old_log = self.get_log(log_name)\n if old_log.depth == log.depth:\n 
self.data_frame[\"{}({})\".format(\n old_log.descr.replace(' ', '_'), old_log.units)] = log.data\n else:\n raise Warning(\"Mismatch\")", "def rtg_finish_path(self):\n path_slice = slice(self.path_start_idx, self.ptr)\n ep_rews = self.rew_buf[path_slice]\n ep_ret = utils.reward_to_go(ep_rews)\n self.ret_buf[path_slice] = ep_ret\n self.adv_buf[path_slice] = ep_ret", "def complete(self, update):\n self.reactor.callFromThread(self._complete, update)", "def logCommand(self, command):\r\n if self.gagged()['entry']:\r\n self.gag(-1, 'entry')\r\n else:\r\n c = self.getCommands()\r\n if not c or c[-1] != command or self.config.get('entry', 'logduplicatecommands'):\r\n self.writeToLog('command', command)", "def finalize(self):\n sys.stderr.write(f\"{self._message} finished after {(time.time()-self._startTime):.1f}s \"\n \"at \"+time.strftime(\"%H:%M:%S\", time.localtime())+\" \\n\")", "def finalize(self):\n sys.stderr.write(f\"{self._message} finished after {(time.time()-self._startTime):.1f}s \"\n \"at \"+time.strftime(\"%H:%M:%S\", time.localtime())+\" \\n\")", "def after_return(self, status, retval, task_id, args, kwargs, einfo):\n self.job_db.update_job_state(self.job_id, status.lower())\n if status == 'FAILURE':\n error_object = { 'job_id': self.job_id, 'job_name': self.name, 'message': self.error }\n self.job_db.add_job_error( self.job_id, error_object )\n\n if self.parent_job_id is not None:\n self._propagate_failure_to_ancestors(self.parent_job_id, error_object)\n self.job_db.close()" ]
[ "0.51296234", "0.512334", "0.504368", "0.5036465", "0.48848653", "0.48239094", "0.47969007", "0.47061825", "0.46668997", "0.46590635", "0.45995188", "0.4596216", "0.45927078", "0.45789924", "0.45734036", "0.45707417", "0.45549586", "0.45530573", "0.45396262", "0.45332724", "0.45322424", "0.4484352", "0.44770706", "0.44744158", "0.44556046", "0.44509745", "0.44423926", "0.44346994", "0.44220603", "0.44213164", "0.4415863", "0.4413699", "0.44116303", "0.4411355", "0.44087046", "0.4405775", "0.44024324", "0.44006392", "0.43952623", "0.43923682", "0.4388989", "0.4382126", "0.4378472", "0.43695846", "0.436667", "0.4365666", "0.4360921", "0.43538886", "0.43483415", "0.43448824", "0.43424276", "0.43420547", "0.43363366", "0.43319348", "0.43093255", "0.4306272", "0.43006736", "0.42993408", "0.42922834", "0.4292199", "0.42885357", "0.42857268", "0.4284571", "0.42838144", "0.4282115", "0.42819065", "0.42810005", "0.42803478", "0.42778504", "0.4276572", "0.42760116", "0.42748713", "0.42742282", "0.4271691", "0.4268001", "0.42639396", "0.42554498", "0.42524788", "0.42513135", "0.42419258", "0.42366144", "0.42365652", "0.4234661", "0.42312044", "0.42300949", "0.42300949", "0.42235768", "0.42208904", "0.42153084", "0.4208952", "0.4203819", "0.42026746", "0.41975528", "0.41949537", "0.41913694", "0.41866374", "0.4184401", "0.41837722", "0.41837722", "0.41812024" ]
0.47549245
7
Setter for all fields at once
def set_any(self, vin=None, app_id=None, level=None, log_message=None, gps_position=None, time_unix=None, log_id=None, intrusion=None): self._set_if_not_none(LogEntry.VIN_FIELD, vin) self._set_if_not_none(LogEntry.APP_ID_FIELD, app_id) self._set_if_not_none(LogEntry.LEVEL_FIELD, level) self._set_if_not_none(LogEntry.LOG_MESSAGE_FIELD, log_message) self._set_if_not_none(LogEntry.GPS_POSITION_FIELD, gps_position) self._set_if_not_none(LogEntry.TIME_UNIX_FIELD, time_unix, verifier=self._verify_time) self._set_if_not_none(LogEntry.LOG_ID_FIELD, log_id, verifier=self._verify_uuid) if intrusion is not None: self.intrusion = intrusion
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _update_all_fields(self, name, value):\n for field in self._field_map.values():\n setattr(field, name, value)", "def setValue(self,val):\n for f,v in zip(self.fields,val):\n f.setValue(v)", "def set(self, **kwargs):\n field_names = self.get_field_names()\n for name, value in kwargs.iteritems():\n if name in field_names:\n setattr(self, name, value)", "def set_all(self, field, value):\n fields = self.find_all(field)\n for f in fields:\n f.value = value", "def setValues(self, fields: str = \"\"):\n pass", "def update_set(self):\n for field in self.children:\n if issubclass(field.__class__, MyTextField):\n val = field.get_field().value\n setattr(self.set, field.get_field().name, val if val != \"\" else None)", "def setValues(self):\n pass", "def setValues(self):\n pass", "def setValues(self):\n pass", "def setValues(self):\n pass", "def setValues(self):\n pass", "def setValues(self):\n pass", "def _set_attributes(self):", "def set_specific_fields(self):\n raise NotImplementedError(\"Must be defined by subclass!\")", "def __setstate__(self,values):\n self.initDefault()\n setter = object.__setattr__\n for value,attr in zip(values,self.persistent):\n setter(self,attr,value)\n if self.dirty_sizeCrc == None:\n self.dirty_sizeCrc = {} #--Use empty dict instead.\n self.refreshDataSizeCrc()", "def updateFromFields(self, fields, data):\n self._fields = fields\n data = [d if d is not None else '' for d in data]\n for field,val in zip(fields, data):\n setattr(self, field, val)", "def populate(self, **kw):\n for name, field in self:\n if name in kw:\n field.__set__(self, kw[name])", "def set_field( self, data ):\n self.val[:] = data[:]\n return", "def _populate(self, fields):\n schema = self.schema\n for k, v in fields.items():\n fields[k] = schema.fields[k].iget(self, v)\n\n self.modify(fields)\n self.reset_modified()", "def set_all_attribute_values(self, value):\n\n for attribute_name, type_instance in inspect.getmembers(self):\n\n if attribute_name.startswith('__') or inspect.ismethod(type_instance):\n # Ignore parameters with __ and if they are methods\n continue\n\n if isinstance(type_instance, bool):\n self.__dict__[attribute_name] = value\n elif isinstance(type_instance, self.__class__):\n type_instance.set_all_attribute_values(value)", "def update(self, **kwargs):\n for key, val in kwargs.items():\n setattr(self, key, val)", "def _setVals(self, *args, **kwargs):\n pass", "def _replace_fields(self):\n for name, value in self._cleaned_data.items():\n setattr(self, name, value)", "def set(self, **inputs):\r\n for property, value in inputs.items():\r\n try:\r\n setattr(self,property,value)\r\n except:\r\n raise Exception(property + \" keyword argument not recognized\")\r\n\r\n # update values\r\n self._check_attributes()\r\n self._set_functions()", "def set_properties(struct):", "def set(self, **kwargs):\n raise NotImplementedError", "def set_params(self,**kwargs):\n for key in kwargs:\n setattr(self, key, kwargs[key])", "def update(self, values):\r\n for k, v in six.iteritems(values):\r\n setattr(self, k, v)", "def _setAll(self, data):\n super(SummonerModel, self)._setAll(data)", "def update(self, **kwargs):\n for key, value in kwargs.items():\n if key not in self.VALID_NAMES:\n continue\n\n setattr(self, key, value)", "def fields(self, fields):\n\n self._fields = fields", "def update(self, values):\n for k, v in values.items():\n setattr(self, k, v)", "def _set_data(self, new_data):\n for name, field in self._get_fields().items():\n if name in new_data:\n try:\n setattr(self, f\"__{name}\", 
field.from_raw(new_data[name]))\n except (fields.ValidationError, ValueError):\n # should at least log validation and value errors\n # this can happen in case of e.g. fields type change\n pass", "def __set__(self, obj, value):\r\n pass", "def _set_field_values(self):\n data = self._create_soap_object('ArrayOfArrayOfString')\n\n arr1 = self._create_soap_object('ArrayOfString')\n arr1.string = self.field_values.keys()\n\n arr2 = self._create_soap_object('ArrayOfString')\n arr2.string = self.field_values.values()\n\n data.ArrayOfString.append(arr1)\n data.ArrayOfString.append(arr2)\n\n self.client.service.SetFieldValues(fieldValues=data)", "def update(self, values):\n for k, v in six.iteritems(values):\n setattr(self, k, v)", "def _set_model_field(self):\n self._field_value = hutils.format_json(self._memory_data)\n setattr(self._model, self._field, self._field_value)", "def update(self, *args, **kwargs):\n if kwargs is not None:\n for key, value in kwargs.items():\n setattr(self, key, value)", "def assign_values(self, data):\n\n for key in self.__dict__.keys():\n if key in data.keys():\n setattr(self, key, data[key]) # handy built-in function", "def update_fields(self):\n if hasattr(self.day, \"body_composition\"):\n for f in self.get_fields():\n name = f.get_field().name\n value = getattr(self.day.body_composition, name, None)\n if value is not None:\n f.set_field(value)\n else:\n f.set_field(\"\")", "def copy_fields(self, entity, all_fields=False):\n\n if all_fields:\n fields = self.get_all_fields()\n else:\n fields = self.get_non_pk_fields()\n\n for field in fields.keys():\n setattr(self, field, getattr(entity, field, None))", "def set_values(self,x):\n for i in range(len(self)):\n self[i].set_value(x[i])", "def update(self, *args, **kwargs):\n sqrlist = [\"id\", \"size\", \"x\", \"y\"]\n if args and len(args) != 0:\n for i in range(len(sqrlist)):\n if i < len(args):\n # call to setter method\n setattr(self, sqrlist[i], args[i])\n else:\n if kwargs and len(kwargs) != 0:\n for k in sqrlist:\n for key, value in kwargs.items():\n if k == key:\n setattr(self, key, value)", "def set_attributes(self):\n s = _setter(oself=self, e1=NameError, e2=AttributeError)\n\n s('oself.coef_ = oself.model.coef_')\n s('oself.intercept_ = oself.model.intercept_')\n\n self.time_prepare = None\n s('oself.time_prepare = oself.model.time_prepare')\n self.time_upload_data = None\n s('oself.time_upload_data = oself.model.time_upload_data')\n self.time_fitonly = None\n s('oself.time_fitonly = oself.model.time_fitonly')", "def update_from_kwargs(self, **kwargs):\n for (key, value) in kwargs.items():\n setattr(self, key, value)", "def set_fields(self, fields: FieldDict):\n super().set_fields(fields)\n nested_field: NestedField = self.fields[self.nested]\n if not isinstance(nested_field, NestedField):\n raise TypeError(\n f'The field \"{self.nested}\" must be a NestedField instance, not \"{nested_field}\".')\n if nested_field.many:\n raise ValueError(f'The field \"{self.nested}\" can not be set as \"many=True\".')\n self.nested_field = nested_field\n # create partial methods\n self._do_dump = partial(\n getattr(self, self.dump_method),\n target=nested_field.dump_target,\n method=nested_field.dump,\n )\n self._do_load = partial(\n getattr(self, self.load_method),\n target=nested_field.load_target,\n method=nested_field.load,\n )", "def __set__(self, instance, value):\n # Run process for the nested field type for each value in list\n instance._values[self.name] = [self.field.process(v) for v in value]", "def 
_SetValue(param, field, value):\n attr = None\n attr_name = ''\n for attr_name in field.split('.'):\n if attr:\n param = attr\n\n if not hasattr(param, attr_name):\n raise ValueError(\"Can't find field %s.\" % field)\n attr = getattr(param, attr_name)\n param.SetField(attr_name, value)", "def part(self, **kwargs):\n for key, value in kwargs.items():\n setattr(self, key, value)", "def updateAttrs(self, kwargs):\n for k, v in kwargs.iteritems():\n setattr(self, k, v)", "def _modify(self, fields):\n return fields", "def update(self, **kwargs):\n for k, v in kwargs.iteritems():\n if hasattr(self, k):\n setattr(self, k, v)", "def set_fields(self, **kwargs):\n for key, value in kwargs.items():\n if key in self.fields.keys():\n if type(value) != bool:\n raise TypeError('Expecting Bool passed {}'\n .format(type(value)))\n self.fields[key] = value\n else:\n raise KeyError", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def fill(self, **kwargs):\r\n for name in kwargs.keys():\r\n setattr(self, name, kwargs[name])\r\n return self", "def __setstate__(self, state):\n exclude_keys = ['_is_zero', '_is_positive', '_is_nonneg']\n for key,value in state.items():\n if key in exclude_keys:\n continue\n if key == '_field':\n self._init_field(value)\n continue\n self.__dict__[key] = value\n return state", "def __set__(self,obj,val):\n assert len(val) == len(self.attribs),\"Compound parameter '%s' got the wrong number of values (needed %d, but got %d).\" % (self._attrib_name,len(self.attribs),len(val))\n \n if not obj:\n for a,v in zip(self.attribs,val):\n setattr(self.objtype,a,v)\n else:\n for a,v in zip(self.attribs,val):\n setattr(obj,a,v)", "def set_additional_fields(cls, model, data):\n for k, v in data.items():\n if not hasattr(model, k):\n setattr(model, k, v)", "def _checked_set(self, struct, field, value):\n setattr(struct, field, value)\n self._check_field_length(struct.DESCRIPTOR.fields_by_name[field], value)", "def set_fields(self, fields: FieldDict):\n super().set_fields(fields)\n # bind fields to attrs\n for attr in ('a', 'b'):\n setattr(self, f'field_{attr}', self.fields[getattr(self, attr)])\n # get error messages\n dump_error = self.error_cls(self.get_error_message(\n self.op, a=self.field_a.dump_source, b=self.field_b.dump_source))\n load_error = self.error_cls(self.get_error_message(\n self.op, a=self.field_a.load_source, b=self.field_b.load_source))\n # set partial arguments for `validate`\n self.validate_dump = partial(\n self.validate,\n a_key=self.field_a.dump_target,\n b_key=self.field_b.dump_target,\n error=dump_error)\n self.validate_load = partial(\n self.validate,\n a_key=self.field_a.load_target,\n b_key=self.field_b.load_target,\n error=load_error)", "def field_values(self, field_values):\n\n self._field_values = field_values", "def updateData(self,d):\n for f in self.fields:\n n = f.name()\n if n in d:\n f.setValue(d[n])", "def _save(self):\n for attrib in self.attribs:\n setattr(self, attrib, getattr(self.obj, attrib))", "def __setattr__(self, name, value):\n # Special case post-init names. 
They need to be set after constructor.\n if name in constants._POST_INIT_FIELD_ATTRIBUTE_NAMES:\n object.__setattr__(self, name, value)\n return\n\n # All other attributes must be set before __initialized.\n if not self.__initialized:\n # Not initialized yet, allow assignment.\n object.__setattr__(self, name, value)\n else:\n raise AttributeError('Field objects are read-only')", "def __setattr__(self, name, value):\n # Special case post-init names. They need to be set after constructor.\n if name in _POST_INIT_FIELD_ATTRIBUTE_NAMES:\n object.__setattr__(self, name, value)\n return\n\n # All other attributes must be set before __initialized.\n if not self.__initialized:\n # Not initialized yet, allow assignment.\n object.__setattr__(self, name, value)\n else:\n raise AttributeError('Field objects are read-only')", "def set_attributes(self, settings):\n\n for key, value in settings.items():\n self.__dict__[key] = value", "def update_properties(self, prop_dict):\n ft_dict = {ft.name: ft for ft in self.get_field_types()}\n for name, val in prop_dict.items():\n ft = ft_dict[name]\n if ft.is_parameter():\n key = \"value\"\n else:\n key = \"sample\"\n if issubclass(type(val), Sequence) and ft.array:\n self.set_field_value_array(name, None, [{key: v} for v in val])\n else:\n self.set_field_value(name, None, {key: val})", "def _update_careful (self, dict):\n for attr in dir(self):\n if dict.has_key(attr):\n dval = dict[attr]\n if dval is not None:\n setattr(self, attr, dval)", "def update(self, f):\n\n for p in self.__mapper__.attrs:\n\n if p.key == 'oid':\n continue\n try:\n setattr(self, p.key, getattr(f, p.key))\n\n except AttributeError:\n # The dict() method copies data property values into the main dict,\n # and these don't have associated class properties.\n continue", "def update(self, f):\n\n for p in self.__mapper__.attrs:\n\n if p.key == 'oid':\n continue\n try:\n setattr(self, p.key, getattr(f, p.key))\n\n except AttributeError:\n # The dict() method copies data property values into the main dict,\n # and these don't have associated class properties.\n continue", "def _set_multi_field_values(self):\n for block_key, block in self.block_field_values.items():\n data = self._create_soap_object('ArrayOfArrayOfString')\n names = self._create_soap_object('ArrayOfString')\n names.string = block[0].keys()\n data.ArrayOfString.append(names)\n\n for item in block:\n row = self._create_soap_object('ArrayOfString')\n row.string = item.values()\n data.ArrayOfString.append(row)\n\n self.client.service.SetBlockFieldValues(blockName=block_key, blockFieldValues=data)", "def set_attr(self, name: str, values: Union[list, tuple, object]):", "def auto_populate(self, model):\n for name, val in self._fields.iteritems():\n setattr(model, name, val.data)", "def modify(self, fields=None, **fields_kwargs):\n modified_fields = set()\n fields = self.make_dict(fields, fields_kwargs)\n fields = self._modify(fields)\n for field_name, field_val in fields.items():\n in_schema = field_name in self.schema.fields\n if in_schema:\n setattr(self, field_name, field_val)\n modified_fields.add(field_name)\n\n return modified_fields", "def __setattr__(*args, **kwargs):\n \n pass", "def __setattr__(*args, **kwargs):\n \n pass", "def f_set(self, *args, **kwargs):\n if args and self.v_name is None:\n raise AttributeError(\n \"Cannot set positional value because I do not have a name!\"\n )\n for idx, arg in enumerate(args):\n valstr = self.f_translate_key(idx)\n self.f_set_single(valstr, arg)\n\n for key, arg in kwargs.items():\n 
self.f_set_single(key, arg)", "def set(self, obj, value):\n pass", "def _assign_fields_to_params(cls, fields, params):\n if fields is None:\n fields = cls.get_default_read_fields()\n if fields:\n params['fields'] = ','.join(fields)", "def f_set(self, data):\n raise NotImplementedError(\"Should have implemented this.\")", "def setVals(self, *args, **kwargs):\n self._setVals(*args, **kwargs)\n self._check_vals()", "def __init__(self, **attributes):\n self.set(**attributes)", "def __post_init__(self):\n for field in dataclasses.fields(self):\n value = getattr(self, field.name)\n if not isinstance(value, field.type) and value:\n try:\n setattr(self, field.name, field.type(value))\n except ValueError:\n raise ValueError(f\"Expected {field.name} \"\n f\"to be {field.type}, \"\n f\"got {repr(value)}\")" ]
[ "0.7270721", "0.7230778", "0.7123487", "0.7027001", "0.7004821", "0.69436187", "0.6810718", "0.6810718", "0.6810718", "0.6810718", "0.6810718", "0.6810718", "0.6793038", "0.6764726", "0.6744737", "0.66250837", "0.6608694", "0.6586342", "0.6529556", "0.65196884", "0.64807785", "0.6453367", "0.64004004", "0.638503", "0.63662785", "0.6299322", "0.62792003", "0.6274516", "0.62626547", "0.6250594", "0.6239453", "0.62192976", "0.62148714", "0.6201416", "0.6198473", "0.6183607", "0.61768645", "0.6163084", "0.6157761", "0.6142081", "0.61411726", "0.6134109", "0.61248565", "0.6119339", "0.60841113", "0.607234", "0.60679394", "0.6050814", "0.6042941", "0.6040246", "0.6035987", "0.6034257", "0.60316885", "0.6021712", "0.6021712", "0.6021712", "0.6021712", "0.6021712", "0.6021712", "0.6021712", "0.6021712", "0.6021712", "0.6021712", "0.6021712", "0.6021712", "0.6021712", "0.6021712", "0.6021712", "0.6021712", "0.6021712", "0.6021712", "0.6021712", "0.60159355", "0.5993274", "0.59836644", "0.59809434", "0.5961268", "0.5959477", "0.593748", "0.5932941", "0.5925491", "0.59234446", "0.5914052", "0.5910033", "0.59054506", "0.5893938", "0.5889151", "0.5889151", "0.58787745", "0.58745456", "0.58709246", "0.5858789", "0.58534455", "0.58534455", "0.58448136", "0.58371836", "0.582015", "0.5819218", "0.58085155", "0.5799767", "0.57954067" ]
0.0
-1
Create a log string from this item's log data, sorted by key.
def get_log_string(self):
    result = json.dumps(self.data, sort_keys=True)
    if self.intrusion is not None and self.intrusion != "":
        result += ",{}".format(self.intrusion)
    return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def log(data):\n items = []\n for key, value in data.items():\n if value is None:\n items.append('[{}]'.format(key))\n else:\n items.append('[{} {}]'.format(key, value))\n print(' '.join(items))", "def _format_entries(self):\n\n def format_item(key, value):\n if value is None:\n return str(key)\n else:\n return '%s -> %x' % (key, value,)\n\n items = self._entries.items()\n items.sort()\n return '{%s}' % (', '.join([format_item(*item) for item in items]),)", "def write_log(self):\n if self.hash_log_curr:\n temp_dict = {}\n count = 0\n for key, value in self.hash_log_curr.iteritems():\n temp_dict[value[4] + str(count)] = key\n count += 1\n temp_sort = temp_dict.keys()\n temp_sort.sort()\n temp_sort.reverse()\n\n try:\n log = open(self.log_path + r'\\hash_log.txt', 'w')\n # log header\n log.write(self.log_header)\n # write hash_log_content to log\n for key in temp_sort:\n value = self.hash_log_curr[temp_dict[key]]\n log.write(value[0]+'|'+value[1]+'|'+value[2]+'|'+value[3]+'|'+value[4]+'|'+value[5] + '\\n')\n log.close()\n self.print_to_log('New log writen to file: ' + self.log_path + r'\\hash_log.txt' )\n except IOError:\n self.print_to_log('Cannot open log file to write')\n raise\n except:\n self.print_to_log('Unknown Error')\n raise", "def strkey(item):\n return '%s:%s:%s' % (item['group_id'], item['artifact_id'], item['version'])", "def generate_datalogger_csv(logdir, datestring, keys, values, ts_keyname):\n if datestring == datetime.date.today().isoformat():\n logging.error(\"todays Logs are actually written and cannot used in datalogger\")\n return\n headers = [ts_keyname, ] + list(keys) + list(values)\n linebuffer = []\n linebuffer.append(\"\\t\".join(headers)) \n filename = os.path.join(logdir, \"haproxylog_%s.gz\" % datestring)\n logging.info(\"parsing file %s\", filename)\n try:\n parser = parser_generator(keys, values, gzip.open(filename, \"rb\"))\n for line in aggregator(keys, values, ts_keyname, parser):\n linebuffer.append(line)\n except IOError as exc:\n logging.exception(exc)\n return StringIO.StringIO(\"\\n\".join(linebuffer))", "def tostring(self, key=None):\n if key is not None:\n return self._asline(key, self[key])\n else:\n out = ''\n for k in list(self.keys()):\n out += self._asline(k, self[k])\n return out", "def buildheader(self):\n \n lines = {}\n for k in self._d:\n lines[self._d[k]]='# %d %s'%(self._d[k],k.upper())\n #sort the new keys\n nkeys= lines.keys()\n nkeys.sort()\n #join them together with newlines\n ans = ''\n for k in nkeys:\n ans=ans+\"%s\\n\"%lines[k]\n return ans", "def _log_str(self):\n return (\n \"[name: {}, id: {}]\"\n .format(self._raw['Name'] if self._raw else \"<not retrieved>\", self._id)\n )", "def print_record(self, key):\n percentile_amount = str(int(round(self.running_percentile[key].get_percentile())))\n total_amount = str(int(round(self.running_percentile[key].total_amount)))\n count = str(len(self.running_percentile[key]))\n record = [key.recipient, key.zip_code, str(key.year), percentile_amount, total_amount, count]\n return '|'.join(record)", "def get_log_string(self, level=None):\r\n\r\n log_lines = [f\"Name:{str(self.name)} Level:{str(level)}\"]\r\n for key, item in self.log.items():\r\n log_lines.append(f\"{key}:\")\r\n if item:\r\n for entry in item:\r\n if not level or (entry[\"level\"] == level):\r\n log_lines.append(f\"\\t[{entry['level']} {entry['msg']}]\")\r\n return \"\\n\".join(log_lines)", "def merge_logs(self):\n ourlog = LogData()\n for l in self.data_set:\n ourlog.entries = ourlog.entries + l.entries\n 
ourlog.sort_time()\n self.finalized_data = ourlog", "def flatFormat(event: LogEvent) -> str:\n fieldValues = event[\"log_flattened\"]\n keyFlattener = KeyFlattener()\n s = []\n\n for literalText, fieldName, formatSpec, conversion in aFormatter.parse(\n event[\"log_format\"]\n ):\n s.append(literalText)\n\n if fieldName is not None:\n key = keyFlattener.flatKey(fieldName, formatSpec, conversion or \"s\")\n s.append(str(fieldValues[key]))\n\n return \"\".join(s)", "def to_logchunk(self):\n\t\tdemo_name = os.path.splitext(self.demo_name)[0]\n\t\tto_write = [(\"Killstreak\", value, tick, date) for value, tick, date in self.killstreaks]\n\t\tto_write.extend((\"Bookmark\", value, tick, date) for value, tick, date in self.bookmarks)\n\n\t\tto_write.sort(key = lambda t: t[2])\n\n\t\treturn \"\\n\".join(\n\t\t\tf'[{date}] {type_} {value} (\"{demo_name}\" at {tick})'\n\t\t\tfor type_, value, tick, date in to_write\n\t\t)", "def __generate_key_from_list_of(self, list_of_keys):\r\n list_of_keys = list(list_of_keys)\r\n list_of_keys.sort()\r\n return \",\".join(list_of_keys)", "def build_log_entry(\n hostname: str, user: str, date: dt.datetime, wdir: Path, cmd: str\n) -> str:\n return (\n f'[{date.strftime(\"%Y-%m-%d %H:%M:%S\")}] ({user}@{hostname}) '\n f\"{wdir}\\n\\t{cmd}\\n\"\n )", "def __str__(self):\n self.vals.sort()\n result = ''\n for e in self.vals:\n result = result + str(e) + ','\n return '{' + result[:-1] + '}'", "def __str__(self):\n return str(self._key) + \", \" + str(self._value[0]) + \", \" + str(self._value[1])", "def __str__(self):\r\n self.vals.sort()\r\n return '{' + ','.join([str(e) for e in self.vals]) + '}'", "def __str__(self):\n self.vals.sort()\n return '{' + ','.join([str(e) for e in self.vals]) + '}'", "def generate_data(item, target='key' or 'value'):\n data = []\n target_is_key = target == 'key'\n for key, value in OrderedDict(sorted(item.items())).items():\n if target_is_key:\n data.append(key)\n continue\n\n # For empty list we are just writing an empty string ''.\n if isinstance(value, list) and not len(value):\n value = ''\n\n data.append(value)\n\n return data", "def __str__(self):\n return '\\n'+'\\n'.join([\"%-15s: %s\" % (qq(w), str(v)) for w, v in sorted(self.value.items())]) + '\\0'", "def by_key(item):\n return Line['key', item]", "def __str__(self):\n return '(' + str(self.key) + ': ' + str(self.value) + ')'", "def __str__(self):\n return '(' + str(self.key) + ': ' + str(self.value) + ')'", "def build_column(data: List[Dict[str, Any]]) -> str:\n return \"\\n\".join(map(format_item, data[\"items\"][:5]))", "def get_log_item_name(self, item_name, item_attrs):\n log_item_name = None\n try:\n log_item_name = str(item_attrs['last_modified_timestamp']) + '_' + item_name\n except IndexError:\n log_item_name = '0' + '_' + item_name\n\n return log_item_name", "def _create_str(results_dict, level=0, parent=True):\n result = ''\n keys = sorted(results_dict.keys())\n if not keys:\n return result\n\n if parent:\n has_remote_entries = any(\n self._map(\n lambda lk, entry: not entry.physical_key.is_local()\n )\n )\n pkg_type = 'remote' if has_remote_entries else 'local'\n result = f'({pkg_type} Package)\\n'\n\n for key in keys:\n result += ' ' + (' ' * level) + '└─' + key + '\\n'\n result += _create_str(results_dict[key], level + 1, parent=False)\n\n return result", "def sort_key(self):\n ...", "def to_string(cls, hierarchical_dict: dict) -> str:\n keys = cls.get_all_keys(hierarchical_dict)\n keys = sorted(keys)\n res = \"\"\n for key in keys:\n res += f\"{key} = 
{FuseUtilsHierarchicalDict.get(hierarchical_dict, key)}\\n\"\n\n return res", "def test_log_key(self):\n kwargs = {\n \"follow_up\": None,\n \"notes\": \"This is a test.\",\n \"gdoc_link\": \"\",\n \"state_id\": \"KS\",\n \"contact\": None,\n \"user_id\": 9,\n \"formal_request\": False,\n \"date\": \"2013-03-28\",\n \"org_id\": 15,\n \"subject\": \"Test subject line\"\n }\n log = Log(**kwargs)\n log.save()\n self.assertEqual(log.log_key(), ('KS', '2013-03-28', 'Test subject line'))\n self.assertEqual(log.log_key(as_string=True), 'KS - 2013-03-28 - Test subject line')\n\n # Test with contact\n contact = Contact.objects.all()[0]\n log.contact = contact\n log.save()\n expected = (\n 'KS',\n '2013-03-28',\n u'Williams (Kansas Secretary of State elections division)',\n 'Test subject line',\n )\n self.assertEqual(expected, log.log_key())", "def format_date_sortkey(self, data):\n return self.input['start_date'].date().strftime('%Y%m%d')", "def __str__(self):\n string = \"| \"\n for i in range(self.size):\n if self.keys[i] is None:\n pass\n else:\n string += str(i) + \"(\" + str(self.keys[i]) + \", \" + str(self.values[i]) + \") \"\n string += \"|\"\n return string", "def key_to_string(cls, key):\n return '_'.join(map(str, key))", "def __str__(self):\n return '({0},{1})'.format(self.key, self.value)", "def _stab_log_data(self, timestamp, data, logconf):\n print('[%d][%s]: %s' % (timestamp, logconf.name, data))", "def format_time_sortkey(self, data):\n return self.input['start_time'].time().strftime('%H%M').lstrip('0')", "def log_kv(self, key_values, timestamp=None):\n return self", "def key(self)->str:\n return \"{}:{}.{}.{}\".format(self.source, self.db, self.ed, self.rec)", "def save_log(self, log, update=False):\n\n main_keys = [\n 'name',\n 'sender',\n 'receiver',\n 'time',\n 'duty',\n 'net'\n ]\n\n # print(test)\n\n lst = []\n for key in main_keys:\n # print(key)\n lst.append(log[key])\n log.pop(key)\n \n # LEGACY\n # inn_lst = []\n # for serial, val in log.items():\n # if not (serial in main_keys):\n # inn_lst.append(serial + '\\\\' + val)\n\n # lst.append('||'.join(inn_lst))\n\n lst.append(json.dumps(log))\n\n # print(lst)\n\n if update:\n self.update_record(lst, log['logID'])\n\n else:\n self.new_return(lst)", "def create_log_entry(data):\n\n if '__iter__' not in data.__dir__():\n return BadArgumentError(type(data))\n\n log_entry = str()\n\n for point in data:\n\n if '__iter__' in point.__dict__():\n joined_point = ','.join(point)\n log_entry += str(joined_point)\n else:\n log_entry += str(point) + ','\n\n return log_entry[:-1]", "def format(self, record):\n return '[{}] {}'.format(QBShFormatter.LEVEL_DICT[record.levelname], record.getMessage())", "def log_dict(self, source, agg=\"auto\", group=None):\n for key, val in source.items():\n if isinstance(val, dict):\n sub_group = key if group is None else group+\"->\"+key\n self.log_dict(val, agg=agg, group=sub_group)\n else:\n self.log(key, val, group=group, agg=agg)", "def dumps( source ):\n data = __convert( source )\n\n line = '{0}={1} '.format('rec_type', data['rec_type'])\n\n if 'eventSecond' in data.keys():\n eventSec = '{0}={1} '.format('event_sec', data['event_sec'])\n line = '{0} {1}'.format(line, eventSec)\n del data['eventSecond']\n\n if 'event_sec' in data.keys():\n eventSec = '{0}={1} '.format('event_sec', data['event_sec'])\n line = '{0}{1}'.format(line, eventSec)\n del data['event_sec']\n\n\n del data['rec_type']\n\n # from datetime import datetime\n # secs = datetime.now() - datetime(1970, 1, 1)\n # data['event_sec'] = int( 
secs.total_seconds() )\n\n line += kvdumps(\n data,\n delimiter = ' ',\n quoteEmptyString = True,\n sort = False,\n escapeNewLines = True )\n\n return line", "def transaction_recipient_agg_key(record: dict) -> str:\n if record[\"recipient_hash\"] is None or record[\"recipient_levels\"] is None:\n return json.dumps(\n {\n \"name\": record[\"recipient_name\"],\n \"duns\": record[\"recipient_unique_id\"],\n \"uei\": record[\"recipient_uei\"],\n \"hash_with_level\": \"\",\n }\n )\n return json.dumps(\n {\n \"name\": record[\"recipient_name\"],\n \"duns\": record[\"recipient_unique_id\"],\n \"uei\": record[\"recipient_uei\"],\n \"hash_with_level\": f\"{record['recipient_hash']}-{return_one_level(record['recipient_levels'])}\",\n }\n )", "def format(self, record: logging.LogRecord = None) -> str:\n # s = super().format(record)\n s = None\n e = {}\n e['id'] = uuid.uuid4().hex\n e['message'] = record.getMessage()\n # log.warning('record.message: %r', record.getMessage())\n # log.warning('record.args: %r', record.args)\n e['created'] = record.created\n e['priority'] = record.levelname\n e['args'] = record.args\n e['source_code'] = {}\n e['source_code']['pathname'] = record.pathname\n e['source_code']['funcName'] = record.funcName\n e['source_code']['lineno'] = record.lineno\n ctx = record.args.get(PIPELINE_CONTEXT_KEY, None)\n if ctx:\n e[PIPELINE_CONTEXT_KEY] = ctx.toDict()\n # use array enclosure a[] to mainain the log file\n # yaml compliant as new events are appended\n # - event1:\n # - event2:\n # - ...\n a = [e]\n s = yaml.dump(a)\n return s", "def _get_log_file(self, _action):\n prefix = \"work/{mapper}.{{library_name}}/log/{mapper}.{{library_name}}\".format(\n mapper=self.__class__.name\n )\n key_ext = (\n (\"log\", \".log\"),\n (\"conda_info\", \".conda_info.txt\"),\n (\"conda_list\", \".conda_list.txt\"),\n )\n for key, ext in key_ext:\n yield key, prefix + ext\n yield key + \"_md5\", prefix + ext + \".md5\"", "def kv_str(kvl):\n return '[%s]' % ', '.join('%s: %s' % (k, v) for k, v in kvl)", "def __str__(self):\n s = \"\"\n for i in self.__buckets:\n temp =\"\"\n while len(temp) + len(str(self.__buckets.index(i))) < 5:\n temp += \"_\" \n s += \"[{}{}]{}\\n\".format(temp, self.__buckets.index(i), str(i))\n return s", "def showd(d):\r\n return ' '.join([':%s %s' % (k,v)\r\n for k,v in\r\n sorted(d.items())\r\n if not \"_\" in k])", "def __str__(self):\n # These are required tags so we should have generated an\n # error beforehand and this shouldn't raise a ``KeyError``\n s = [(\"Album Title\", self[\"TITLE\"]), (\"Album Artist\", self[\"ARTIST\"]),\n (\"Year\", self[\"DATE_RECORDED\"]), (\"Genre\", self[\"GENRE\"])]\n s = OrderedDict(s)\n\n def add_optional(key):\n nonlocal s\n if key in self:\n text = key.replace('_', ' ').split(' ')\n text = ' '.join([x.capitalize() for x in text])\n s[text] = self[key]\n\n add_optional(\"LABEL\")\n add_optional(\"ISSUE_DATE\")\n add_optional(\"ORIGINAL_MEDIUM\")\n add_optional(\"VERSION\")\n add_optional(\"HD_FORMAT\")\n add_optional(\"DISC_NAME\")\n add_optional(\"PHASE_NAME\")\n if self.discs > 1:\n s[\"Disc\"] = self[\"PART_NUMBER\"]\n s[\"Discs\"] = self.discs\n if self.channels != \"2.0\":\n s[\"Channels\"] = self.channels\n # Now we have to deal with the formatted output. 
First we need\n # the maximum length of the keys to properly align the output\n # Note that the keys used will have a space appended, so we add 1\n max_len = max(len(x[0]) for x in s)+1\n\n # Output for an entry in ``s`` of (\"Year\", \"2016\") with a ``max_len`` of 10\n # would be: '= Year .....: 2016'\n def line(k, v):\n return f\"{k.ljust(max_len, '.')}: {v}\"\n\n s = [line(*x) for x in s.items()]\n # Now we can reuse ``max_len`` to mean the longest fully formatted line\n # We want to add '= ' to the left side and ' =' to the right side to\n # form a border\n max_len = max(len(x) for x in s)\n s = [f'= {x:{max_len}} =' for x in s]\n max_len += 4\n s = [\" ALBUM INFORMATION \".center(max_len, \"=\")] + s + [\"=\" * max_len]\n return \"\\n\".join(s)", "def write(self, data: dict):\n self.logger.info(\"\\t\".join(str(x) for x in data.values()))", "def format_result(self):\n return ('{}\\n\\n{}'.format(\n LogParser.format_dict(LogParser.order_dict(self.urls)[:3]),\n LogParser.format_dict(LogParser.order_dict(self.status_codes))))", "def __str__(self):\n columns = list(self.metrics.keys())\n columns.sort()\n out = '%s\\n' % ','.join(columns)\n values = [str(self.metrics[c]) for c in columns]\n out += '%s\\n' % ','.join(values)\n return out", "def _CreateQuickLog(namespace, key):\n namespaced_key = '%s__%s' % (namespace, key)\n log = QuickLog(id=namespaced_key)\n log.put()\n return log", "def format(self, data):\r\n for name, value in sorted(data.items()):\r\n line = '{name} = {value}\\n'.format(\r\n name=name,\r\n value=value,\r\n )\r\n yield line", "def write_with_log(self, key: str, value: Any) -> None:\n self.__setitem__(key, value)", "def makeChronList(self):\n from operator import itemgetter\n ## make list of msg lists in the format accespted by reconstructLine\n self.outData_temp = [] # this will be in chronological order\n for sens in self.outData:\n if sens is not 'header':\n for meas in self.outData[sens]:\n for time in self.outData[sens][meas]:\n value = self.outData[sens][meas][time]\n thismsg = [time, sens, meas, str(value)] # leave time as float for sorting\n self.outData_temp.append(thismsg)\n self.outData_temp.sort(key=itemgetter(0)) # sort by first index\n for msg in self.outData_temp: # now we can make time a string\n msg[0] = str(msg[0])", "def flat_dash_logs(dashes):\n logs = []\n for dash in dashes:\n current_logs = dash.get(\"logs\")\n for log in current_logs:\n log[\"path\"] = dash.get(\"path\")\n logs.append(log)\n\n logs.sort(key=lambda x: parse_timestamp(x[\"created_at\"]), reverse=True)\n return logs", "def get(self, key=False, httpformat=False):\n if not key:\n result = self.data\n elif not isinstance(key, basestring):\n raise TypeError('keys have to be string')\n else:\n result = []\n for k, v in self.data:\n if k.lower() == key.lower():\n result.append((str(k), str(v)))\n if httpformat:\n return '\\n'.join(['%s: %s' % item for item in result])\n return result", "def format(self, record):\n data = dict()\n\n data[\"category\"] = record.name\n data[\"timestamp\"] = datetime.datetime.utcnow()\\\n .replace(tzinfo=utc)\\\n .strftime('%Y-%m-%dT%H:%M:%SZ')\n data[\"level\"] = record.levelname\n data[\"message\"] = record.msg\n data[\"threadName\"] = record.threadName\n data[\"hostname\"] = self.hostname\n \n return data", "def _to_string(self) -> str:\n\n string_list = []\n for key, value in self.__dict__.items():\n if isinstance(value, dict):\n string_list.append(key)\n string_list.extend('\\n'.join([\"Key: {:24}\\tValue: {}\".format(_key, _value) for _key, _value in 
value.items()]))\n else:\n string_list.append(\"Key: {:24}\\tValue: {}\\n\".format(key, value))\n return ''.join(string_list)", "def tree_str(self, depth_index=0, recursive_dict=None):\r\n if not hasattr(self,'iteritems'): return ''\r\n if recursive_dict is not None: self = TreeMap(recursive_dict)\r\n buff_str = ''\r\n \r\n for item in self.iteritems():\r\n # Starts working now.\r\n k = item[0]\r\n v = item[1]\r\n \r\n spacer = '\\n' + '| ' * depth_index\r\n \r\n if hasattr(v,'iteritems'):\r\n buff_str += spacer + '+--[ ' + k + ' ]'\r\n buff_str += self.tree_str(depth_index=depth_index + 1, recursive_dict=v)\r\n else:\r\n buff_str += spacer + '\\_.--[ ' + str(k) + ' = ' + str(v) + ' ]'\r\n \r\n return buff_str", "def __str__(self):\n outbuffer = []\n outbuffer.append(\"%d keys in dataset\" % len(self.__quantile))\n outbuffer.append(self.head())\n outbuffer.append(\"...\")\n outbuffer.append(self.tail())\n return \"\\n\".join(outbuffer)", "def __str__(self):\n return functools.reduce(\n lambda acc, v: acc + str(v[0]) + \" : \" + str(v[1][1]) + \" - lifetime \" + str(v[1][0]) + os.linesep,\n self.store.items(), \"\")", "def _build_key(self, key: str) -> str:\n return \"-\".join((self._name, key))", "def formatSloppily(data):\n\n if isinstance(data, str) or isinstance(data, unicode):\n return data\n elif hasattr(data, \"get\") and hasattr(data, \"keys\"):\n # Write it as a comma-separted list, with colons separating keys\n # and values.\n return \", \".join(\n [ \"%s: %s\" % (formatSloppily(k), formatSloppily(data[k]))\n for k in data.keys() ])\n elif hasattr(data, \"__len__\"):\n # Write it as a comma-separated sequence.\n return \", \".join(map(formatSloppily, data))\n else:\n return str(data)", "def __repr__(self):\n # TODO: Instead of \"Key('Foo', 1)\" perhaps return \"Key(Foo, 1)\" ?\n args = []\n for item in self.flat():\n if not item:\n args.append('None')\n elif isinstance(item, basestring):\n if not isinstance(item, str):\n raise TypeError('Key item is not an 8-bit string %r' % item)\n args.append(repr(item))\n else:\n args.append(str(item))\n if self.app() != _DefaultAppId():\n args.append('app=%r' % self.app())\n if self.namespace() != _DefaultNamespace():\n args.append('namespace=%r' % self.namespace())\n return 'Key(%s)' % ', '.join(args)", "def _fieldList(self, key, year, month=None, day=None, hour=None, status=1, metaData=None):\n fields = [StatusLog.FIELD_TIMESTAMP]\n if (key is not None):\n fields.append(StatusLog.FIELD_KEY)\n if (year is not None):\n fields.append(StatusLog.FIELD_YEAR)\n if (month is not None):\n fields.append(StatusLog.FIELD_MONTH)\n if (day is not None):\n fields.append(StatusLog.FIELD_DAY)\n if (hour is not None):\n fields.append(StatusLog.FIELD_HOUR)\n if (status is not None):\n fields.append(StatusLog.FIELD_STATUS)\n if (metaData is not None):\n fields.append(StatusLog.FIELD_METADATA)\n \n # Make a string\n return '(' + ', '.join(fields) + ')'", "def get_formatted_task_log(self):\n try:\n log = requests.get(self.gs_base_url + \"/out.log\").content\n except:\n return [f\"####-##-## ##:##:## Task ID: {self.name}\\n\"]\n return (f\"####-##-## ##:##:## Task ID: {self.name}\\n\" + log.decode('utf-8')).splitlines()", "def as_keyvalue(self) -> str:\n sep = ',' if self.comma_sep else self.SEP\n return (\n f'\"{self.exp_out()}\" \"{self.target}{sep}{self.exp_in()}'\n f'{sep}{self.params}{sep}{self.delay:g}{sep}{self.times}\"\\n'\n )", "def __str__(self):\n return str(self.LOG_TYPES[self.name])", "def __str__(self):\n return '<{}>'.format(self.key.id())", "def 
__str__(self):\n return '<{}>'.format(self.key.id())", "def __str__(self):\n sio = StringIO()\n for k in self:\n sio.write(\"%s %s\\n\" % (repr(k), repr(self[k])))\n return sio.getvalue()", "def _key_to_str(self, key: Any) -> Any:\n if isinstance(key, str):\n return key\n if isinstance(key, int):\n return list(self._data_vars.keys())[key]\n if isinstance(key, slice):\n s = key.indices(len(self))\n return self._key_to_str(list(range(*s)))\n if isinstance(key, Iterable):\n keys = []\n for k in key:\n keys.append(self._key_to_str(k))\n return keys\n if hasattr(key, \"name\"):\n return key.name\n raise TypeError(f\"indexing with type {type(key)} is not supported\")", "def __str__(self):\n return 'CacheItem: [{key}-{value}]'.format(key=self._key, value=self._value)", "def cee_dict_to_rsyslog(cls, cee_dict):\n structured_data = cee_dict.get('native')\n if structured_data is not None:\n structured_data = cls.sd_dict_to_syslog_str(structured_data)\n\n log = ('<{pri}>{ver} {time} {host} {app} {pid} {msgid} {sd} '\n '{msg}').format(\n pri=cee_dict.get('pri'),\n time=cee_dict.get('time', '-'),\n ver=cee_dict.get('ver'),\n host=cee_dict.get('host', '-'),\n app=cee_dict.get('pname', '-'),\n pid=cee_dict.get('pid', '-'),\n msgid=cee_dict.get('msgid', '-'),\n sd=structured_data or '-',\n msg=cee_dict.get('msg'))\n\n return b'{length} {syslog}'.format(length=len(log), syslog=log)", "def initialize_log_data(ids_bcs_added_field):\r\n\r\n log_data = {}\r\n\r\n for curr_key in ids_bcs_added_field.keys():\r\n base_key = \"\"\r\n if curr_key[0]:\r\n base_key += curr_key[0] + \",\"\r\n if curr_key[1]:\r\n base_key += curr_key[1] + \",\"\r\n base_key += ids_bcs_added_field[curr_key]\r\n log_data[base_key] = 0\r\n\r\n return log_data", "def __str__(self):\n\n # Seven tag roster list\n strl = [\"Event\",\"Site\",\"Date\",\"Round\",\"White\",\"Black\",\"Result\"]\n out = \"\"\n\n # We first print in order of STR, then any others\n for k in strl:\n if k in self.keys():\n out += '[{} \"{}\"]\\n'.format(k, self[k])\n\n for k in self.keys():\n if k not in strl:\n out += '[{} \"{}\"]\\n'.format(k, self[k])\n\n # If there are no tag pairs, the extra newline is not needed\n if out:\n out += \"\\n\"\n return out", "def get_key(self, state: Dict) -> str:\n\n return \"_\".join(sorted(state))", "def format(self, record: logging.LogRecord) -> str:\n return filter_datum(self.fields, self.REDACTION,\n super().format(record), self.SEPARATOR)", "def new_archive_record(self, event):\n print \"REC: \", weeutil.weeutil.timestamp_to_string(event.record['dateTime']), StdPrint.sort(event.record)", "def format(self, record):\n row = [self.formatTime(record, self.datefmt), record.name, record.levelname]\n keys = filter(self.filterer, record.__dict__)\n extra = [record.__dict__[k] for k in keys]\n\n self.writer.writerow(row + extra + [record.getMessage()])\n data = self.output.getvalue()\n self.output.truncate(0)\n self.output.seek(0)\n return data.strip()", "def stringify_change(change):\n key = change.key\n a = change.a or '<null>'\n b = change.b or '<null>'\n return '{}: {} => {}'.format(key, a, b)", "def format(self, record):\n extra = {\n \"message\": record.getMessage(),\n \"time\": self.formatTime(record, self.datefmt),\n \"msecs\": record.msecs,\n \"name\": record.name,\n \"level\": record.levelname,\n }\n\n keys = filter(self.filterer, record.__dict__)\n extra.update({k: record.__dict__[k] for k in keys})\n return str(CustomEncoder().encode(extra))", "def _writeLog (self, item):\n self.log.write (item.encode (self.logEncoding))\n 
self.log.write (b'\\n')\n # instead of locking, check we’re running in the main thread\n if self.log.tell () > self.maxLogSize and \\\n threading.current_thread () is threading.main_thread ():\n self._flushLogEntries ()", "def makeString(self, a):\n out = \"\"\n if type(a) is dict:\n for key, val in a.items():\n out = \"%s%s%s%s%s\" % (out, key, self.dataDelimiterKey, val, self.dataDelimiterEntry)\n return out\n elif type(a) is list:\n return \"%s%s\" % (self.dataDelimiterEntry.join(a), self.dataDelimiterEntry)", "def get_out_of_date_strings(data):\n result = []\n if data:\n max_key_width = max([len(key) for key in data])\n max_val1_width = max([len(str(val[0])) for val in data.values()])\n max_val2_width = max([len(str(val[1])) for val in data.values()])\n for key, val in data.iteritems():\n output_string = (u\"{:>{max_key}} JSS Version:{:>{max_val1}} App \"\n \"Store Version: {:>{max_val2}}\".format(\n key, val[0], val[1], max_key=max_key_width,\n max_val1=max_val1_width,\n max_val2=max_val2_width))\n result.append(output_string)\n return result", "def __str__(self):\n if len(self.__keys) == 0:\n return '{}'\n output = '{'\n fmt = '{}: {}, '\n for key, val in zip(self.__keys, self.__vals):\n output += fmt.format(repr(key), repr(val))\n return output[:-2] + '}'", "def format(self, record: LogRecord) -> str:\n record.asctime = datetime.datetime.utcnow().strftime(\"%Y-%m-%dT%H:%M:%S.%fZ\")\n\n message = record.getMessage()\n if record.exc_info:\n eno = record.exc_info\n stacktrace = \"\".join(traceback.format_exception(None, eno[1], eno[2]))\n message += f\" excp: {stacktrace}\"\n if record.stack_info:\n stack = self.formatStack(record.stack_info)\n message += f\" trace: {stack}\"\n\n log_output = {\n \"tool\": type(self.checker).__name__,\n \"type\": \"infrastructure\",\n \"severity\": record.levelname,\n \"severityLevel\": max(0, record.levelno // 10 - 1),\n \"timestamp\": record.asctime,\n \"module\": record.module,\n \"function\": record.funcName,\n \"flag\": self.checker.flag,\n \"flagIndex\": self.checker.flag_idx,\n \"runId\": self.checker.run_id,\n \"roundId\": self.checker.round,\n \"relatedRoundId\": self.checker.flag_round,\n \"message\": message,\n \"teamName\": self.checker.team,\n \"teamId\": self.checker.team_id,\n \"serviceName\": self.checker.service_name,\n \"method\": self.checker.method,\n }\n\n return LOGGING_PREFIX + json.dumps(log_output)", "def to_string(self):\n result = \"\"\n if self.hostname:\n result += \"Hostname %s\\n\" % self.hostname\n if self.local_dirname:\n result += \"Localdir %s\\n\" % Quote(self.local_dirname)\n\n result += \"Filelist %d\\n\" % len(self.files_changed)\n for fileinfo in self.files_changed:\n result += \" %-7s %s\\n\" % (fileinfo[1], Quote(fileinfo[0]))\n\n vol_num_list = self.volume_info_dict.keys()\n vol_num_list.sort()\n\n def vol_num_to_string(vol_num):\n return self.volume_info_dict[vol_num].to_string()\n result = \"%s%s\\n\" % (result,\n \"\\n\".join(map(vol_num_to_string, vol_num_list)))\n return result", "def __build_message_to_print_in_log(log: LogModel) -> Optional[str]:\n\n if log is None:\n return None\n\n log_level_name: str = LogHelper.get_log_level_name(log.log_level)\n message: str = \\\n f'{log.creation_date} |->\\t[{log_level_name}]\\t{log.message}\\t\\t[Line: {log.line_number}]\\t[{log.filename}]'\n\n return message", "def sd_dict_to_syslog_str(cls, sd_dict):\n syslog_sds = ''\n for sd_key, sd_val in list(sd_dict.items()):\n syslog_str = '[{sd_key}'.format(sd_key=sd_key)\n\n for sub_key, sub_val in 
list(sd_val.items()):\n syslog_str = '{orig} {key}=\"{value}\"'.format(\n orig=syslog_str, key=sub_key, value=sub_val)\n syslog_str += ']'\n\n syslog_sds += syslog_str\n\n return syslog_sds", "def createLog(submissions: list) -> tuple:\n\n # map from problems to list of submissions\n log = []\n\n for user in submissions:\n usersubs = submissions[user]\n # Put the submissions into the probs list\n for sub in usersubs:\n if all(i == \"ok\" for i in sub.results):\n foundMatch = False\n\n for (j, k, l, m) in log:\n if k == User.get(user).username and m == sub.problem.id and j < sub.timestamp:\n foundMatch = True\n break\n elif k == User.get(user).username and m == sub.problem.id and j > sub.timestamp:\n foundMatch = False\n log.remove((j,k,l,m))\n break\n\n if not foundMatch:\n temp = (sub.timestamp, User.get(user).username, sub.problem.title, sub.problem.id)\n log.append(temp) \n\n log.sort(key = operator.itemgetter(0), reverse=True)\n print(log)\n return log", "def __str__(self):\n out = \"\"\n index = 0\n for bucket in self._buckets:\n out = out + str(index) + ': ' + str(bucket) + '\\n'\n index = index + 1\n return out", "def __str__(self):\r\n\r\n\t\tout = \"\"\r\n\t\tindex = 0\r\n\t\tfor bucket in self._buckets:\r\n\t\t\tout = out + str(index) + ': ' + str(bucket) + '\\n'\r\n\t\t\tindex = index + 1\r\n\t\treturn out", "def build_header(self, app_name, host_name, message_id, priority,\n process_id, version, timestamp, sd):\n head = SyslogMessageHead()\n head.appname = app_name or '-'\n head.hostname = host_name or '-'\n head.messageid = message_id or '-'\n head.priority = priority or '-'\n head.processid = process_id or '-'\n head.timestamp = timestamp or '-'\n head.version = version or '-'\n head.sd = sd or {}\n return head", "def generate_str_key(*args, **kwargs):\n\n return ''.join((\n str(kwargs.get('index')),\n str(kwargs.get('doc_type')),\n str(RecursivelySortedDict(kwargs.get('body', {}))),\n str(RecursivelySortedDict(kwargs.get('query', {})))))", "def debug_repr(self) -> str:\n repr_string = \"{}(Confi):\\n\".format(self.__class__.__name__)\n items = list(self.entries.items())\n items.sort(key = lambda item: item[0])\n indent = ' ' * 4\n for key, entry in items:\n repr_string += f\"{indent}{key}: {repr(entry.value)}\\n\"\n return repr_string", "def __str__(self):\n\n out = \"\"\n index = 0\n for bucket in self._buckets:\n out = out + str(index) + ': ' + str(bucket) + '\\n'\n index = index + 1\n return out" ]
[ "0.596858", "0.5945771", "0.5798388", "0.57700557", "0.54572475", "0.54416263", "0.5441615", "0.54370725", "0.5413551", "0.52532613", "0.5185452", "0.517661", "0.5171567", "0.5147455", "0.51468164", "0.5139392", "0.51186264", "0.5117158", "0.5103329", "0.5091452", "0.5071409", "0.5069917", "0.50488627", "0.50488627", "0.5042852", "0.5000648", "0.50001717", "0.49943596", "0.49861467", "0.49745613", "0.4969483", "0.49568945", "0.49560148", "0.4951475", "0.49487513", "0.4945776", "0.49442142", "0.49426967", "0.49406028", "0.4936158", "0.49292758", "0.49292678", "0.49286172", "0.49133727", "0.49043864", "0.48836976", "0.48726797", "0.4869137", "0.4867236", "0.4864852", "0.48607373", "0.48597363", "0.48460642", "0.4845178", "0.48413062", "0.48287553", "0.4828722", "0.48113382", "0.47997698", "0.47977522", "0.47888348", "0.47873044", "0.47829285", "0.47822815", "0.47805947", "0.47618592", "0.47597992", "0.4754993", "0.4750861", "0.4750489", "0.47471255", "0.47420666", "0.47420666", "0.47406125", "0.47304317", "0.47259223", "0.4725866", "0.4717815", "0.47136095", "0.47129256", "0.47115043", "0.47091365", "0.4707122", "0.47047886", "0.47043726", "0.4698513", "0.4695854", "0.4692419", "0.46918067", "0.46863005", "0.46847072", "0.46821058", "0.4678672", "0.4676076", "0.46721792", "0.466594", "0.46651724", "0.46625772", "0.46419668", "0.46374276" ]
0.5780433
3
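A minimal, runnable sketch of why the get_log_string implementation above passes sort_keys=True to json.dumps: sorting the keys makes the log string deterministic, so two entries holding the same data always serialize to an identical string. The field names and values below are made up for illustration.

import json

# Two dicts with the same content but different insertion order.
a = {"vin": "A192738", "app_id": "GPS", "level": "DEBUG"}
b = {"level": "DEBUG", "app_id": "GPS", "vin": "A192738"}

# sort_keys=True makes the output independent of insertion order,
# so equal log data always yields the same log string.
assert json.dumps(a, sort_keys=True) == json.dumps(b, sort_keys=True)
print(json.dumps(a, sort_keys=True))
# {"app_id": "GPS", "level": "DEBUG", "vin": "A192738"}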
Create a LogEntry from the log string produced by get_log_string().
def from_log_string(log_string):
    first_part = None
    second_part = None
    if not log_string.endswith("}"):
        # Value error for later use
        value_error = ValueError("Given string has invalid format: {}".format(log_string))
        bracket_idx = log_string.find("}")
        last_comma_idx = log_string.find(",", bracket_idx)
        if last_comma_idx != bracket_idx + 1:
            raise value_error
        # The bracket is kept
        first_part = log_string[:bracket_idx + 1]
        # The comma is removed
        second_part = log_string[last_comma_idx + 1:]
        if "}" not in first_part or "}" in second_part or "{" in second_part:
            raise value_error
    else:
        # No trailing intrusion label: the whole string is the JSON part
        first_part = log_string
    data_dict = json.loads(first_part)
    return LogEntry.from_data(data_dict, second_part)
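A self-contained round-trip sketch of the two methods above, using a hypothetical stripped-down MiniLogEntry that models only the data dict and the optional intrusion label (the real LogEntry referenced here carries more fields such as vin and app_id and delegates to LogEntry.from_data).

import json

class MiniLogEntry:
    def __init__(self, data, intrusion=None):
        self.data = data            # dict of log fields
        self.intrusion = intrusion  # optional label appended after the JSON part

    def get_log_string(self):
        result = json.dumps(self.data, sort_keys=True)
        if self.intrusion is not None and self.intrusion != "":
            result += ",{}".format(self.intrusion)
        return result

    @staticmethod
    def from_log_string(log_string):
        # Split '{...json...},label' into the JSON part and the optional label.
        if log_string.endswith("}"):
            return MiniLogEntry(json.loads(log_string))
        bracket_idx = log_string.find("}")
        json_part = log_string[:bracket_idx + 1]
        label = log_string[bracket_idx + 2:]  # skip the separating comma
        return MiniLogEntry(json.loads(json_part), label)

entry = MiniLogEntry({"vin": "A192738", "level": "DEBUG"}, intrusion="normal")
line = entry.get_log_string()
print(line)  # {"level": "DEBUG", "vin": "A192738"},normal
restored = MiniLogEntry.from_log_string(line)
assert restored.data == entry.data and restored.intrusion == entry.intrusion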
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def from_log_line(cls, log_line: str):\n match = cls.entry_format.match(log_line)\n if not match:\n return None\n\n date = match.group(\"date\")\n time = match.group(\"time\")\n offset = match.group(\"offset\")\n\n entry = cls(\n ip_address=match.group(\"ip_address\"),\n username=match.group(\"username\"),\n timestamp=f\"{date} {time} {offset}\",\n verb=match.group(\"verb\"),\n path=match.group(\"path\"),\n version=match.group(\"http_version\"),\n status=match.group(\"response_status\"),\n size=match.group(\"size\"))\n\n return entry", "def parse_log_entry(self, logstring):\n\n splitLogInfo = logstring.partition(self.LOGFILE_PREFIX)\n if len(splitLogInfo[1]) == 0:\n raise errorhandler.LogDatabaseError(\"separator {} not found in log entry\".format(self.LOGFILE_PREFIX))\n str2 = splitLogInfo[2]\n\n entrytype = None\n for k, v in self.validpostfixes.items():\n if splitLogInfo[2][0:len(k)] == k:\n entrytype = v\n break\n if entrytype is None:\n raise errorhandler.LogDatabaseError(\"Invalid log type: {}\".format(splitLogInfo[2][0:10]))\n\n try:\n timestringtrimmed = logstring.partition(\".\")[0]\n timestamp = datetime.datetime(*time.strptime(timestringtrimmed, \"%Y-%m-%dT%H:%M:%S\")[:6])\n except ValueError:\n raise errorhandler.LogDatabaseError(\"Value error parsing timestamp out of log entry\")\n\n mactokens = {\n \"MAC source\": \"MAC source = \",\n \"MAC dest\": \"MAC dest = \",\n }\n indices = []\n lastidx = 0\n for k, v in mactokens.items():\n nextidx = str2.find(v, lastidx)\n if nextidx < 0:\n raise errorhandler.LogDatabaseError(\"{} not found in log entry\".format(k))\n indices.append(nextidx + len(v))\n lastidx = nextidx\n srcMAC = str2[indices[0] : indices[0] + 17]\n dstMAC = str2[indices[1] : indices[1] + 17]\n\n iptokens = {\n \"IP source\": \"IP SRC=\",\n \"IP dest\": \"IP DST=\",\n \"IP source port\": \"SPT=\",\n \"IP dest port\": \"DPT=\"\n }\n if entrytype == LogEntryType.UNKNOWN_IP or entrytype == LogEntryType.IP_TRAFFIC_IN \\\n or entrytype == LogEntryType.IP_TRAFFIC_OUT or entrytype == LogEntryType.DROP:\n for k, v in iptokens.items():\n nextidx = str2.find(v, lastidx)\n if nextidx < 0:\n raise errorhandler.LogDatabaseError(\"{} not found in log entry\".format(k))\n indices.append(nextidx + len(v))\n lastidx = nextidx\n\n srcIP = extract_ip(str2, indices[2])\n dstIP = extract_ip(str2, indices[3])\n srcPort = str2[indices[4]:].partition(\" \")[0]\n dstPort = str2[indices[5]:]\n else:\n srcIP = \"\"\n dstIP = \"\"\n srcPort = \"\"\n dstPort = \"\"\n\n logdataentry = LogDataEntry(entry_type=entrytype, timestamp=timestamp, srcMAC=srcMAC, dstMAC=dstMAC, srcIP=srcIP, dstIP=dstIP,\n srcPort=srcPort, dstPort=dstPort)\n return logdataentry", "def from_data(data_dict, intrusion=None):\n\n\t\t# Data is verified in the ctor and setters\n\t\treturn LogEntry(vin=data_dict[LogEntry.VIN_FIELD], app_id=data_dict[LogEntry.APP_ID_FIELD],\n\t\t\tlevel=data_dict[LogEntry.LEVEL_FIELD], log_message=data_dict[LogEntry.LOG_MESSAGE_FIELD],\n\t\t\tgps_position=data_dict[LogEntry.GPS_POSITION_FIELD],\n\t\t\ttime_unix=data_dict[LogEntry.TIME_UNIX_FIELD], log_id=data_dict[LogEntry.LOG_ID_FIELD],\n\t\t\tintrusion=intrusion)", "def add_log_entry_string(self, logstring):\n parsed = self.parse_log_entry(logstring)\n self.add_log_entry(parsed)", "def process_entry(self,\n log_entry: str):\n elem = ET.fromstring(log_entry)\n rev = elem.attrib['revision']\n values = {}\n for sub in ['author', 'date', 'msg']:\n try:\n values[sub] = elem.find(f'./{sub}').text\n except (AttributeError, SyntaxError):\n 
log.warning('failed to retrieve %s in %s', sub, log_entry)\n values[sub] = None\n if values['msg']:\n values['msg'] = values['msg'].replace('\\n', ' ')\n rel_url_slash = self.relative_url + '/'\n for path_elem in elem.findall('*/path'):\n other = {}\n for sub in ['text-mods', 'kind', 'action', 'prop-mods',\n 'copyfrom-rev', 'copyfrom-path']:\n try:\n other[sub] = path_elem.attrib[sub]\n except (AttributeError, SyntaxError, KeyError):\n other[sub] = np.nan\n try:\n path = path_elem.text.replace(rel_url_slash, '')\n except (AttributeError, SyntaxError, ValueError) as err:\n log.warning(f'{err} processing rev {rev}')\n path = None\n entry = scm.LogEntry(rev, values['author'], to_date(values['date']),\n path=path, message=values['msg'],\n textmods=to_bool(other['text-mods']),\n kind=other['kind'], action=other['action'],\n propmods=to_bool(other['prop-mods']),\n copyfromrev=other['copyfrom-rev'],\n copyfrompath=other['copyfrom-path'],\n added=np.nan, removed=np.nan)\n yield entry", "def parse_raw_entry(raw_entry):\n entry_start = raw_entry[0]\n\n # get the timestamp\n ts_len = 23\n ts = entry_start[:ts_len]\n # get the IP, if there is one\n idx = entry_start.find(' ', ts_len+1)\n ip = entry_start[ts_len+1:idx]\n # get the database, if there is one\n consumed = idx\n idx = entry_start.find(' ', consumed+1)\n db = entry_start[consumed+1:idx]\n # get the log type\n consumed = idx\n idx = entry_start.find(' ', consumed+1)\n type = entry_start[consumed+1:idx]\n # finally, combined the message\n consumed = idx\n remaining = entry_start[consumed+1:]\n foo = [remaining]\n foo.extend(raw_entry[1:])\n msg = ''.join(foo).strip()\n\n return Entry(ts, ip, db, type, msg)", "def parse_log_entry(line):\n\n line_pattern = r\"^(?P<host>.*) - - \\[(?P<timestamp>.*)\\] \" \\\n \"\\\"(?P<request>.*)\\\" (?P<http_code>\\d\\d\\d) (?P<bytes>.*)$\"\n line_groups = re.match(line_pattern, line)\n request_pattern = r\"^(?P<request_method>[A-Z]*) (?P<resource>\\S+) ?.*$\"\n request_groups = re.match(request_pattern, line_groups.group('request'))\n host = line_groups.group('host')\n timestamp = line_groups.group('timestamp')\n timestamp = parse_date(line_groups.group('timestamp'))\n http_code = int(line_groups.group('http_code'))\n num_bytes = line_groups.group('bytes')\n num_bytes = 0 if num_bytes == '-' else int(num_bytes)\n if request_groups:\n request_method = request_groups.group('request_method')\n resource = request_groups.group('resource')\n else:\n request_method = None\n resource = None\n return ParsedRequest(\n host, timestamp, request_method,\n resource, http_code, num_bytes)", "def copy(log_entry):\n\n\t\tassert(isinstance(log_entry, LogEntry))\n\t\treturn LogEntry.from_data(log_entry.data, log_entry.intrusion)", "def processLogLine(logline):\n logline = logline.split()\n log = LogLine(logline[0], logline[1], logline[2], logline[4],\\\n float(logline[6]), float(logline[8]), float(logline[10]), logline[12])\n return log", "def __init__(self, log_string):\n match_string = r'\\[([0-9]+) \\| ([a-z0-9]{8}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{12}) \\| \\((\\s*[0-9]+,\\s*[0-9]+)\\)\\] ([\\S\\s]*)' # noqa: E501\n groups = re.search(match_string, log_string).groups()\n self.time = int(groups[0]) # Event timestamp\n self.id = groups[1] # ID of the sprite that the event affects\n x, y = int(groups[2].split()[0][:-1]), int(groups[2].split()[1])\n self.point = Point(x, y) # The location of the event\n for action in Actions:\n if groups[3].strip() == action.name:\n self.action = action # The type of event\n break\n 
else:\n # Spawn because it is followed by what spawned\n self.action = Actions.Spawned\n self.sprite = groups[3][7:].strip() # The type of sprite that was spawned", "def from_line(cls, line: str):\n dt = datetime.datetime.strptime(line[:19], \"%Y-%m-%d %H:%M:%S\")\n elems = line.split(\" \")\n level = elems[2]\n caller = elems[3].split(\"(\")[0]\n msg = \" \".join(elems[4:]).strip()\n return cls(dt, level, caller, msg)", "def create_entry(entry):\n Entry.create(**entry)\n return entry", "def create_base_entry(vin=\"INVALID\", time_unix=None):\n\t\treturn LogEntry(vin=vin, app_id=\"INVALID\", time_unix=time_unix)", "def make_entry(line):\n #focus on relevant parts\n parts = line.split(\" - \")\n visitor_id = parts[0]\n subparts = parts[1].split('\"')\n method_and_uri = subparts[1]\n method_and_uri_parts = method_and_uri.split(\" \")\n method = method_and_uri_parts[0]\n uri = method_and_uri_parts[1]\n d = dict()\n d[\"visitor_id\"] = visitor_id\n d[\"method\"] = method\n d[\"uri\"] = uri\n return d", "def parse_logs(node, logs):\n entries = []\n lines = logs.splitlines(False)\n while lines:\n line = lines.pop(0)\n while lines and not ((Log.log_juba.match(lines[0]) or Log.log_zk.match(lines[0]))):\n line += '\\n' + lines.pop(0)\n try:\n entries.append(Log(node, line))\n except JubaTestAssertionError as e:\n log.warning('failed to parse log line: %s', line)\n return entries", "def create_or_update_log(log_type, log_entry, header=False):\n if log_type not in list(LOG_TYPES.values()):\n raise BadArgumentError(log_type, log_entry)\n\n if '__iter__' not in log_entry.__dir__():\n raise TypeError(\n f'Data needs to be of iterable type, type {type(log_entry)} received')\n\n log_file_path = os.path.join(CWD, LOG_PATH, LOG_FILE.format(log_type))\n\n if log_type == 'train_data':\n if os.path.exists(log_file_path):\n os.remove(log_file_path)\n log_file = open(log_file_path, 'w')\n log_file.write(log_entry)\n log_file.close()\n return None\n\n file_mode = 'a' if header else 'w+'\n\n for i in range(len(log_entry)):\n log_entry[i] = str(log_entry[i])\n\n log_file = open(log_file_path, file_mode)\n\n if header:\n log_file.write(header)\n\n for entry in log_entry:\n log_file.write(entry)\n\n log_file.close()", "def get_TestEntry_instance(string, config):\n paren_i = string.find(\"(\")\n if paren_i > 0:\n args = string[paren_i+1:-1]\n string = string[:paren_i]\n args, kwargs = core.parse_args(args)\n else:\n args = ()\n kwargs = {}\n try:\n cls = module.get_object(string)\n except (module.ModuleImportError, module.ObjectImportError), err:\n logging.warn(err)\n return None\n testinstance = cls(config)\n return core.TestEntry(testinstance, args, kwargs, False)", "def test_log_str(self) -> None:\n log_str = (\n \"Log: [log_id: 1, username: andy, first: Andrew, last: Jarombek, \"\n \"name: Van Cortlandt NYRR XC 5K, location: Bronx, NY, date: 2019-11-17 00:00:00, type: run, \"\n \"distance: 5, metric: kilometers, miles: 3.11, time: 17:35, \"\n \"pace: 5:40, feel: 5, \"\n \"description: Didn't run very fast and felt tired, but it was nice to run a cross country race again., \"\n \"time_created: 2019-11-17 00:00:00, deleted: None]\"\n )\n\n self.maxDiff = None\n self.assertEqual(str(self.log1), log_str)\n\n # pylint: disable=unnecessary-dunder-call\n self.assertEqual(self.log1.__str__(), log_str)", "def from_raw_logchunk(cls, in_chk):\n\t\tloglines = in_chk.content.split(\"\\n\")\n\t\tif not loglines:\n\t\t\traise ValueError(\"Logchunks may not be empty.\")\n\t\tregres = RE_LINE.search(loglines[0])\n\t\tif not 
regres:\n\t\t\traise ValueError(\"Regex match failed, Logchunk malformed.\")\n\t\tdemo = regres[GROUP.DEMO] + \".dem\"\n\n\t\tkillstreaks = []\n\t\tbookmarks = []\n\t\tfor line in loglines:\n\t\t\tregres = RE_LINE.search(line)\n\t\t\tif regres is None:\n\t\t\t\traise ValueError(\"Regex match failed, Logchunk malformed.\")\n\t\t\tline_type = regres[GROUP.TYPE]\n\t\t\tvalue = regres[GROUP.VALUE]\n\t\t\ttick = int(regres[GROUP.TICK])\n\t\t\tif line_type == \"Killstreak\":\n\t\t\t\tkillstreaks.append(DemoEvent(int(value), tick, regres[GROUP.DATE]))\n\t\t\telif line_type == \"Bookmark\":\n\t\t\t\tbookmarks.append(DemoEvent(value, tick, regres[GROUP.DATE]))\n\n\t\treturn cls(demo, killstreaks, bookmarks)", "def create_from_record(self, record, **kwargs):\n for k in ('url', 'view', 'request', 'data'):\n if k not in kwargs:\n kwargs[k] = record.__dict__.get(k)\n \n kwargs.update({\n 'logger': record.name,\n 'level': record.levelno,\n 'message': force_unicode(record.msg),\n 'server_name': conf.NAME,\n })\n \n # construct the checksum with the unparsed message\n kwargs['checksum'] = construct_checksum(**kwargs)\n \n # save the message with included formatting\n kwargs['message'] = record.getMessage()\n \n # If there's no exception being processed, exc_info may be a 3-tuple of None\n # http://docs.python.org/library/sys.html#sys.exc_info\n if record.exc_info and all(record.exc_info):\n return self.create_from_exception(record.exc_info, **kwargs)\n\n return self.process(\n traceback=record.exc_text,\n **kwargs\n )", "def extract_timestamp(log_entry):\n pattern = r\"\\[(.+)\\]\\s\"\n match = re.search(pattern, log_entry)\n\n if not match:\n msg = \"The date was not found in the following log entry: {}\".format(log_entry)\n raise DateNotFound(msg)\n\n timestamp_str = match.group().replace('[', '').replace(']', '').strip()\n timestamp = parser.parse(timestamp_str, fuzzy=True)\n\n return timestamp", "def create_instance(test_id, config, args):\n return TestLogs(test_id, config, args)", "def object_decoder(obj):\n\t\tif 'logfile' in obj:\n\t\t\treturn logfile(obj['logfile']['name'], obj['logfile']['lines'], obj['logfile']['type'], obj['logfile']['content'], obj['logfile']['sources'])\n\t\tif 'logfile_entry' in obj:\n\t\t\tif len(obj['logfile_entry']['timestamp']['datetime']) >= 20 :\n\t\t\t\tdate = datetime.datetime.strptime(obj['logfile_entry']['timestamp']['datetime'],\"%Y-%m-%dT%H:%M:%S.%f\")\n\t\t\telif obj['logfile_entry']['timestamp']['datetime'][-6:-5] != '+':\n\t\t\t\tdate = datetime.datetime.strptime(obj['logfile_entry']['timestamp']['datetime'],\"%Y-%m-%dT%H:%M:%S\")\n\t\t\telse:\n\t\t\t\tunformatted_date = obj['logfile_entry']['timestamp']['datetime']\n\t\t\t\tunformatted_date = unformatted_date[:-3]+unformatted_date[-2:]\n\t\t\t\t# once again, related to missing features in Python 3.6\n\t\t\t\tdate = datetime.datetime.strptime(unformatted_date,\"%Y-%m-%dT%H:%M:%S.%f%z\")\n\t\t\treturn logfile_entry(obj['logfile_entry']['id'], file, obj['logfile_entry']['message'], obj['logfile_entry']['structured_data'], date,obj['logfile_entry']['hostname'],obj['logfile_entry']['source'])\n\t\treturn obj", "def __init__(self, entry):\n \n self.lastChangedDate = entry.time\n self.size = entry.size\n self.kind = entry.kind\n self.logMessage = None", "def parse_entry(msg):\n values = msg.split(';')\n return {\n 'dt': datetime.strptime(\n values[0], '%Y-%m-%d %H:%M:%S.%f'),\n 'event': values[1]\n }", "def add_log(self, text, user=None):\n entry = GameLog(game=self, text=text, player=user).save()\n return entry", 
"def add_log(self, text, user=None):\n entry = GameLog(game=self, text=text, player=user).save()\n return entry", "def ParseRecord(self, parser_mediator, key, structure):\n if key != 'log_entry':\n raise errors.ParseError(\n 'Unable to parse record, unknown structure: {0:s}'.format(key))\n\n month_string = self._GetValueFromStructure(structure, 'month')\n\n year = self._GetValueFromStructure(structure, 'year')\n month = self.MONTHS.get(month_string)\n day = self._GetValueFromStructure(structure, 'day')\n hours = self._GetValueFromStructure(structure, 'hours')\n minutes = self._GetValueFromStructure(structure, 'minutes')\n seconds = self._GetValueFromStructure(structure, 'seconds')\n\n event_data = IOSSysdiagLogEventData()\n event_data.process_identifier = self._GetValueFromStructure(\n structure, 'process_identifier')\n event_data.severity = self._GetValueFromStructure(structure, 'severity')\n event_data.originating_call = self._GetValueFromStructure(\n structure, 'originating_call')\n event_data.body = self._GetValueFromStructure(structure, 'body')\n\n try:\n date_time = dfdatetime_time_elements.TimeElements(\n time_elements_tuple=(year, month, day, hours, minutes, seconds))\n except (TypeError, ValueError):\n parser_mediator.ProduceExtractionWarning('invalid date time value')\n return\n\n event = time_events.DateTimeValuesEvent(\n date_time, definitions.TIME_DESCRIPTION_MODIFICATION)\n\n parser_mediator.ProduceEventWithEventData(event, event_data)", "def create_entry_for_topic(cls, topic, entry_id, content_hash):\n\t\tkey = cls.create_key(topic, entry_id)\n\t\treturn cls(key_name=key.name(),\n\t\t\t\t\t\t\t parent=key.parent(),\n\t\t\t\t\t\t\t entry_id=entry_id,\n\t\t\t\t\t\t\t entry_id_hash=utils.sha1_hash(entry_id),\n\t\t\t\t\t\t\t entry_content_hash=content_hash)", "def logentry(jobid, label, typ, content=None, path=None):\n ud = str(uuid.uuid4())\n db.logs.save({\"uuid\":ud, \"jobid\":jobid, \"label\":label, \"type\":typ, \"content\":content, \"date\":tstamp(), \"containerurl\":path})", "def process_log_line(self, line):\n int_map = self.int_map\n timestamp = line[0:26]\n if len(timestamp) >= 26:\n msg = {}\n try:\n # %Y-%m-%d %H:%M:%S.%f - 2017-06-27 13:46:10.048844\n day = int_map[timestamp[8:10]]\n hour = int_map[timestamp[11:13]]\n minute = int_map[timestamp[14:16]]\n second = int_map[timestamp[17:19]]\n usecond = int_map[timestamp[20:22]] * 10000 + \\\n int_map[timestamp[22:24]] * 100 + int_map[timestamp[24:26]]\n event_time = (hour * 3600.0 + minute * 60.0 + second) + (usecond / 1000000)\n if day == self.start_day:\n elapsed = event_time - self.start_time\n else:\n elapsed = event_time + (float(3600 * 24) - self.start_time)\n msg['timestamp'] = elapsed\n if msg['timestamp'] >= 0:\n offset = line.find(']: ', 32)\n if offset >= 0:\n try:\n thread = line[34:offset]\n separator = thread.find(':')\n if separator >= 0:\n thread = thread[separator + 1:].strip()\n msg['thread'] = thread\n msg['level'] = line[offset + 3:offset + 4]\n msg_start = line.find(' ', offset + 5)\n if msg_start >= 0:\n msg['category'] = line[offset + 5:msg_start]\n msg['message'] = line[msg_start + 1:]\n if msg['category'] == 'nsHttp':\n if msg['thread'] == 'Main Thread':\n self.main_thread_http_entry(msg)\n elif msg['thread'] == 'Socket Thread':\n self.socket_thread_http_entry(msg)\n elif msg['category'] == 'nsSocketTransport':\n self.socket_transport_entry(msg)\n elif msg['category'] == 'nsHostResolver':\n self.dns_entry(msg)\n except Exception:\n logging.exception('Error processing log line')\n except 
Exception:\n pass", "def create_log(self):\n from settings import evidence_path\n test_case = self.__class__.__name__\n log_extension = '.log'\n if evidence_path is not None:\n log_path = '{}/{}{}'.format(\n evidence_path, test_case, log_extension\n )\n else:\n log_path = None\n self.log = Log(log_path)\n self.log = self.log.get_logger()\n return self.log", "def test_create_log(self):\n log = self.log\n\n self.assertTrue(isinstance(log, Log))\n self.assertEqual(log.name, \"Test Log\")", "def parseLog(self, log_lines):\n abstract", "def from_str(cls, line):\n match = cls._re.search(line)\n if not match:\n return cls(None, None)\n groups = [int(d) for d in match.groups()]\n ymdhm1 = groups[:5]\n ymdhm2 = groups[5:10]\n hm3 = groups[10:]\n return cls(\n datetime.datetime(*ymdhm1),\n datetime.datetime(*ymdhm2),\n hm3[0] * 60 + hm3[1],\n )", "def create_from_arg_string(cls, arg_string):\n return cls()", "def __add_log(self, logType: int, message: str) -> None:\n\n if isinstance(message, BaseException):\n ex: BaseException = message\n if hasattr(ex, 'message'):\n message = ex.message\n else:\n message = ex.__str__()\n\n message += f'\\n{traceback.format_exc().__str__()}'\n\n if message is None:\n return\n\n if isinstance(message, str) and message.strip().__len__() == 0:\n return\n\n st = stack()\n caller: Traceback = getframeinfo(st[2][0])\n log = LogModel()\n log.log_level = logType\n log.filename = caller.filename\n log.function = caller.function\n log.line_number = caller.lineno\n log.message = message\n log.creation_date = datetime.now()\n\n self.__logs.append(log)", "def from_string(cls, string):\n m = re.match(r\"([0-9- :]+)\" # Timestamp (group 1)\n r\",[0-9]+\\s\" # Timestamp ms (ignored)\n r\"([A-Z]+)\\s+\" # Debug level (group 2)\n r\"(\\d+)\\s+\" # PID (group 3)\n r\"\\[\"\n r\"([^]]+)\\s*\" # Module name (group 4)\n r\"\\]:\\s+\"\n r\"(.*)\", # Debug message (group 5)\n string)\n if not m:\n raise DebugFormatError(\n \"Failed to match {} against the expected format\".format(\n string))\n ts = datetime.datetime.strptime(m.group(1), TS_FORMAT)\n return DebugStmt(ts, m.group(2), int(m.group(3)),\n m.group(4).strip(),\n m.group(5))", "def from_str(cls, string):", "def log_decode(log_data):\n data = json.loads(log_data.data)\n timestamp = log_data.timestamp\n return (data, timestamp)", "def test_create_from_json(self):\n json_string = (\n r'{\"filter\": \"logName:\\\"logs/cloudaudit.googleapis.com\\\"\", '\n r'\"destination\": \"storage.googleapis.com/big-logs-bucket\", '\n r'\"name\": \"a-log-sink\", '\n r'\"writerIdentity\": \"serviceAccount:a-log-sink@logging-123456789.'\n r'iam.gserviceaccount.com\", '\n r'\"outputVersionFormat\": \"V2\"}')\n\n sink = log_sink.LogSink.from_json(self.proj_1, json_string)\n\n self.assertEqual('a-log-sink', sink.id)\n self.assertEqual('sink', sink.type)\n self.assertEqual('projects/proj-1/sinks/a-log-sink', sink.name)\n self.assertEqual('logName:\"logs/cloudaudit.googleapis.com\"',\n sink.sink_filter)\n self.assertEqual('storage.googleapis.com/big-logs-bucket',\n sink.destination)\n self.assertFalse(sink.include_children)\n self.assertEqual(json.loads(json_string), json.loads(sink.raw_json))", "def from_dict(self, dict_entry, line_length=80):\r\n try:\r\n # Set the entry object's attributes to the corresponding\r\n # values in the dictionary entry. 
Type conversions need to\r\n # be done for non-string attributes.\r\n for key in dict_entry:\r\n dict_entry[key] = self._convert_dict_key(dict_entry[key])\r\n # end for\r\n # Go through the attributes and set them.\r\n if self._validate_dict_entry(dict_entry) or self.info is not None:\r\n try:\r\n for attr in self.FIELDNAMES:\r\n setattr(self, attr, dict_entry[attr])\r\n # end for\r\n return True\r\n except Exception as err:\r\n wl_resource.print_status(\r\n \"Error\", f\"Error creating entry: {err}\",\r\n line_length=line_length)\r\n # end try\r\n else:\r\n return False\r\n except Exception as err:\r\n _z_exc(\"logentry.py/from_dict\", err)\r\n # end try\r", "def build_log_entry(\n hostname: str, user: str, date: dt.datetime, wdir: Path, cmd: str\n) -> str:\n return (\n f'[{date.strftime(\"%Y-%m-%d %H:%M:%S\")}] ({user}@{hostname}) '\n f\"{wdir}\\n\\t{cmd}\\n\"\n )", "def parse_log_file(log_filename, pod, filters=None, make_dict=False, objref_dict=None):\n log = gcs_async.read(log_filename).get_result()\n if log is None:\n return {}, False if make_dict else None\n if pod:\n bold_re = regex.wordRE(pod)\n else:\n bold_re = regex.error_re\n if objref_dict is None:\n objref_dict = {}\n if make_dict and pod:\n return kubelet_parser.make_dict(log.decode('utf8', 'replace'), bold_re, objref_dict)\n else:\n return log_parser.digest(log.decode('utf8', 'replace'),\n error_re=bold_re, filters=filters, objref_dict=objref_dict)", "def parse_line(line):\n log_line = LogLine(line)\n dt = datetime.datetime.strptime(log_line.line[0], \"%Y-%m-%d %H:%M:%S\")\n # make a tuple with dt and the rest (splatted)\n return (dt, *log_line.line[1:])", "def from_text(cls, text):\n raw = json.loads(text)\n event_msg = EventMessage.from_text(raw[\"event_message\"])\n msg = cls(\n uri_name=raw[\"uri_name\"],\n event_message=event_msg,\n publisher_connection_id=raw[\"publisher_connection_id\"]\n )\n msg.publisher_node_id = raw[\"publisher_node_id\"]\n return msg", "def parse_entry(self, entry_string):\n entry_type, entry_string = self.pop_entry_type(entry_string)\n cite_key, entry_string = self.pop_key(entry_string)\n field_dict = dict(self.extract_fields(entry_string))\n field_dict[\"type\"] = entry_type\n self.force_field.citations[cite_key] = field_dict", "def from_str(cls, data: AnyStr) -> \"JobManifest\":\n as_dict = json.loads(data)\n as_dict[\"creation_time\"] = datetime.datetime.fromisoformat(\n as_dict[\"creation_time\"]\n )\n return cls(**as_dict)", "def deserialize(cls, record):\n return cls(\n source=record.get(\"source\", \"\"),\n category=record.get(\"category\", \"\"),\n name=record.get(\"name\", \"\"),\n message=record.get(\"message\", \"\"),\n timestamp=record.get(\"timestamp\", \"\"),\n **record[\"data\"],\n )", "def test_create_from_dict(self):\n sink_dict = {\n 'name': 'another-log-sink',\n 'destination': 'pubsub.googleapis.com/projects/my-logs/topics/logs',\n 'outputVersionFormat': 'V2',\n 'includeChildren': True,\n 'writerIdentity': (\n 'serviceAccount:[email protected]'),\n }\n\n sink = log_sink.LogSink.from_dict(self.folder_56, sink_dict)\n\n self.assertEqual('another-log-sink', sink.id)\n self.assertEqual('sink', sink.type)\n self.assertEqual('folders/56/sinks/another-log-sink', sink.name)\n self.assertEqual('', sink.sink_filter)\n self.assertEqual('pubsub.googleapis.com/projects/my-logs/topics/logs',\n sink.destination)\n self.assertTrue(sink.include_children)\n self.assertEqual(sink_dict, json.loads(sink.raw_json))", "def __init__(self, web_log_id=None, log_date_time=None, machine_name=None, 
base_url=None, duration=None, entry_id=None, active_users=None, logged_in_users=None, web_section_id=None, comments=None, date_modified=None): # noqa: E501 # noqa: E501\n\n self._web_log_id = None\n self._log_date_time = None\n self._machine_name = None\n self._base_url = None\n self._duration = None\n self._entry_id = None\n self._active_users = None\n self._logged_in_users = None\n self._web_section_id = None\n self._comments = None\n self._date_modified = None\n self.discriminator = None\n\n if web_log_id is not None:\n self.web_log_id = web_log_id\n if log_date_time is not None:\n self.log_date_time = log_date_time\n if machine_name is not None:\n self.machine_name = machine_name\n if base_url is not None:\n self.base_url = base_url\n if duration is not None:\n self.duration = duration\n if entry_id is not None:\n self.entry_id = entry_id\n if active_users is not None:\n self.active_users = active_users\n if logged_in_users is not None:\n self.logged_in_users = logged_in_users\n if web_section_id is not None:\n self.web_section_id = web_section_id\n if comments is not None:\n self.comments = comments\n if date_modified is not None:\n self.date_modified = date_modified", "def from_string(cls, dlstr):\n\n NotImplementedError(\"Should be implemented by subclass\")", "def parse(file):\n logger.info('parsing DL7 dive log data')\n log = Log()\n content = file.readline()\n while not content == '':\n __parse_line(log, content)\n content = file.readline()\n return log", "def from_raw(cls, buffer: bytes):\n assert len(buffer) == 304\n block_hash = to_str(buffer[:32])\n height = to_int(buffer[32:36])\n nonce = to_int(buffer[36:40])\n time = to_int(buffer[40:48])\n prevBlock = to_str(buffer[48:80])\n treeRoot = to_str(buffer[80:112])\n extraNonce = to_str(buffer[112:136])\n reservedRoot = to_str(buffer[136:168])\n witnessRoot = to_str(buffer[168:200])\n merkleRoot = to_str(buffer[200:232])\n version = to_int(buffer[232:236])\n bits = to_int(buffer[236:240])\n mask = to_str(buffer[240:272])\n chainwork = to_str(buffer[272:304])\n return ChainEntry(\n block_hash,\n height,\n nonce,\n time,\n prevBlock,\n treeRoot,\n extraNonce,\n reservedRoot,\n witnessRoot,\n merkleRoot,\n version,\n bits,\n mask,\n chainwork,\n )", "def __init__(self, line):\n # Throw an exception if we don't see the parenthesis that mark a history entry\n if not line[108] == '(':\n raise ParsingException\n if not line[138:139] == ')':\n raise ParsingException\n\n self.status = line[109:122].strip()\n self.time_stamp = datetime.strptime(line[122:138], '%m/%d/%Y %H:%M')", "def create_instance(data, logging_file_name):\n Utils.logging(data, logging_file_name)", "def _build_data_from_text(self, text):\n try:\n record = json.loads(text)\n except Exception as e:\n logging.error(f\"Exception: {e}\")\n logging.error(f\"datapoint: {text}\")\n raise e\n return record", "def from_string(cls, dlstr):\n raise NotImplementedError(\"Should be implemented by subclass\")", "def from_str(cls, timestamp_str):\n units = timestamp_str.split(\":\")\n seconds_ms = units[-1].split(\".\")\n hours = int(units[0])\n minutes = int(units[1])\n seconds = int(seconds_ms[0])\n milliseconds = int(seconds_ms[1])\n return cls(hours, minutes, seconds, milliseconds)", "def from_string(cls, string):\n normalised = cls.normalise_string(string)\n return cls.from_normalised_string(normalised)", "def parse(self, line):\n try:\n (year, month, day, hour, minute, second, microseconds, offset_hour, offset_minute, source, process, logentry) = 
re.match('^(\\d\\d\\d\\d)-(\\d\\d)-(\\d\\d)T(\\d\\d):(\\d\\d):(\\d\\d)\\.([\\d]+)\\+(\\d\\d):(\\d\\d) ([a-z]+)\\[([a-zA-Z0-9_.]+)\\]: ([0-9a-z-A-Z\\-_\\.\\[\\]:\\?\\#\\\",/\\ ={}\\'\\(\\)<>]+)$', line).groups()\n except:\n pass\n \n try:\n parsed_data = dict()\n parsed_data['timestamp'] = \" \".join([\"-\".join([year, month, day]), \":\".join([hour, minute, second])])\n parsed_data['log_time'] = datetime.datetime(int(year), int(month), int(day), int(hour), int(minute), int(second))\n parsed_data['log_source'] = source\n parsed_data['log_type'] = process\n except (AttributeError, UnboundLocalError):\n PARSE_ERRORS.append(line)\n return False\n\n #TODO: This still needs work on spaces in values surrounded by \" \" \n if parsed_data['log_source'] == \"heroku\":\n if logentry.__len__() > 1:\n logentry = re.sub(', ', ',', logentry)\n line_chunks = re.split(' ', logentry)\n for chunk in line_chunks:\n line_chunks = re.split('=', chunk)\n if line_chunks.__len__() > 2:\n #fwd and path are a little clunky to parse\n pass\n elif line_chunks.__len__() > 1:\n parsed_data[line_chunks[0]] = line_chunks[1]\n else:\n pass\n else:\n return False\n else:\n # TODO: [app] \n # Needs parsing. Do that here.\n return False\n\n return parsed_data", "def create_entry(self, entry_group_name, entry_id, entry):\n try:\n entry = self.__datacatalog.create_entry(parent=entry_group_name,\n entry_id=entry_id,\n entry=entry)\n self.__log_entry_operation('created', entry=entry)\n return entry\n except (exceptions.FailedPrecondition,\n exceptions.PermissionDenied) as e:\n entry_name = '{}/entries/{}'.format(entry_group_name, entry_id)\n self.__log_entry_operation('was not created',\n entry_name=entry_name)\n raise e", "def from_string(cls, dlstr):\n\n mode = \"new\" # the default\n\n try:\n tokens = dlstr.lower().split()\n\n if tokens[0] != TransitionMatrix.key:\n raise ValueError\n\n nout = int(tokens[1])\n n_upd = int(tokens[2])\n\n try:\n mode = str(tokens[3])\n except IndexError:\n # assume optional argument not present\n pass\n\n except (IndexError, ValueError):\n usage = \"fed method tm nout n_upd [mode]\"\n raise ValueError(\"Expected {!r}: got {!r}\".format(usage, dlstr))\n\n return TransitionMatrix(nout, n_upd, mode)", "def parse_tracelogging_event(bv: binaryninja.binaryview.BinaryView, stream: Stream) -> Event:\n channel = stream.read_u8()\n if channel != 11:\n raise ETWBreakerUnexpectedToken(11, channel)\n level = stream.read_u8()\n opcode = stream.read_u8()\n keyword = stream.read_u64()\n size = stream.read_u16()\n stream.read(size - 2)\n return Event(bv, 0, 0, channel, level, opcode, 0, keyword)", "def parseLog(self, log):\n return 0", "def mapLogRecord(self, record):\n newrec = record.__dict__\n for p in self.params:\n newrec[p] = self.params[p]\n maxParamLength = 4000\n # truncate and clean the message from non-UTF-8 characters\n try:\n newrec['msg'] = newrec['msg'][:maxParamLength].decode('utf-8', 'ignore').encode('utf-8')\n except Exception:\n pass\n try:\n newrec['message'] = newrec['message'][:maxParamLength].decode('utf-8', 'ignore').encode('utf-8')\n except Exception:\n pass\n return newrec", "def __init__( self, logger=None, level=None, name=None, sentry=None ):\n\n # if no logger is specified then create a new one\n if logger == None:\n logger = logging.getLogger( 'quiddi' )\n\n # if a Sentry object has been passed then store it\n if sentry != None:\n self.__sentry = sentry\n\n # use the logging level of the existing entity if there is non specified. 
If no logger is valid then use INFO\n if level == None:\n level = logger.getEffectiveLevel()\n\n # set the logging level\n logger.setLevel( level )\n\n # create the syslog handler and add it to the base handler passed in as logger\n sh = logging.handlers.SysLogHandler( address='/dev/log', facility=logging.handlers.SysLogHandler.LOG_LOCAL6 )\n\n # set the level to that defined when the object is created\n \"\"\"\n Severity Keyword Description General Description\n Critical crit Critical conditions. Should be corrected immediately, but indicates failure in a secondary system,\n an example is a loss of a backup ISP connection.\n Error err (error) Error conditions. Non-urgent failures, these should be relayed to developers or admins;\n each item must be resolved within a given time.\n Warning warning (warn) Warning conditions. Not an error, but indication that an error will occur if action is not taken,\n e.g. file system 85% full - each item must be resolved within a given time.\n Informational info Informational messages. Normal operational messages - may be harvested for reporting, measuring throughput,\n etc. - no action required.\n Debug debug Debug-level messages. Info useful to developers for debugging the application, not useful during operations.\n \"\"\"\n sh.setLevel( level )\n formatter = logging.Formatter( '%(name)s - %(levelname)s - %(message)s' )\n sh.setFormatter( formatter )\n logger.addHandler( sh )\n\n # set a name if one hasn't been provided\n if name == None:\n program_name = ( sys.argv[0].split( '/' )[-1] ).split( '.' )\n del program_name[-1]\n name = '.'.join( program_name )\n\n # get the handle for it\n self.__logger = logging.getLogger( name )", "def parseApacheLogLine(logline):\r\n match = re.search(APACHE_ACCESS_LOG_PATTERN, logline) # Matching pattern with each element\r\n size_field = match.group(9) # Get size fields\r\n if size_field == '-': \r\n size = long(0) # make it zero\r\n else:\r\n size = long(match.group(9)) # else convert it to long format\r\n return (Row( # Return the extracted data in row format for easy access\r\n host = match.group(1),\r\n client_identd = match.group(2),\r\n user_id = match.group(3),\r\n date_time = parse_apache_time(match.group(4)),\r\n method = match.group(5),\r\n endpoint = match.group(6),\r\n protocol = match.group(7),\r\n response_code = int(match.group(8)),\r\n content_size = size\r\n ))", "def add_entry(self, *args, **kwargs):\n entry = Entry(*args, **kwargs) # NOTE: not sure this is good\n self._entries[entry.uuid] = entry\n return entry", "def _CreateQuickLog(namespace, key):\n namespaced_key = '%s__%s' % (namespace, key)\n log = QuickLog(id=namespaced_key)\n log.put()\n return log", "def log_builder(self, log_level, hrtimestamp, datestamp, timestamp, log_msg, tags):\n log_body = {}\n log_body[\"filename\"] = self.filename\n log_body[\"log_level\"] = log_level\n log_body[\"hrtimestamp\"] = hrtimestamp\n log_body[\"datestamp\"] = datestamp\n log_body[\"timestamp\"] = timestamp\n log_body[\"log_msg\"] = log_msg\n log_body[\"tags\"] = tags\n return log_body", "def _parse_audit_entry(entry):\n try:\n integralstor_action_dict = {\n \"create_alert_notification\": \"Alert notification created.\",\n \"delete_alert_notification\": \"Alert notification deleted.\",\n \"create_audit_notification\": \"Audit notification created.\",\n \"delete_audit_notification\": \"Audit notification deleted.\",\n \"update_system_datetimezone\": \"Updated system date/time/timezone\",\n \"update_manifest\": \"System manifest updated\",\n 
\"update_ntp_servers\": \"Updated NTP server configuration\",\n \"ntp_sync\": \"Performed manual NTP time sync\",\n 'delete_remote_monitoring_server': 'Removed remote monitoring server',\n 'update_remote_monitoring_server': 'Created/updated remote monitoring server',\n \"factory_defaults_reset\": \"Factory defaults reset\",\n \"delete_certificate\": \"Deleted a SSL certificate\",\n \"edit_aces\": \"Access control entry modified\",\n \"add_aces\": \"Access control entry created\",\n \"delete_ace\": \"Access control entry removed\",\n \"create_dir\": \"Directory created\",\n \"create_self_signed_certificate\": \"Created a self signed SSL certificate\",\n \"upload_certificate\": \"Uploaded a SSL certificate\",\n \"add_zfs_spares\": \"Spare disk(s) added to pool\",\n \"schedule_zfs_snapshot\": \"Snapshot scheduling added/modified\",\n \"remove_zfs_spare\": \"Spare disk removed from pool\",\n \"remove_zfs_quota\": \"Removed ZFS quota\",\n \"set_zfs_quota\": \"Set ZFS quota\",\n \"create_vlan\": \"Created network VLAN\",\n \"remove_vlan\": \"Removed network VLAN\",\n \"modify_local_user_gid\": \"Local user's primary group set\",\n \"modify_local_user_grp_membership\": \"Local user's group membership modified\",\n \"create_local_user\": \"Local user created\",\n \"create_local_group\": \"Local group created\",\n \"delete_local_group\": \"Local group removed\",\n \"delete_local_user\": \"Local user removed\",\n \"change_local_user_password\": \"Local user password modified\",\n \"modify_dir_owner_permissions\": \"Directory ownership/permissions modified\",\n \"modify_dir_sticky_bit\": \"Directory sticky bit modified\",\n \"modify_cifs_share\": \"CIFS share modified\",\n \"delete_cifs_share\": \"CIFS share removed\",\n \"create_cifs_share\": \"CIFS share created\",\n \"modify_samba_settings\": \"CIFS authentication settings modified\",\n \"delete_nfs_share\": \"NFS share removed\",\n \"edit_nfs_share\": \"NFS share modified\",\n \"create_nfs_share\": \"NFS share created\",\n \"create_iscsi_target\": \"ISCSI target created\",\n \"delete_iscsi_target\": \"ISCSI target removed\",\n \"create_iscsi_lun\": \"ISCSI LUN created\",\n \"delete_iscsi_lun\": \"ISCSI LUN removed\",\n \"add_iscsi_target_authentication\": \"ISCSI target authentication added\",\n \"remove_iscsi_target_authentication\": \"ISCSI target authentication removed\",\n \"add_iscsi_acl\": \"ISCSI ACL added\",\n \"remove_iscsi_acl\": \"ISCSI ACL removed\",\n \"change_service_status\": \"Service status modified\",\n \"set_interface_state\": \"Network interface state modified\",\n \"edit_interface_address\": \"Network interface address modified\",\n \"create_bond\": \"Network interface bond created\",\n \"remove_bond\": \"Network interface bond removed\",\n \"edit_hostname\": \"System hostname modified\",\n \"set_dns_nameservers\": \"DNS nameservers modified\",\n \"modify_admin_password\": \"Administrator password modified\",\n \"create_zfs_pool\": \"ZFS pool created\",\n \"expand_zfs_pool\": \"ZFS pool expanded\",\n \"import_zfs_pool\": \"ZFS pool imported\",\n \"export_zfs_pool\": \"ZFS pool exported\",\n \"scrub_zfs_pool\": \"ZFS pool scrub initiated\",\n \"delete_zfs_pool\": \"ZFS pool removed\",\n \"edit_zfs_slog\": \"ZFS pool write cache modified\",\n \"remove_zfs_slog\": \"ZFS pool write cache removed\",\n \"edit_zfs_l2arc\": \"ZFS pool read cache modified\",\n \"remove_zfs_l2arc\": \"ZFS pool read cache removed\",\n \"edit_zfs_dataset\": \"ZFS dataset modified\",\n \"delete_zfs_dataset\": \"ZFS dataset removed\",\n 
\"create_zfs_zvol\": \"ZFS block device volume created\",\n \"delete_zfs_zvol\": \"ZFS block device volume removed\",\n \"create_zfs_dataset\": \"ZFS dataset created\",\n \"create_zfs_snapshot\": \"ZFS snapshot created\",\n \"delete_zfs_snapshot\": \"ZFS snapshot removed\",\n \"rollback_zfs_snapshot\": \"ZFS snapshot rolled back\",\n \"replace_disk_offline_disk\": \"Disk replacement - old disk offlined\",\n \"replace_disk_replaced_disk\": \"Disk replacement - disk replaced successfully\",\n \"rename_zfs_snapshot\": \"ZFS snapshot renamed\",\n \"create_rsync_share\": \"Created new RSync share \",\n \"edit_rsync_share\": \"Edited RSync share \",\n \"delete_rsync_share\": \"Deleted RSync share \",\n \"remove_background_task\": \"Removed background task \",\n \"create_remote_replication\": \"Created remote replication \",\n \"modify_remote_replication\": \"Modified remote replication \",\n \"remove_remote_replication\": \"Removed remote replication \",\n \"task_fail\": \"Task failed \",\n \"task_start\": \"Task started \",\n \"task_complete\": \"Task completed \",\n \"remove_ssh_user_key\": \"Removed ssh user key \",\n \"upload_ssh_user_key\": \"Uploaded ssh user key \",\n \"remove_ssh_host_key\": \"Removed ssh host key \",\n \"upload_ssh_host_key\": \"Uploaded ssh host key \",\n }\n\n action_dict = integralstor_action_dict\n\n d = {}\n\n d['time'], err = datetime_utils.convert_from_epoch(\n entry['audit_time'], return_format='str', str_format='%c', to='local')\n if err:\n raise Exception(err)\n\n d[\"ip\"] = entry['source_ip']\n d[\"username\"] = entry['username']\n action = entry['audit_code']\n if action in action_dict:\n d[\"action\"] = action_dict[action]\n else:\n d[\"action\"] = \"Unknown\"\n d[\"action_str\"] = entry['audit_str']\n d[\"audit_id\"] = entry['audit_id']\n\n except Exception, e:\n return None, 'Error decoding audit entry: %s' % (e)\n else:\n return d, None", "def add_log_entry(self, log_entry):\n self.log_entries.append(log_entry)", "def __init__(self, node, line):\n m = self.log_juba.match(line)\n if m:\n self.node = node\n self.type = 'jubatus'\n self.level = LogLevel.normalize(m.group(9))\n self.time = datetime(int(m.group(1)), int(m.group(2)), int(m.group(3)), int(m.group(4)), int(m.group(5)), int(m.group(6)), int(m.group(7)) * 1000)\n self.thread_id = int(m.group(8))\n self.source = m.group(10)\n self.source_line = m.group(11)\n self.message = self.log_juba.sub('', line)\n return\n m = self.log_zk.match(line)\n if m:\n self.node = node\n self.type = 'zookeeper'\n self.level = LogLevel.normalize(m.group(10))\n self.time = datetime(int(m.group(1)), int(m.group(2)), int(m.group(3)), int(m.group(4)), int(m.group(5)), int(m.group(6)), int(m.group(7)) * 1000)\n self.thread_id = int(m.group(8))\n self.handle = m.group(9)\n self.source = m.group(11)\n self.source_line = m.group(12)\n self.message = self.log_zk.sub('', line)\n return\n raise JubaTestAssertionError('invalid log format: %s' % line)", "def view(\n id: int = typer.Argument(\n ...,\n help=\"ID of the log entry\"\n )\n):\n manager = LogBookManager()\n log_entry = manager.get(id)\n\n if log_entry:\n log_entry_id = (\n typer.style(\"Log Entry ID: \", fg=typer.colors.BRIGHT_BLUE, bold=True) +\n str(log_entry.id)\n )\n typer.echo(log_entry_id)\n\n log_datetime = (\n typer.style(\"Log Date & Time: \", fg=typer.colors.BRIGHT_BLUE, bold=True) +\n log_entry.log_datetime.strftime(\"%Y-%m-%d %I:%M %p\")\n )\n typer.echo(log_datetime)\n\n typer.echo(\n typer.style(\"\\nDescription:\\n\", fg=typer.colors.BRIGHT_BLUE, 
bold=True)\n )\n typer.echo(log_entry.description + '\\n')\n\n created_at = (\n typer.style(\"Created at: \", fg=typer.colors.BRIGHT_BLUE, bold=True) +\n log_entry.created_at.strftime(\"%Y-%m-%d %I:%M %p\")\n )\n typer.echo(created_at)\n\n updated_at = (\n typer.style(\"Updated at: \", fg=typer.colors.BRIGHT_BLUE, bold=True) +\n log_entry.updated_at.strftime(\"%Y-%m-%d %I:%M %p\")\n )\n typer.echo(updated_at)\n else:\n typer.echo(\n typer.style(\n f'No Log Entry Found with id={id}',\n fg=typer.colors.RED,\n bold=True\n )\n )", "def _record(self):\n record_attr = {\n 'name': 'test_record',\n 'level': 'ERROR',\n 'pathname': '/test/path',\n 'msg': 'This is a test record.',\n }\n record = logging.makeLogRecord(record_attr)\n return record", "def create_entry(validator):\n entry = ValidationEntry()\n entry.setValidator(validator.build(entry))\n return entry", "def new_log_stream(self, log_name):\n try:\n if log_name == 'logging':\n logstream = self.logging\n if logstream.closed:\n raise ValueError('Attempting to open closed logstream %r' % log_name)\n return logstream\n\n return self._new_log_stream(log_name)\n except:\n LOG.exception('new_log_stream %r: %r', self._step.name, log_name)\n raise", "def add_entry(name, title, duration, notes):\n clear()\n print('Entry added to work log!')\n return Entry.create(\n employee_name=name,\n task_title=title,\n time_spent=duration,\n task_notes=notes\n )", "def from_str(cls, s):\n raise NotImplementedError", "def from_str(cls, line) -> \"VersionStructure\":\n major, minor, patch = [int(item) for item in line.split(\".\")]\n return cls(major=major, minor=minor, patch=patch)", "def add(\n description: str = typer.Argument(\n ...,\n help=\"Description of the log entry\"\n ),\n date: datetime = typer.Option(\n datetime.now().strftime(\"%Y-%m-%d\"), '--date', '-d',\n help=\"Date of the log entry\"\n ),\n time: datetime = typer.Option(\n datetime.now().strftime(\"%I:%M %p\"), '--time', '-t',\n formats=[\"%H:%M:%S\", \"%I:%M %p\"],\n help=\"Time of the log entry\"\n )\n):\n log_entry_time = time.time()\n log_datetime = datetime.combine(date, log_entry_time)\n\n manager = LogBookManager()\n created, message = manager.create(description, log_datetime)\n\n if created:\n typer.echo(\n typer.style(message, fg=typer.colors.GREEN, bold=True)\n )\n else:\n typer.echo(\n typer.style(message, fg=typer.colors.RED, bold=True)\n )", "def from_file(self, filename=None):\n if not self.name:\n #we don't have a file associated with the EntryList:\n if not filename:\n print \"UNKNOWN FILE!\"\n exit\n else:\n self.name = filename\n \n elif filename and filename != self.name:\n #ambiguous which file to use\n print \"different file than what log was initialized with\"\n exit\n \n else:\n #we have an original filename and none passed in\n #or the original filename equals the one passed in\n #should be good to go\n pass\n\n if os.path.exists(self.name):\n\n #f = open(self.name, \"U\")\n #2009.04.02 20:44:31 \n #very strange behavior when opening up utf-8 files\n #characters get reincoded\n #this is especially prominent when using check_feed.py\n #was using latin_1... 
going back to utf-8\n #f = codecs.open(self.name, encoding='latin_1')\n #codecs.ignore_errors(UnicodeDecodeError) \n f = codecs.open(self.name, encoding='utf-8', errors='ignore')\n\n self.write(f.read())\n f.close\n\n self.seek(0)\n\n else:\n print \"NO FILE ASSOCIATED WITH LOG: %s\" % self.name", "def __build_message_to_print_in_log(log: LogModel) -> Optional[str]:\n\n if log is None:\n return None\n\n log_level_name: str = LogHelper.get_log_level_name(log.log_level)\n message: str = \\\n f'{log.creation_date} |->\\t[{log_level_name}]\\t{log.message}\\t\\t[Line: {log.line_number}]\\t[{log.filename}]'\n\n return message", "def parse_apache_log_line(logline):\n match = re.search(APACHE_ACCESS_LOG_PATTERN, logline)\n if match is None:\n return logline, 0\n size_field = match.group(9)\n if size_field == '-':\n size = long(0)\n else:\n size = long(match.group(9))\n return (Row(\n host=match.group(1),\n client_identd=match.group(2),\n user_id=match.group(3),\n date_time=parse_apache_time(match.group(4)),\n method=match.group(5),\n endpoint=match.group(6),\n protocol=match.group(7),\n response_code=int(match.group(8)),\n content_size=size\n ), 1)", "def parse_instant_string(cls, inst_str):\n #return dt.datetime.strptime(inst_str, '%Y-%m-%d %H:%M:%S')\n if inst_str is None or inst_str.__eq__('LATEST'):\n return None\n else:\n try:\n return Instant.parse(inst_str)\n except TypeError, e:\n raise TypeError(e)", "def create_log_entry_when_user_logs_in(sender, request, user, **kwargs):\n create_user_log(\n request=request,\n user=user,\n type=_account_const.AUTHENTICATION,\n action=_account_const.LOGIN\n )", "def to_entries(self, add_tags=[], add_time=False, moments_only=False):\n entries = []\n\n entry_regex = \"\\*\"\n entry_search = re.compile(entry_regex)\n\n cur_entry = Moment()\n cur_entry.path = self.name\n\n new_entry = None\n \n try:\n self.seek(0)\n line = self.readline()\n line = unicode(line)\n except:\n print \"Problem reading file\"\n return entries\n\n #first line of a log should have an entry... 
this is our check\n if entry_search.match(line):\n self.has_entries = True\n while line:\n #we might have found a new entry...\n #see what kind, if any:\n (ts, line_tags) = timestamp.parse_line_for_time(line)\n if ts:\n new_entry = Moment()\n new_entry.created = timestamp.Timestamp(ts)\n elif entry_search.match(line): \n if not moments_only:\n new_entry = Moment()\n elif add_time and moments_only:\n #ok to make a default time for the entry\n new_entry = Moment()\n print \"no timestamp found in this entry\"\n else:\n #must be moments only,\n #but we don't want to add a timestamp\n #just include the data with the previous moment\n new_entry = None\n\n if new_entry:\n #finish up last entry...\n #only need to add if it had information\n if cur_entry.data or cur_entry.tags:\n entries.append(cur_entry)\n\n new_entry.path = self.name\n\n current_tags = line_tags.strip().split()\n\n if add_tags:\n temp_tags = add_tags[:]\n for t in current_tags:\n if t not in temp_tags:\n temp_tags.append(t)\n current_tags = temp_tags\n\n new_entry.tags.extend(current_tags)\n cur_entry = new_entry\n new_entry = None\n\n else:\n # only want to add the entry itself\n cur_entry.data += line\n\n line = unicode(self.readline())\n \n #need to get the last entry from the file, if there is one.\n if cur_entry.data:\n entries.append(cur_entry)\n\n #if not, don't scan\n else:\n print \"File does not start with an entry: %s\" % self.name\n \n return entries", "def prepare_event_log(log):\n for trace in log:\n attributes = trace.attributes.copy()\n for attribute in attributes:\n trace.attributes[\"t_\" + attribute] = trace.attributes.pop(attribute)\n for event in trace:\n attributes = event._dict.copy()\n for attribute in attributes:\n event._dict[\"e_\" + attribute] = event._dict.pop(attribute)\n return log", "def create_from_string(cls, text):\n parts = text.split('::')\n pcount = len(parts)\n if pcount == 4:\n name = parts[0]\n u_path = parts[1]\n ds_name = parts[2]\n dir_struc = None\n for _ in DirStruc:\n if _.name == ds_name:\n dir_struc = _\n break\n else:\n raise DvczError(\n \"Not the name of a valid dir_struc name: '%s'\" % ds_name)\n\n # 'item access'\n hashtype = HashTypes[parts[3]]\n return Store(name, u_path, dir_struc, hashtype)\n else:\n raise DvczError(\"Invalid Store descriptor: '%s'\" % text)", "def parseApacheLogLine(logline):\n match = re.search(APACHE_ACCESS_LOG_PATTERN, logline)\n if match is None:\n return (logline, 0)\n size_field = match.group(9)\n if size_field == '-':\n size = long(0)\n else:\n size = long(match.group(9))\n return (Row(\n host = match.group(1),\n client_identd = match.group(2),\n user_id = match.group(3),\n date_time = parse_apache_time(match.group(4)),\n method = match.group(5),\n endpoint = match.group(6),\n protocol = match.group(7),\n response_code = int(match.group(8)),\n content_size = size\n ), 1)", "def new_create_log_message(incident_name: str, **kwargs) -> str:\r\n incident_type, incident_code = incident_name.split()\r\n url_name_list = kwargs[\"url_name_list\"] if \"url_name_list\" in kwargs else None\r\n url_name = kwargs[\"url_name\"].lower() if \"url_name\" in kwargs else None\r\n url_path = kwargs[\"url_path\"].lower() if \"url_path\" in kwargs else None\r\n\r\n incidents = {\r\n \"Info\": [\r\n \"JSON was decode\",\r\n f\"Package was download from URL: { url_path }\"\r\n ],\r\n \"Warning\": [\r\n \"JSON is not valid\",\r\n f\"JSON did not loaded from URL: { url_path }\"\r\n ],\r\n \"Error\": [\r\n f\"No version was found in { url_name_list }\",\r\n f\"Package 
download error from URL: { url_path }\"\r\n ],\r\n \"Disaster\": [\r\n \"No one package was downloaded\"\r\n ]\r\n }\r\n yield f\"{ datetime.now() } -- { incident_type } \\t { url_name }:\\t { incidents[incident_type][int(incident_code)] }\"", "def _parse_result_entry(result):\n entry = ParsedEntry()\n\n if \"content\" in result and len(result.content) > 0:\n entry.content = result.content[0].value\n # if not html, have to escape\n if result.content[0].type not in HTML_MIME_TYPES:\n entry.content = cgi.escape(entry.content)\n elif \"summary_detail\" in result:\n entry.content = result.summary_detail.value\n # if not html, have to escape\n if result.summary_detail.type not in HTML_MIME_TYPES:\n entry.content = cgi.escape(entry.content)\n else:\n entry.content = \"\"\n entry.link = result.get(\"link\", None)\n entry.title = result.get(\"title\", None)\n if \"author_detail\" in result and \"name\" in result.author_detail:\n entry.author = result.author_detail.name\n else:\n entry.author = None\n if \"updated_parsed\" in result and result.updated_parsed is not None:\n entry.date = int(calendar.timegm(result.updated_parsed))\n elif \"published_parsed\" in result and result.published_parsed is not None:\n entry.date = int(calendar.timegm(result.published_parsed))\n else:\n entry.date = int(time.time())\n # try to find something to use as GUID, or fall back to static string\n guid_content = result.get(\"id\", entry.title)\n if guid_content is None:\n guid_content = \"None\"\n entry.guid = hashlib.sha1(guid_content.encode('utf-8')).hexdigest()\n return entry", "def _from_string(cls, serialized):\r\n parse = cls.parse_url(serialized)\r\n\r\n if parse['version_guid']:\r\n parse['version_guid'] = cls.as_object_id(parse['version_guid'])\r\n\r\n return cls(**{key: parse.get(key) for key in cls.KEY_FIELDS})", "def Log(self, message, record_id=None):\n message = str(message)\n record = self._CreateRecord(message, record_id)\n if self._formatter:\n self._formatter.Format(record)\n if len(record.message) > _MAX_MSG_SIZE:\n logging.error('Message must be less than (%s)', _MAX_MSG_SIZE)\n return\n self._records.appendleft(record)\n return record.id", "def create_log_event(message, context):\n try: import config #fix for early logging\n except: config = None #also works around daemon wrappers\n event = {}\n event.update(context)\n event['message'] = message\n event['time'] = time.time()\n event['system'] = context.get('system') or context.get('type') or 'console'\n\n if event.get('error'):\n event['isError'] = True\n\n if event.get('warning'):\n event['message'] = '[WARNING] %s' % event['message']\n\n if event.get('excessive') and config and not config.EXCESSIVE_LOGGING:\n event['discard'] = True\n\n event['message'] = (event['message'],) #stupid hack for twisted...\n return event", "def __init__(self, message_type: LogType, message: str):\n self.timestamp = datetime.datetime.now().strftime(\"%m-%d-%Y %I:%M:%S %p\")\n self.message = message\n self.message_type = message_type", "def _from_string(cls, serialized):\r\n parse = cls.URL_RE.match(serialized)\r\n if not parse:\r\n raise InvalidKeyError(cls, serialized)\r\n\r\n parse = parse.groupdict()\r\n if parse['definition_id']:\r\n parse['definition_id'] = cls.as_object_id(parse['definition_id'])\r\n\r\n return cls(**{key: parse.get(key) for key in cls.KEY_FIELDS})", "def from_string(cls, dlstr):\n\n lines = dlstr.splitlines()\n line = lines.pop(0)\n pfreq = MCMove._parse_move_statement(line)[2]\n\n movers = []\n for line in lines:\n mover = 
cls.parse_mover(line)\n movers.append(mover)\n\n return cls(pfreq, movers)", "def from_string (cls, string, access=DEFAULT_ACCESS, accept_value=True):\n hKey, moniker, value = cls._from_string (string, access, accept_value)\n if value is None:\n return cls (moniker, access)\n else:\n return cls (moniker, access).get_value (value)" ]
[ "0.8345061", "0.68320376", "0.67686236", "0.6422642", "0.6405456", "0.62253773", "0.6163121", "0.601324", "0.6002534", "0.59997934", "0.5892316", "0.5799083", "0.5459042", "0.54550475", "0.54225075", "0.5376155", "0.53600734", "0.533564", "0.5331905", "0.5318096", "0.5290036", "0.5286071", "0.52860576", "0.52679455", "0.5248397", "0.52271724", "0.52271724", "0.52121204", "0.52093697", "0.5194767", "0.51742506", "0.5160546", "0.5108413", "0.5099711", "0.5080441", "0.50676066", "0.506191", "0.5057407", "0.50547874", "0.5044496", "0.50324637", "0.4992643", "0.49868113", "0.49850944", "0.49824604", "0.49811363", "0.4980005", "0.4950048", "0.49500093", "0.49485987", "0.49446723", "0.49365392", "0.4933833", "0.4933307", "0.4923216", "0.48981932", "0.4860497", "0.4860043", "0.48570213", "0.48467866", "0.48418117", "0.48360023", "0.48241654", "0.4821241", "0.48061782", "0.48036653", "0.4792418", "0.4785683", "0.478175", "0.47783747", "0.47752237", "0.4764118", "0.4762941", "0.4738712", "0.4735768", "0.4735345", "0.47309405", "0.47286874", "0.4721396", "0.4716118", "0.47134516", "0.47048917", "0.46776897", "0.46667552", "0.46662286", "0.46659094", "0.46633473", "0.46540534", "0.46496493", "0.46483147", "0.46465006", "0.46441576", "0.46437743", "0.46414065", "0.46345595", "0.46324128", "0.46287042", "0.46256605", "0.46213704", "0.4619493" ]
0.7880306
1
Create a LogEntry from the given dictionary.
def from_data(data_dict, intrusion=None):
    # Data is verified in the ctor and setters
    return LogEntry(vin=data_dict[LogEntry.VIN_FIELD],
                    app_id=data_dict[LogEntry.APP_ID_FIELD],
                    level=data_dict[LogEntry.LEVEL_FIELD],
                    log_message=data_dict[LogEntry.LOG_MESSAGE_FIELD],
                    gps_position=data_dict[LogEntry.GPS_POSITION_FIELD],
                    time_unix=data_dict[LogEntry.TIME_UNIX_FIELD],
                    log_id=data_dict[LogEntry.LOG_ID_FIELD],
                    intrusion=intrusion)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def from_dict(self, dict_entry, line_length=80):\r\n try:\r\n # Set the entry object's attributes to the corresponding\r\n # values in the dictionary entry. Type conversions need to\r\n # be done for non-string attributes.\r\n for key in dict_entry:\r\n dict_entry[key] = self._convert_dict_key(dict_entry[key])\r\n # end for\r\n # Go through the attributes and set them.\r\n if self._validate_dict_entry(dict_entry) or self.info is not None:\r\n try:\r\n for attr in self.FIELDNAMES:\r\n setattr(self, attr, dict_entry[attr])\r\n # end for\r\n return True\r\n except Exception as err:\r\n wl_resource.print_status(\r\n \"Error\", f\"Error creating entry: {err}\",\r\n line_length=line_length)\r\n # end try\r\n else:\r\n return False\r\n except Exception as err:\r\n _z_exc(\"logentry.py/from_dict\", err)\r\n # end try\r", "def from_log_line(cls, log_line: str):\n match = cls.entry_format.match(log_line)\n if not match:\n return None\n\n date = match.group(\"date\")\n time = match.group(\"time\")\n offset = match.group(\"offset\")\n\n entry = cls(\n ip_address=match.group(\"ip_address\"),\n username=match.group(\"username\"),\n timestamp=f\"{date} {time} {offset}\",\n verb=match.group(\"verb\"),\n path=match.group(\"path\"),\n version=match.group(\"http_version\"),\n status=match.group(\"response_status\"),\n size=match.group(\"size\"))\n\n return entry", "def from_dict(dictionary=dict()):\n list_entry = ListEntry()\n list_entry.set_id(dictionary[\"id\"])\n list_entry.set_name(dictionary[\"name\"])\n list_entry.set_purchasing_user(dictionary[\"purchasingUserId\"])\n list_entry.set_amount(dictionary[\"amount\"])\n list_entry.set_article(dictionary[\"articleId\"])\n list_entry.set_unit(dictionary[\"unit\"])\n list_entry.set_retailer(dictionary[\"retailerId\"])\n list_entry.set_standardarticle(dictionary[\"standardarticle\"])\n list_entry.set_checked(dictionary[\"checked\"])\n list_entry.set_shopping_list(dictionary[\"shoppingListId\"])\n list_entry.set_checked_ts(ListEntry.date_format(dictionary[\"checkedTs\"]))\n list_entry.set_creation_date(ListEntry.date_format(dictionary[\"creationDate\"]))\n list_entry.set_last_updated(ListEntry.date_format(dictionary[\"lastUpdated\"]))\n\n return list_entry", "def create_entry(entry):\n Entry.create(**entry)\n return entry", "def construct_request_obj(cls, dict_):\n\n entry_object = {\"entry\": dict_}\n\n return entry_object", "def create(cls, dictionary):\n return cls(**dictionary)", "def create(cls, dictionary):\n return cls(**dictionary)", "def from_dict(cls, d):\n return cls(**d)", "def from_dict(cls, d):\n return loadd(d, cls)", "def from_dict(cls, dictionary: Dict[str, Any]):\n return cls(**dictionary)", "def from_dict(cls, dictionary):\n normalised = cls.normalise_dict(dictionary)\n return cls.from_normalised_dict(normalised)", "def from_dict(d):\n return eptStaleEvent(**d)", "def from_dict(cls, data):\n return cls(**data)", "def from_dict(cls, dikt) -> 'HosAuthenticationLogsParam':\n return util.deserialize_model(dikt, cls)", "def from_dictionary(cls,\r\n dictionary):\r\n if dictionary is None:\r\n return None\r\n\r\n # Extract variables from the dictionary\r\n level = dictionary.get(\"level\")\r\n status = dictionary.get(\"status\")\r\n status_code = dictionary.get(\"status_code\")\r\n status_code_description = dictionary.get(\"status_code_description\")\r\n timestamp = APIHelper.RFC3339DateTime.from_value(dictionary.get(\"timestamp\")).datetime if dictionary.get(\"timestamp\") else None\r\n\r\n # Return an object of this model\r\n return 
cls(level,\r\n status,\r\n status_code,\r\n status_code_description,\r\n timestamp)", "def from_dict(cls, data: Dict[str, any]):\n return cls(**data)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def from_dictionary(cls,\r\n dictionary):\r\n if dictionary is None:\r\n return None\r\n\r\n # Extract variables from the dictionary\r\n to = dictionary.get('to')\r\n application_id = dictionary.get('applicationId')\r\n expiration_time_in_minutes = dictionary.get('expirationTimeInMinutes')\r\n code = dictionary.get('code')\r\n scope = dictionary.get('scope')\r\n\r\n # Return an object of this model\r\n return cls(to,\r\n application_id,\r\n expiration_time_in_minutes,\r\n code,\r\n scope)", "def from_dict(cls, inp):\n return cls(**{k: v for k, v in inp.items() if k != '__class__'})", "def from_dict(cls, dct):\n return cls(**dct)", "def 
from_dict(cls, d):\n s = cls()\n s.update_from_dict(d)\n return s", "def from_dict(cls, dict_object):\n\n return cls(**dict_object)", "def from_dict(cls, record, _id=None):\n # copy dict\n record = dict(record)\n\n # get record id and remove it from record\n record_id = record.pop(\"_id\", None)\n if _id is None:\n _id = record_id\n if _id is None:\n _id = cls._make_uuid()\n\n # make record\n return cls(record, _id)", "def from_log_string(log_string):\n\n\t\tfirst_part = None\n\t\tsecond_part = None\n\n\t\tif not log_string.endswith(\"}\"):\n\t\t\t# Value error for later use\n\t\t\tvalue_error = ValueError(\"Given string has invalid format: {}\".format(log_string))\n\n\t\t\tbracket_idx = log_string.find(\"}\")\n\t\t\tlast_comma_idx = log_string.find(\",\", bracket_idx)\n\t\t\tif last_comma_idx != bracket_idx + 1:\n\t\t\t\traise value_error\n\n\t\t\t# The bracket is kept\n\t\t\tfirst_part = log_string[:bracket_idx + 1]\n\t\t\t# The comma is removed\n\t\t\tsecond_part = log_string[last_comma_idx + 1:]\n\t\t\tif \"}\" not in first_part or \"}\" in second_part or \"{\" in second_part:\n\t\t\t\traise value_error\n\n\t\tdata_dict = json.loads(first_part)\n\t\treturn LogEntry.from_data(data_dict, second_part)", "def from_dict(cls, d):\n hostname = d.get('hostname')\n project = d.get('project')\n treeish = d.get('treeish')\n path = d.get('path')\n _validate_args(\n hostname,\n project,\n treeish,\n path,\n path_required=True)\n return cls(hostname, project, treeish, path)", "def from_dict(cls, tag_dict):\n return cls(tag_dict.get('tag_type'), tag_dict.get('value'))", "def test_create_from_dict(self):\n sink_dict = {\n 'name': 'another-log-sink',\n 'destination': 'pubsub.googleapis.com/projects/my-logs/topics/logs',\n 'outputVersionFormat': 'V2',\n 'includeChildren': True,\n 'writerIdentity': (\n 'serviceAccount:[email protected]'),\n }\n\n sink = log_sink.LogSink.from_dict(self.folder_56, sink_dict)\n\n self.assertEqual('another-log-sink', sink.id)\n self.assertEqual('sink', sink.type)\n self.assertEqual('folders/56/sinks/another-log-sink', sink.name)\n self.assertEqual('', sink.sink_filter)\n self.assertEqual('pubsub.googleapis.com/projects/my-logs/topics/logs',\n sink.destination)\n self.assertTrue(sink.include_children)\n self.assertEqual(sink_dict, json.loads(sink.raw_json))", "def from_dictionary(cls,\n dictionary):\n if dictionary is None:\n return None\n\n # Extract variables from the dictionary\n day = dictionary.get('day')\n end_time = cohesity_management_sdk.models.time.Time.from_dictionary(dictionary.get('endTime')) if dictionary.get('endTime') else None\n start_time = cohesity_management_sdk.models.time.Time.from_dictionary(dictionary.get('startTime')) if dictionary.get('startTime') else None\n\n # Return an object of this model\n return cls(\n day,\n end_time,\n start_time\n)", "def from_dictionary(cls,\n dictionary):\n if dictionary is None:\n return None\n\n # Extract variables from the dictionary\n address = dictionary.get('address')\n port = dictionary.get('port')\n protocol = dictionary.get('protocol')\n is_cluster_auditing_enabled = dictionary.get('isClusterAuditingEnabled')\n is_data_protection_enabled = dictionary.get('isDataProtectionEnabled')\n is_filer_auditing_enabled = dictionary.get('isFilerAuditingEnabled')\n is_ssh_log_enabled = dictionary.get('isSshLogEnabled')\n name = dictionary.get('name')\n\n # Return an object of this model\n return cls(address,\n port,\n protocol,\n is_cluster_auditing_enabled,\n is_data_protection_enabled,\n is_filer_auditing_enabled,\n 
is_ssh_log_enabled,\n name)", "def _from_dict(cls, d):\n confidence = d.get(\"confidence\", None)\n constant = d.get(\"constant\", False)\n tags = d.get(\"tags\", None)\n return cls(\n d[\"name\"],\n d[\"value\"],\n confidence=confidence,\n constant=constant,\n tags=tags,\n )", "def from_dict(cls, d):\n assert \"status\" in d\n assert \"metadata\" in d\n return cls(**d)", "def fromdict(cls,datadict):\n return cls(fmetric=datadict.get('fmetric'),\n fhost=datadict.get('fhost'),\n fvalue=datadict.get('fvalue'),\n ftime=datadict.get('ftime'),\n funit=datadict.get('funit'),\n finfo=datadict.get('finfo'))", "def from_dict(cls, dct):\n pass", "def from_dict(eventScheduleDict):\n pass", "def create_entry_for_topic(cls, topic, entry_id, content_hash):\n\t\tkey = cls.create_key(topic, entry_id)\n\t\treturn cls(key_name=key.name(),\n\t\t\t\t\t\t\t parent=key.parent(),\n\t\t\t\t\t\t\t entry_id=entry_id,\n\t\t\t\t\t\t\t entry_id_hash=utils.sha1_hash(entry_id),\n\t\t\t\t\t\t\t entry_content_hash=content_hash)", "def from_dictionary(cls,\n dictionary):\n if dictionary is None:\n return None\n\n # Extract variables from the dictionary\n fqdn = dictionary.get('fqdn')\n guid = dictionary.get('guid')\n id = dictionary.get('id')\n name = dictionary.get('name')\n owner_id = dictionary.get('ownerId')\n status = dictionary.get('status')\n total_size_bytes = dictionary.get('totalSizeBytes')\n\n # Return an object of this model\n return cls(\n fqdn,\n guid,\n id,\n name,\n owner_id,\n status,\n total_size_bytes\n)", "def from_dict(cls, dikt) -> 'SourceAudit':\n return util.deserialize_model(dikt, cls)", "def copy(log_entry):\n\n\t\tassert(isinstance(log_entry, LogEntry))\n\t\treturn LogEntry.from_data(log_entry.data, log_entry.intrusion)", "def from_dictionary(cls,\n dictionary):\n if dictionary is None:\n return None\n\n # Extract variables from the dictionary\n environment = dictionary.get('environment')\n relative_snapshot_directory = dictionary.get('relativeSnapshotDirectory')\n root_path = dictionary.get('rootPath')\n source_snapshot_create_time_usecs = dictionary.get('sourceSnapshotCreateTimeUsecs')\n source_snapshot_name = dictionary.get('sourceSnapshotName')\n view_name = dictionary.get('viewName')\n\n # Return an object of this model\n return cls(\n environment,\n relative_snapshot_directory,\n root_path,\n source_snapshot_create_time_usecs,\n source_snapshot_name,\n view_name\n)", "def from_dict(cls, d, record_cls=None):\n if record_cls is None:\n record_cls_str = d.get(cls._ELE_CLS_FIELD, None)\n if record_cls_str is None:\n raise DataRecordsError(\n \"Your DataRecords does not have its '%s' attribute \"\n \"populated, so you must manually specify the `record_cls` \"\n \"to use when loading it\" % cls._ELE_CLS_FIELD\n )\n record_cls = etau.get_class(record_cls_str)\n\n return DataRecords(\n record_cls=record_cls,\n records=[record_cls.from_dict(r) for r in d[cls._ELE_ATTR]],\n )", "def from_dictionary(cls,\r\n dictionary):\r\n if dictionary is None:\r\n return None\r\n\r\n # Extract variables from the dictionary\r\n id = dictionary.get('Id')\r\n name = dictionary.get('Name')\r\n last_edited = APIHelper.RFC3339DateTime.from_value(dictionary.get(\"LastEdited\")).datetime if dictionary.get(\"LastEdited\") else None\r\n\r\n # Clean out expected properties from dictionary\r\n for key in cls._names.values():\r\n if key in dictionary:\r\n del dictionary[key]\r\n\r\n # Return an object of this model\r\n return cls(id,\r\n name,\r\n last_edited,\r\n dictionary)", "def from_dict(cls, dictionary):\n obj = cls()\n 
for var, data in dictionary.items():\n obj[var] = data\n\n return obj", "def from_dictionary(cls,\n dictionary):\n if dictionary is None:\n return None\n\n # Extract variables from the dictionary\n auto_log_backup = dictionary.get('autoLogBackup')\n dynamic_config = dictionary.get('dynamicConfig')\n entity_support = dictionary.get('entitySupport')\n full_backup = dictionary.get('fullBackup')\n incr_backup = dictionary.get('incrBackup')\n log_backup = dictionary.get('logBackup')\n multi_object_restore = dictionary.get('multiObjectRestore')\n\n # Return an object of this model\n return cls(\n auto_log_backup,\n dynamic_config,\n entity_support,\n full_backup,\n incr_backup,\n log_backup,\n multi_object_restore\n)", "def from_dictionary(cls,\r\n dictionary):\r\n if dictionary is None:\r\n return None\r\n\r\n # Extract variables from the dictionary\r\n ephemeral_public_key = dictionary.get('ephemeral_public_key')\r\n public_key_hash = dictionary.get('public_key_hash')\r\n transaction_id = dictionary.get('transaction_id')\r\n\r\n # Return an object of this model\r\n return cls(ephemeral_public_key,\r\n public_key_hash,\r\n transaction_id)", "def from_dict(cls, dictionary):\n instance = cls()\n for key, value in dictionary.items():\n instance.__dict__[key] = value\n\n return instance", "def from_dict(cls, data:{}):\n instance = cls(\n data['address'],\n None,\n data['frequency'],\n data['resolution'],\n data['servo_frequency']\n )\n if data['logging_level'] is not None:\n logger.setLevel(data['logging_level'])\n return instance", "def from_dictionary(cls,\r\n dictionary):\r\n if dictionary is None:\r\n return None\r\n\r\n # Extract variables from the dictionary\r\n account_holder_name = dictionary.get('accountHolderName')\r\n routing_number = dictionary.get('routingNumber')\r\n\r\n # Return an object of this model\r\n return cls(account_holder_name,\r\n routing_number)", "def from_dict(cls, data):\r\n instance = cls()\r\n for key, value in data.items():\r\n instance.__dict__[key] = value\r\n return instance", "def from_dict(cls, d):\n d = d.copy()\n if \"length\" in d:\n # length argument removed in version 1.1.0\n del d[\"length\"]\n return cls(**d)", "def test_addEntryByDict(self):\n self.g.entryFormat = ['term', 'tags', 'value']\n b = self.g.add_entry({'term': 'foo', 'tags': 'a', 'value': '1'})\n self.assertTrue(b)", "def from_dictionary(cls,\n dictionary):\n if dictionary is None:\n return None\n\n # Extract variables from the dictionary\n id = dictionary.get('id')\n name = dictionary.get('name')\n mtype = dictionary.get('type')\n usage_bytes = dictionary.get('usageBytes')\n\n # Return an object of this model\n return cls(\n id,\n name,\n mtype,\n usage_bytes\n)", "def from_dict(cls, dct):\n dct['address'] = Address(**dct['address'])\n return cls(**dct)", "def from_dictionary(cls,\r\n dictionary):\r\n if dictionary is None:\r\n return None\r\n\r\n # Extract variables from the dictionary\r\n username = dictionary.get('username')\r\n first_name = dictionary.get('firstName')\r\n last_name = dictionary.get('lastName')\r\n application_id = dictionary.get('applicationId')\r\n\r\n # Clean out expected properties from dictionary\r\n for key in cls._names.values():\r\n if key in dictionary:\r\n del dictionary[key]\r\n\r\n # Return an object of this model\r\n return cls(username,\r\n first_name,\r\n last_name,\r\n application_id,\r\n dictionary)", "def from_dictionary(cls,\n dictionary):\n if dictionary is None:\n return None\n\n # Extract variables from the dictionary\n action = 
dictionary.get('action')\n cluster_info = dictionary.get('clusterInfo')\n details = dictionary.get('details')\n domain = dictionary.get('domain')\n entity_id = dictionary.get('entityId')\n entity_name = dictionary.get('entityName')\n entity_type = dictionary.get('entityType')\n human_timestamp = dictionary.get('humanTimestamp')\n impersonation = dictionary.get('impersonation')\n ip = dictionary.get('ip')\n new_record = dictionary.get('newRecord')\n original_tenant = cohesity_management_sdk.models.tenant.Tenant.from_dictionary(dictionary.get('originalTenant')) if dictionary.get('originalTenant') else None\n previous_record = dictionary.get('previousRecord')\n tenant = cohesity_management_sdk.models.tenant.Tenant.from_dictionary(dictionary.get('tenant')) if dictionary.get('tenant') else None\n timestamp_usecs = dictionary.get('timestampUsecs')\n user_name = dictionary.get('userName')\n\n # Return an object of this model\n return cls(\n action,\n cluster_info,\n details,\n domain,\n entity_id,\n entity_name,\n entity_type,\n human_timestamp,\n impersonation,\n ip,\n new_record,\n original_tenant,\n previous_record,\n tenant,\n timestamp_usecs,\n user_name\n)", "def from_dictionary(cls,\n dictionary):\n if dictionary is None:\n return None\n\n # Extract variables from the dictionary\n mor_item = dictionary.get('morItem')\n mor_type = dictionary.get('morType')\n uuid = dictionary.get('uuid')\n\n # Return an object of this model\n return cls(\n mor_item,\n mor_type,\n uuid\n)", "def from_dict(cls, dto):\n # Map column names back to structures fields. The keys must be equal to the column name.\n try:\n clean_dict = {c.name: dto[c.name] for c in cls.__table__.columns}\n return cls(**clean_dict)\n except KeyError as e:\n raise AppException(\"Missing key {} for {}\".format(str(e), cls.__name__))" ]
[ "0.6882139", "0.68295366", "0.6646199", "0.6632009", "0.634357", "0.6288146", "0.6288146", "0.62512225", "0.6236364", "0.6207709", "0.61589706", "0.6122494", "0.5947414", "0.59346", "0.5900016", "0.5896469", "0.58897763", "0.58897763", "0.58897763", "0.58897763", "0.58897763", "0.58897763", "0.58897763", "0.58897763", "0.58897763", "0.58897763", "0.58897763", "0.58897763", "0.58897763", "0.58897763", "0.58897763", "0.58897763", "0.58897763", "0.58897763", "0.58897763", "0.58897763", "0.58897763", "0.58897763", "0.58897763", "0.58897763", "0.58897763", "0.58897763", "0.58897763", "0.58897763", "0.58897763", "0.58897763", "0.58897763", "0.58897763", "0.58897763", "0.58897763", "0.58897763", "0.58897763", "0.58897763", "0.58897763", "0.58897763", "0.58897763", "0.58897763", "0.58897763", "0.58897763", "0.58897763", "0.58897763", "0.58642256", "0.58586097", "0.5854738", "0.58538973", "0.5852653", "0.5833452", "0.58268976", "0.58222383", "0.5814537", "0.57562095", "0.57215285", "0.5716658", "0.57055575", "0.569735", "0.56971514", "0.56740665", "0.5672543", "0.5654746", "0.56378305", "0.562929", "0.5628953", "0.56156856", "0.56067866", "0.5594739", "0.55885893", "0.5588269", "0.5584434", "0.55767155", "0.5562073", "0.5549865", "0.55484676", "0.5534401", "0.5529018", "0.5524529", "0.55131096", "0.5510277", "0.5501222", "0.5496768", "0.548476" ]
0.7672826
0
Value-copy the given LogEntry object.
def copy(log_entry):
    assert(isinstance(log_entry, LogEntry))
    return LogEntry.from_data(log_entry.data, log_entry.intrusion)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __copy__(self):\n return type(self)(self.value)", "def copy(self):\n return type(self)(self._val, lsd=self._lsd)", "def copy(self, source_valueid, dest_valueid):\n raise NotImplementedError(\"abstract\")", "def copy_entry(self, row, col):\n if self.results and self.settings['auto_copy']:\n row, col = self.table.currentRow(), self.table.currentColumn()\n to_copy = self.results[row][col]\n self.clipboard.setText(to_copy)", "def copy(self):", "def _clone_plugin_data(self, entry):\n try:\n return self.clone_plugin_data(entry)\n except Exception as err:\n logging.debug(str(err))", "def copy_value(obj):\n c = obj.GetDynamicValue(obj.GetPreferDynamicValue())\n return c", "def _unpack(self, entry):\n return entry._value", "def copy(self):\n pass", "def copy(self):\n pass", "def copy(self):\n pass", "def _prepare_cache(self, value):\n\n return deepcopy(value)", "def copy(self):\n return self.__class__(self.value, self.is_cloud)", "def __copy__(self):\n\t\tcopy_paster = Log()\n\t\tcopy_paster.__dict__.update(self.__dict__)\n\t\tcopy_paster.cur_tensor = self.cur_tensor.clone()\n\t\treturn copy_paster", "def __copy__(self):\n copy = self.__class__(self.param)\n copy.last_string2object_failed = self.last_string2object_failed\n copy.msg_handler = self.msg_handler\n return copy", "def __deepcopy__(self, memo):\n\t\tcopy_paster = Log()\n\t\tcopy_paster.__dict__.update(self.__dict__)\n\t\tcopy_paster.cur_tensor = self.cur_tensor.clone()\n\t\treturn copy_paster", "def copy_log_details(self) -> Sequence[Any]:\n return pulumi.get(self, \"copy_log_details\")", "def copy_log_details(self) -> Sequence[Any]:\n return pulumi.get(self, \"copy_log_details\")", "def copy_log_details(self) -> Sequence[Any]:\n return pulumi.get(self, \"copy_log_details\")", "def __copy__(self, *args, **kwargs):\n return self.copy()", "def _copy_(self):\n return copy.copy(self)", "def svn_info_t_copyfrom_rev_get(svn_info_t_self): # real signature unknown; restored from __doc__\n pass", "def copy(self, event):\n return", "def copy(self):\n return self.mutate().simple_copy()", "def copy(self):\n new = self.__class__()\n new.values = self.values.copy()\n return new", "def copy(self, *args, **kwargs): # real signature unknown\n pass", "def copy(self, *args, **kwargs): # real signature unknown\n pass", "def copy(self, *args, **kwargs): # real signature unknown\n pass", "def copy(self, *args, **kwargs): # real signature unknown\n pass", "def copy(self, *args, **kwargs): # real signature unknown\n pass", "def copy(self, *args, **kwargs): # real signature unknown\n pass", "def copy(self, *args, **kwargs): # real signature unknown\n pass", "def CopyTo(self, *args, **kwargs):\n pass", "def copy(self, source_valueid, dest_valueid):\n if source_valueid not in self._values:\n raise ValueError(\"Source of copy does not exist\")\n elif dest_valueid in self._values:\n raise ValueError(\"Destination of value already exists.\")\n \n source_holder = self._values[source_valueid]\n dest_holder = ValueHolder(dest_valueid, None)\n self._values[dest_valueid] = dest_holder\n\n def got_source_container(source_container):\n \n def got_value(v):\n dest_container = ValueContainer(value=v,\n pickle_supported=source_container.get_pickle_supported())\n dest_holder.set(dest_container)\n \n d = source_container.get_value()\n d.addCallback(got_value)\n return d\n \n d = source_holder.get()\n d.addCallback(got_source_container)\n return d", "def copy(self):\n return self.from_dict(self.to_dict(True))", "def __copy__(self):\n return self.copy()", "def copy(self): # 
real signature unknown; restored from __doc__\n pass", "def copy(self): # real signature unknown; restored from __doc__\n pass", "def copy(self): # real signature unknown; restored from __doc__\n pass", "def clone_plugin_data(self, entry):", "def copy(self):\n return self.__class__(\n group_list=self.group_list, bits_except_last=self.bits_except_last,\n max_value=self.max_value\n )", "def copy(self):\r\n raise Exception, \"not implemented\"", "def copy(self):\r\n return copy.copy(self)", "def copy(self):\n\t\tassert ltrace_func(TRACE_BASE)\n\t\ttemp = self.__class__()\n\t\ttemp.copy_from(self)\n\t\treturn temp", "def copy(self) -> KeyedEffect:\n # getattr(self, '_last_uid_setter', uid_setter_instance) ??\n return deepcopy(self)", "def __init__(self, entry):\n \n self.lastChangedDate = entry.time\n self.size = entry.size\n self.kind = entry.kind\n self.logMessage = None", "def __copy__(self, *args, **kwargs): # real signature unknown\n pass", "def __copy__(self, *args, **kwargs): # real signature unknown\n pass", "def copy (a_data) :\n return a_data.copy()", "def copy(self):\n return super().copy()", "def varcopy(self, vars):", "def copy(self):\n return copy.copy(self)", "def copy(self):\n return copy.copy(self)", "def copy(self):\n return copy.copy(self)", "def __deepcopy__(self, memo):\n return self.copy()", "def svn_info_t_copyfrom_rev_set(svn_info_t_self, svn_revnum_t_copyfrom_rev): # real signature unknown; restored from __doc__\n pass", "def clone(self) -> Any:\n return cp.copy(self)", "def __deepcopy__(self, memo):\n id_self = id(self)\n _copy = memo.get(id_self)\n if _copy is None:\n _copy = type(self)(\n deepcopy(self.value, memo))\n memo[id_self] = _copy\n return _copy", "def copy(self):\n\t\treturn pythoncopy.deepcopy(self)", "def copy(self):\n copy = Node(self.ident)\n for k, v in self.iteritems():\n copy[k] = v\n return copy", "def copy (self, **kwargs):\n out = copy.deepcopy (self)\n out.update (**kwargs)\n return out", "def log_entry(self, timestamp, entry):\n if timestamp in self.log:\n self.log[timestamp].update(entry)\n else:\n self.log[timestamp] = entry", "def copy_var(self, name, val):\n return self.env_copy({name: val})[name]", "def copy (self):\n import copy\n return copy.copy(self)", "def _aggregate_log_values(self, source, dest):\n remove = []\n for key, item in source.items():\n if \"data\" not in item:\n # Assume it's a sub-group\n dest[key] = {}\n self._aggregate_log_values(item, dest[key])\n else:\n aggregator = self._get_aggregator_for_key(key, item['agg'])\n value = aggregator(item['data'])\n if item['precision'] is not None:\n value = round(value, item['precision'])\n dest[key] = value\n if item['scope'] == 'get':\n remove.append(key)\n for key in remove:\n del source[key]", "def __copy__(self):\n raise NotImplementedError", "def __deepcopy__(self, memo):\n copy = self.__class__()\n copy.wvalues = self.wvalues\n return copy", "def copy(self):\n new = object.__new__(type(self))\n new.required = self.required\n new.title = self.title\n new.type = self.type\n values = self.values\n if (values is not None):\n values = (*values,)\n new.values = values\n return new", "def _pack(self,entry,value):\n entry._value=value", "def copy(obj):\n return loads(dumps(obj))", "def deep_copy(value: TValue) -> TValue:\n def pattern_dispatcher(v, memo=None):\n return v # we don't need to copy a regex pattern object, it's read-only\n\n old_dispatcher = copy._deepcopy_dispatch.get(PatternType, None)\n copy._deepcopy_dispatch[PatternType] = pattern_dispatcher\n try:\n return 
copy.deepcopy(value)\n finally:\n if old_dispatcher is not None: # pragma: no cover\n copy._deepcopy_dispatch[PatternType] = old_dispatcher\n else:\n del copy._deepcopy_dispatch[PatternType]", "def copyState(self, that : 'AbstractRecursiveFilter') -> None:\n self.n = that.n;\n self.t0 = that.t0;\n self.t = that.t;\n self.tau = that.tau;\n self.D = that.D;\n self.Z = that.Z;", "def Copy(self, copy):\n return _table.Table_Copy(self, copy)", "def copy(self, dest):\n if not isinstance(dest, Request):\n raise ValueError(\"'%s' should be a sub-class of 'Request'\" % dest)\n return dest.update_url(self.url).update_verb(self.verb)\\\n .update_params(self.params).update_data(self.data)", "def fast_deep_copy(v: _T) -> _T:\n return cast(_T, pickle.loads(pickle.dumps(v)))", "def __copy__(self):\n return self.__class__(self.m, self.n, self.data)", "def copy_dictionary(self,dictionary):\r\n return dictionary.copy()", "def copy( self, data = None, index = None, value = None ) :\n\n if( data is None ) : data = self.coefficients\n if( index is None ) : index = self.index\n if( value is None ) : value = self.value\n return( self.returnAsClass( self, data, index = index, value = value ) )", "def copy(self, *args):\n pwd = self.var_pwd.get()\n if pwd and not pwd.lower().startswith(\"error\"):\n self.clipboard_clear()\n self.clipboard_append(pwd)", "def copy(self):\n dnew = Date(self.month, self.day, self.year)\n return dnew", "def copy(self):\n return self.__copy__()", "def copy(self):\n return self.__copy__()", "def copy(self):\n return self.__copy__()", "def copy(self):\n return self.__copy__()", "def copy_values(self, another):\n\n # Copy all value, uncertainty, and source information from the other\n # ExoParameter object.\n if isinstance(another, ExoParameter):\n self.reference = another.reference\n self.uncertainty = another.uncertainty\n self.uncertainty_lower = another.uncertainty_lower\n self.uncertainty_upper = another.uncertainty_upper\n self.units = another.units\n self.url = another.url\n self.value = another.value\n else:\n raise TypeError(\"Cannot copy values from a non-ExoParameter obj!\")", "def copyValueReference(self):\r\n\r\n text = '\\n'.join([str(v.valueReference) for v in self.getSelectedVariables()])\r\n QApplication.clipboard().setText(text)", "def copy(self):\n return Result(super().copy())", "def copy(self):\n return State([r[:] for r in self.values], empty_loc=self.empty_loc)", "def copyto(self, src, where=None):\n for n, _ in enumerate(self.l):\n if where[n]:\n self.l[n] = src.l[n] # not a copy", "def copy(self: _R) -> _R:\n return self.__class__(self.dumps())", "def copy(self):\n return self.__class__(dict(self))", "def test_deepcopy(self):\n t = Precision()\n t.transform([2])\n copy.deepcopy(t)", "def gen_va_copy(self, expr: expressions.BuiltInVaCopy):\n # Fetch source va_list\n valist_ptrptr = self.gen_expr(expr.src, rvalue=False)\n va_ptr = self.emit(ir.Load(valist_ptrptr, \"va_ptr\", ir.ptr))\n\n # Save the arg pointer into the ap_list variable:\n valist_ptrptr = self.gen_expr(expr.dest, rvalue=False)\n self.emit(ir.Store(va_ptr, valist_ptrptr))", "def copy(self):\n return Level(repr=self.as_dict())", "def copy(self):\n return copy(self)", "def copy(self):\n return copy(self)", "def get_copy(self):\n # This is used in avant-idle to pass the content of a cache in\n # the main process to a second process where an exception has been\n # raised.\n return self.cache", "def copy(self):\r\n return copy.deepcopy(self)", "def copyElement(self,elem):\n return( 
self.newElement(elem.__class__,elem.__dict__) )", "def __copy__(self):\n return type(self)(self.sig, *self[1:])" ]
[ "0.58514726", "0.57540035", "0.56757045", "0.5513019", "0.544298", "0.5388371", "0.5373151", "0.53523105", "0.53194475", "0.53194475", "0.53194475", "0.53140247", "0.52943194", "0.52818", "0.52673143", "0.5245059", "0.52405113", "0.52405113", "0.52405113", "0.5237511", "0.5211075", "0.5208468", "0.519917", "0.51971114", "0.51794416", "0.51547706", "0.51547706", "0.51547706", "0.51547706", "0.51547706", "0.51547706", "0.51547706", "0.51525587", "0.5096951", "0.50681", "0.5063711", "0.5053609", "0.5053609", "0.5053609", "0.5040206", "0.50297016", "0.49779096", "0.49753433", "0.4975071", "0.49609455", "0.49592456", "0.49491656", "0.49491656", "0.49430448", "0.49207935", "0.49193665", "0.49044293", "0.49044293", "0.49044293", "0.48987132", "0.4886131", "0.48853052", "0.488351", "0.4877767", "0.48732936", "0.48709112", "0.48708603", "0.48688808", "0.4864987", "0.4861965", "0.48604017", "0.48600656", "0.48593777", "0.485756", "0.48499724", "0.4835384", "0.483532", "0.4829932", "0.48290935", "0.48183852", "0.48164174", "0.48090184", "0.48022008", "0.4800226", "0.47945505", "0.4788146", "0.4788146", "0.4788146", "0.4788146", "0.4785264", "0.4779098", "0.47655743", "0.47626048", "0.4759303", "0.47478086", "0.47460514", "0.47451055", "0.47351155", "0.47241536", "0.4722511", "0.4722511", "0.47192723", "0.47103542", "0.4705252", "0.47048464" ]
0.75690794
0
Set the field with the given key to the value specified if that is not None.\n
def _set_if_not_none(self, field_key, value, verifier=str): if value is None: return if verifier is not None: value = verifier(value) self.data[field_key] = value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setnoempty(self, key, value):\r\n if value:\r\n self[key] = value", "def set(self, key, value):\n if key in self.fields:\n return self.fields.get(key).set(value)\n else:\n self.fields[key] = CustomField(default=value)\n return True", "def set(self, key, value):\n self.db_dict.setdefault(self.actual_key(key), {})[key.field_name] = value", "def __setitem__(self, key, value):\n if key not in self.fields:\n raise KeyError(key)\n return setattr(self, key, value)", "def set(self, key, value):\n if value is not None:\n self.vars[key] = value", "def set_if_not_none_or_empty(dst, key, value):\n if value is not None and (type(value) is not list or len(value)):\n dst[key] = value", "def set(self, key, value):", "def set(self, key, value):", "def assign_value(self, key, value):\n self.field_values[key] = value", "def set_value(self, field, value):\n field = self.find_first(field)\n if field is not None:\n field.value = value", "def setkey(self, key, value):\n if value == \"\":\n return\n if key == \"agency_id\":\n self.agency_id = value\n elif key == \"agency_name\":\n self.agency_name = value\n elif key == \"agency_url\":\n self.agency_url = value\n elif key == \"agency_timezone\":\n self.agency_timezone = value\n elif key == \"agency_lang\":\n self.agency_lang = value\n elif key == \"agency_phone\":\n self.agency_phone = value\n elif key == \"agency_fare_url\":\n self.agency_fare_url = value\n elif key == \"agency_email\":\n self.agency_email = value\n else:\n raise InvalidKeyError(key)", "def set(self, key: t.Hashable, value: t.Any) -> None:", "def __init__(__self__, *,\n key: Optional[pulumi.Input[str]] = None,\n value: Optional[pulumi.Input[str]] = None):\n if key is not None:\n pulumi.set(__self__, \"key\", key)\n if value is not None:\n pulumi.set(__self__, \"value\", value)", "def setCommentField(self, key, value):\n if not key:\n raise KeyError()\n \n comment= \"\"\n if value:\n comment= \"{0}='{1}'\".format(key, value) \n\n if not self.comments:\n self.comments= comment\n return\n \n pattern= re.compile(key + r\"s*=\\s*'.+'\")\n \n match= pattern.search(self.comments)\n \n if match:\n #key exists -> replace\n self.comments= ( self.comments[0:match.start(0)].strip()\n + comment\n + self.comments[match.end(0):] ).strip()\n else:\n self.comments+= \"; \" + comment", "def setfield(self, field, value):\n self.__setitem__(field, value)", "def _setValue(self, field, value):\n self._contents[field] = value", "def setdefault(pb_or_dict, key, value):\n if not get(pb_or_dict, key, default=None):\n set(pb_or_dict, key, value)", "def _put(self, key: str, value):\n pass", "def _set(self, key, value):\n self._data[key] = value\n return self._data[key]", "def test_update_field_to_null(self, field, field_name):\n control = factories.ControlFactory()\n\n response = self.api.put(control, control.id, {field: None})\n\n self.assert400(response)\n self.assertEqual(response.json[\"message\"],\n field_name + \" for the object is not specified\")\n control = db.session.query(all_models.Control).get(control.id)\n self.assertIsNotNone(control.external_id)", "def set_if_set(dct, key, value):\r\n if value is not UNSET:\r\n dct[key] = value", "def __setitem__(self, key, item):\n self.set_field(key, item)", "def __setattr__(self, k, v):\n\t\tif self.__fields is not None and k in self.__fields:\n\t\t\tself.__fields[k] = v\n\t\t\treturn v\n\t\telse:\n\t\t\treturn object.__setattr__(self, k, v)", "def add_field(key: str = None, value=None):\n data = {}\n if value is not None:\n data = {key: value}\n return data", "def 
set(self, key, value):\r\n self.set_many({key: value})", "def _set_key_value(ext, key, value):\n ext.hdr[key] = value", "def __setitem__(self, key, value):\n if not self._is_valid(value):\n value = self._fix_value(value)\n self._inner.__setitem__(key, value)", "def _SetValue(param, field, value):\n attr = None\n attr_name = ''\n for attr_name in field.split('.'):\n if attr:\n param = attr\n\n if not hasattr(param, attr_name):\n raise ValueError(\"Can't find field %s.\" % field)\n attr = getattr(param, attr_name)\n param.SetField(attr_name, value)", "def _update_property(self, field, value):\n if value is not None:\n self.properties[field] = value\n elif field in self.properties:\n del self.properties[field]", "def hsetnx(self, key, field, value, handler=bool):\n return self._command(b'HSETNX', key, field, value)", "def put(self, key, val):\n pass", "def set(self, key, value):\n raise NotImplementedError", "def setValue(self, key, value, default=False):\n self.local[key.value] = value\n if default:\n self.system.setValue(key.value, value)", "def set_field(self, key, item, short=False):\n if short or self.tail_batch is None:\n self.memory.set(key, self.s, self.e, item)\n else:\n bl = len(self.tail_batch)\n self.memory.set(key, self.s, self.e, item[:-bl])\n self.tail_batch[key] = item[-bl:]", "def update_default_from_dict(self, key, value):\n pass", "def set_value(self, key, value):\n self.data[key] = value\n self.save_data()", "def setdefault(self, key):\n pass", "def setData(key, value):\n #only string keys are accepted\n if ( type(key) != str ): return None\n \n Co8PersistentData.__dataDict[key] = value", "def set_field(key, obj, val):\n\n o = obj\n subkeys = key.split('.')\n\n for subkey in subkeys[:-1]:\n if subkey not in o:\n o[subkey] = {}\n\n o = o[subkey]\n\n o[subkeys[-1]] = val", "def set(self, key, value):\n number = self._hash(key)\n stored_key = number if self.function == 'fnv' else key\n if self.get(key) is None:\n self.bucket_list[number % self.bucket_number].insert(stored_key, value)", "def on_put_field(self, ins, const, obj, value):\n pass", "def setUnknown(self, key, val):\n # type: (str, tp.Any)->None\n if 'unknown' not in self._ifAttributes:\n self._ifAttributes['unknown'] = {}\n self._ifAttributes['unknown'][key] = val", "def setPrm(self, key, val):\n if hasattr(key, \"encode\"):\n key = key.encode(\"utf-8\") # convert str to bytes\n if hasattr(val, \"encode\"):\n val = val.encode(\"utf-8\") # convert str to bytes\n return self.setVal(self.gbls, key, val)", "def set_one(self, name_group, key, value):\n self.set(name_group, key, value)\n for item in self.get_all_childname(name_group):\n if item != key:\n self.set(name_group, item, False)", "def _setParam(self, callerId, key, value):\n if key not in self.FilterParameters:\n self.__docWriter.addParam(callerId, key)", "def put(self, key, value, key_type=None, value_type=None):\n pass", "def __setitem__(self, key, value):\n if key in self.base_keys() or key == \"flex_data\":\n setattr(self, key, value)\n else:\n valid_key_chars = re.compile(r\"^[A-Za-z_]\\w*$\")\n FlexError.require_condition(\n valid_key_chars.match(key),\n dedent(\n \"\"\"\n flex_data attribute keys must contain only letters,\n numbers, and '_', and cannot start with a number.\n \"\"\"\n ),\n )\n if value is not None:\n # the flex_data attribute may be none if the instance is not\n # yet 'added'. 
Defaults are set at mapping time\n current_flex_data = self.flex_data or {}\n self.flex_data = {**current_flex_data, key: value}", "def set_value(obj, key, value):\r\n try:\r\n obj[key] = value\r\n except TypeError:\r\n setattr(obj, key, value)", "def set_data(version, key, value):\n if key not in ALLOWED_KEYS:\n raise Exception('The key is not allowed')\n if len(value) == 1:\n value = value[0]\n save_data(load_data(), version, key, value)", "def set_value(self, key, value=None,\n options=None, option_index=None, hidden=None):\n if key not in self:\n self.set_param(key, value,\n options=options,\n option_index=option_index)\n if options is not None:\n self[key]['options'] = options\n if value is None and option_index is None:\n option_index = self[key]['option_index']\n if option_index is not None:\n value = self[key]['options'][option_index]\n self[key]['option_index'] = option_index\n elif value is not None and self[key]['options'] is not None:\n try:\n option_index = self[key]['options'].index(value)\n self[key]['option_index'] = option_index\n except ValueError:\n pass\n elif self[key]['dtype'] == 'bool':\n if str(value).lower().strip() in FALSY:\n value = False\n else:\n value = True\n self[key]['value'] = value\n if hidden is not None:\n self[key]['hidden'] = hidden", "def reset_value(config: Config, key: str) -> None:\n path, _, key = key.rpartition(\".\")\n if path:\n config = config[path]\n\n field = config._get_field(key)\n if not field:\n raise AttributeError(key)\n\n field.__setdefault__(config)", "def set(self, key, value):\n self._data[key] = value", "def set(self, key, value):\n self._data[key] = value", "def __setitem__(self, key, val):\n self.__check_key_validity(key)\n self.data[key[0]][key[1]] = val", "def __setattr__(self, attr_k, val):\n # Dynamically setting the value of the Field\n try:\n attr = object.__getattribute__(self, attr_k)\n except AttributeError:\n attr = None\n if issubclass(attr.__class__, Field):\n attr.value = val\n else:\n return object.__setattr__(self, attr_k, val)", "def setfield_serialized(self, serialized_dict_fieldname, keyname, val):\n # create on first use\n sdict = self.getcreate_serializedbdict_forfield(serialized_dict_fieldname)\n sdict.set_keyval(keyname, val)\n self.set_isdirty(True)", "def __setitem__(self, key, value):\n self.params[key].value = value", "def test_set_non_dictionary_based_field(self):\n self.assertRaises(TypeError, self._p.set_fields, '')", "def setField(self, field):\n\n # Set the new property to container\n key = (field.getFieldID(), field.getTime())\n self.fields.set_value(key, field)", "def field(self, field):\n if field is None:\n raise ValueError(\"Invalid value for `field`, must not be `None`\")\n\n self._field = field", "def __init__(__self__, *,\n key: pulumi.Input[str],\n value: pulumi.Input[str]):\n pulumi.set(__self__, \"key\", key)\n pulumi.set(__self__, \"value\", value)", "def __init__(__self__, *,\n key: pulumi.Input[str],\n value: pulumi.Input[str]):\n pulumi.set(__self__, \"key\", key)\n pulumi.set(__self__, \"value\", value)", "def __init__(__self__, *,\n key: pulumi.Input[str],\n value: pulumi.Input[str]):\n pulumi.set(__self__, \"key\", key)\n pulumi.set(__self__, \"value\", value)", "def __init__(__self__, *,\n key: pulumi.Input[str],\n value: pulumi.Input[str]):\n pulumi.set(__self__, \"key\", key)\n pulumi.set(__self__, \"value\", value)", "def __init__(__self__, *,\n key: pulumi.Input[str],\n value: pulumi.Input[str]):\n pulumi.set(__self__, \"key\", key)\n pulumi.set(__self__, \"value\", 
value)", "def __init__(__self__, *,\n key: pulumi.Input[str],\n value: pulumi.Input[str]):\n pulumi.set(__self__, \"key\", key)\n pulumi.set(__self__, \"value\", value)", "def __init__(__self__, *,\n key: pulumi.Input[str],\n value: pulumi.Input[str]):\n pulumi.set(__self__, \"key\", key)\n pulumi.set(__self__, \"value\", value)", "def __init__(__self__, *,\n key: pulumi.Input[str],\n value: pulumi.Input[str]):\n pulumi.set(__self__, \"key\", key)\n pulumi.set(__self__, \"value\", value)", "def __init__(__self__, *,\n key: pulumi.Input[str],\n value: pulumi.Input[str]):\n pulumi.set(__self__, \"key\", key)\n pulumi.set(__self__, \"value\", value)", "def test_setter_invalid_key(self):\n root = netapp_api.NaElement('root')\n try:\n root[None] = 'value'\n except Exception as e:\n if not isinstance(e, KeyError):\n self.fail(_('Error not a KeyError.'))", "def test_setter_invalid_key(self):\n root = netapp_api.NaElement('root')\n try:\n root[None] = 'value'\n except Exception as e:\n if not isinstance(e, KeyError):\n self.fail(_('Error not a KeyError.'))", "def set_value(self, key: keyType, new_value: valueType) -> None:\n self.validate(key, new_value)\n head_node_index, chain_node_index = self.exist_key(key)\n # \"head_node_index is equal to -1\" means that 'key' doesn't exist in dictionary object.\n if head_node_index == -1:\n self.add(key, new_value)\n else:\n self.hashTable[head_node_index].singlyLinkedList[chain_node_index].values = [new_value]", "async def set(self, key, value):\n trace_log(\"PersistantStorage: setting key \", key, \" to value \", value)\n self.dict[key] = value\n #self.log_set(key, value)", "def test_update_risk_field_to_null(self, field, field_name):\n risk = factories.RiskFactory()\n\n response = self.api.put(risk, risk.id, {\n field: None,\n })\n\n self.assert400(response)\n self.assertEqual(response.json[\"message\"],\n field_name + \" for the object is not specified\")\n risk = db.session.query(all_models.Risk).get(risk.id)\n self.assertIsNotNone(risk.external_id)", "def _checked_set(self, struct, field, value):\n setattr(struct, field, value)\n self._check_field_length(struct.DESCRIPTOR.fields_by_name[field], value)", "def set_item(self, key, value):\n # TODO: Add self.prefix\n self.table.putValue(key, value)", "def __init__(__self__, *,\n key: str,\n value: str):\n pulumi.set(__self__, \"key\", key)\n pulumi.set(__self__, \"value\", value)", "def __init__(__self__, *,\n key: str,\n value: str):\n pulumi.set(__self__, \"key\", key)\n pulumi.set(__self__, \"value\", value)", "def __init__(__self__, *,\n key: str,\n value: str):\n pulumi.set(__self__, \"key\", key)\n pulumi.set(__self__, \"value\", value)", "def __init__(__self__, *,\n key: str,\n value: str):\n pulumi.set(__self__, \"key\", key)\n pulumi.set(__self__, \"value\", value)", "def __init__(__self__, *,\n key: str,\n value: str):\n pulumi.set(__self__, \"key\", key)\n pulumi.set(__self__, \"value\", value)", "def set_field( self, data ):\n self.val[:] = data[:]\n return", "def __setattr__(self, key, value):\n if key != 'json_data':\n self.get_data()[key] = value\n else:\n super(BaseJsonEncodableObject, self).__setattr__(key, value)", "def set_key(self, key, value):\n if '.' 
in key:\n key, remainder = key.split('.', 1)\n try:\n self[key].set_key(remainder, value)\n except KeyError:\n self[key] = AttrDict()\n self[key].set_key(remainder, value)\n except AttributeError:\n if self[key] is None: # If the value is None, we replace it\n self[key] = AttrDict()\n self[key].set_key(remainder, value)\n # Else there is probably something there, and we don't just\n # want to overwrite so stop and warn the user\n else:\n raise KeyError('Cannot set nested key on non-dict key.')\n else:\n self[key] = value", "def __setitem__(self, key, value):\n\n self.fvals[key] = value", "def set_param(self, key='', value=None, dtype='str',\n wtype=None, name=None,\n options=None, option_index=0,\n description=None, hidden=False):\n if name is None:\n name = key\n if (value is None\n and options is not None\n and option_index is not None):\n try:\n value = options[option_index]\n except IndexError:\n pass\n # if not specified\n if wtype is None:\n if options is not None:\n wtype = 'combo_box'\n elif dtype == 'bool':\n wtype = 'check_box'\n else:\n wtype = 'text_box'\n if wtype == 'check_box':\n dtype = 'bool'\n if wtype == 'group':\n hidden = True\n pdict = {'value': value,\n 'dtype': dtype,\n 'wtype': wtype,\n 'name': name,\n 'options': options,\n 'option_index': option_index,\n 'description': description,\n 'hidden': hidden}\n OrderedDict.__setitem__(self, key, pdict)", "def __setitem__(self, key: str, value: typing.Any):\n self._params[key].value = value", "def set(self, key, value, ttl=0):\n pass", "def __setitem__(self, key: T, value: T) -> None:\n self.update(key, value)", "async def _set(self, key, value, ttl=0):\n value = str.encode(value) if isinstance(value, str) else value\n return await self.client.set(key, value, exptime=ttl or 0)", "def _set_item_impl(self, key: Any, value: Any) -> None:\n from omegaconf.omegaconf import _maybe_wrap\n\n from .nodes import AnyNode, ValueNode\n\n if isinstance(value, Node):\n do_deepcopy = not self._get_flag(\"no_deepcopy_set_nodes\")\n if not do_deepcopy and isinstance(value, Container):\n # if value is from the same config, perform a deepcopy no matter what.\n if self._get_root() is value._get_root():\n do_deepcopy = True\n\n if do_deepcopy:\n value = copy.deepcopy(value)\n value._set_parent(None)\n\n try:\n old = value._key()\n value._set_key(key)\n self._validate_set(key, value)\n finally:\n value._set_key(old)\n else:\n self._validate_set(key, value)\n\n if self._get_flag(\"readonly\"):\n raise ReadonlyConfigError(\"Cannot change read-only config container\")\n\n input_config = isinstance(value, Container)\n target_node_ref = self._get_node(key)\n special_value = value is None or value == \"???\"\n\n input_node = isinstance(value, ValueNode)\n if isinstance(self.__dict__[\"_content\"], dict):\n target_node = key in self.__dict__[\"_content\"] and isinstance(\n target_node_ref, ValueNode\n )\n\n elif isinstance(self.__dict__[\"_content\"], list):\n target_node = isinstance(target_node_ref, ValueNode)\n # We use set_value if:\n # 1. Target node is a container and the value is MISSING or None\n # 2. Target node is a container and has an explicit ref_type\n # 3. 
If the target is a NodeValue then it should set his value.\n # Furthermore if it's an AnyNode it should wrap when the input is\n # a container and set when the input is an compatible type(primitive type).\n\n should_set_value = target_node_ref is not None and (\n (\n isinstance(target_node_ref, Container)\n and (special_value or target_node_ref._has_ref_type())\n )\n or (target_node and not isinstance(target_node_ref, AnyNode))\n or (isinstance(target_node_ref, AnyNode) and is_primitive_type(value))\n )\n\n def wrap(key: Any, val: Any) -> Node:\n is_optional = True\n if not is_structured_config(val):\n ref_type = self._metadata.element_type\n else:\n target = self._get_node(key)\n if target is None:\n if is_structured_config(val):\n ref_type = self._metadata.element_type\n else:\n is_optional = target._is_optional()\n ref_type = target._metadata.ref_type\n return _maybe_wrap(\n ref_type=ref_type,\n key=key,\n value=val,\n is_optional=is_optional,\n parent=self,\n )\n\n def assign(value_key: Any, val: ValueNode) -> None:\n assert val._get_parent() is None\n v = val\n v._set_parent(self)\n v._set_key(value_key)\n self.__dict__[\"_content\"][value_key] = v\n\n if input_node and target_node:\n # both nodes, replace existing node with new one\n assign(key, value)\n elif not input_node and target_node:\n # input is not node, can be primitive or config\n if should_set_value:\n self.__dict__[\"_content\"][key]._set_value(value)\n elif input_config:\n assign(key, value)\n else:\n self.__dict__[\"_content\"][key] = wrap(key, value)\n elif input_node and not target_node:\n # target must be config, replace target with input node\n assign(key, value)\n elif not input_node and not target_node:\n if should_set_value:\n self.__dict__[\"_content\"][key]._set_value(value)\n elif input_config:\n assign(key, value)\n else:\n self.__dict__[\"_content\"][key] = wrap(key, value)", "def update_general(info, key, val):\n\n info[\"model_params\"][key] = val", "def set_option_value(self, key, value):\n\n # Check the key.\n self.__assert_option(key)\n\n # Set the value.\n self.__opt[key] = value", "def _get_value_or_default(self, key):\n value = getattr(self, key, None)\n if value is None:\n field = self.COLUMN_TO_FILED[key]\n if field.default is not None:\n value = field.default\n setattr(self, key, value)\n return value", "def set(self, key, value):\n self.data[key] = value\n logger.debug('Setting value \"%s\" for variable \"%s\"', value, key)", "def put(self, key, item):\n if key is None or item is None:\n return\n self.cache_data[key] = item", "def set_attribute(self, key, value):\n self.__dict[key] = value\n self.__nonzero = True", "def __setitem__(self, key, value):\n try:\n kvp = self.keyvaluepair_set.get(key=key)\n except KeyValuePair.DoesNotExist:\n KeyValuePair.objects.create(container=self, key=key, value=value)\n else:\n kvp.value = value\n kvp.save()", "def set_data(self,key='',val=None):\n parent_itm = self._root\n if '.' in key:\n parent_itm = self.get_data(self.parent_key(key))\n itm_key = key.split('.')[-1]\n if itm_key:\n try: \n parent_itm[itm_key] = val\n except:\n try: \n parent_itm[int(itm_key)] = val # list case\n except:\n parent_itm.append(val) # append to list case", "def convert_nulls(dic, null_value):\n for key in dic.iterkeys():\n if dic[key] is None:\n dic[key] = null_value" ]
[ "0.7059926", "0.6968242", "0.66217285", "0.66039425", "0.6589556", "0.65186286", "0.649909", "0.649909", "0.6493354", "0.63856846", "0.6376981", "0.6247511", "0.619377", "0.61290056", "0.6116902", "0.6098176", "0.60701644", "0.6058743", "0.6058516", "0.6049325", "0.6034123", "0.60324144", "0.59882534", "0.5963919", "0.59543633", "0.59541684", "0.5936525", "0.59148186", "0.589249", "0.5891535", "0.58498985", "0.5843175", "0.5838436", "0.57825315", "0.5780361", "0.5776296", "0.57659125", "0.5754691", "0.57345116", "0.5726585", "0.5716075", "0.570905", "0.56667995", "0.56484604", "0.56466746", "0.56449944", "0.5644833", "0.5643444", "0.564186", "0.56330425", "0.5628975", "0.5624277", "0.5624277", "0.56238055", "0.5619039", "0.5610573", "0.5610347", "0.56028813", "0.5583135", "0.5582728", "0.5581943", "0.5581943", "0.5581943", "0.5581943", "0.5581943", "0.5581943", "0.5581943", "0.5581943", "0.5581943", "0.55814075", "0.55814075", "0.55810064", "0.55807966", "0.55784655", "0.5578424", "0.55767083", "0.55739933", "0.55739933", "0.55739933", "0.55739933", "0.55739933", "0.557016", "0.5568439", "0.55594367", "0.55536085", "0.5553342", "0.5550499", "0.5546028", "0.55442727", "0.5536801", "0.55236673", "0.55132914", "0.55066425", "0.5495798", "0.5493672", "0.54862034", "0.54832", "0.5474979", "0.5472216", "0.5469501" ]
0.7619028
0
Return the given time or the current time if it's None.
def _get_current_time_if_none(given_time): return given_time or time.time()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_time(self, _time: Optional[float] = None) -> float:\n if _time is None:\n return time.time()\n\n return _time", "def get_time():\n return datetime.datetime.now()", "def time(self) -> Optional[str]:\n return pulumi.get(self, \"time\")", "def time(self) -> Optional[str]:\n return pulumi.get(self, \"time\")", "def time(self) -> Optional[str]:\n return pulumi.get(self, \"time\")", "def time(self) -> Optional[str]:\n return pulumi.get(self, \"time\")", "def time(self) -> Optional[str]:\n return pulumi.get(self, \"time\")", "def time(self) -> Optional[str]:\n return pulumi.get(self, \"time\")", "def get_now_time() -> str | None:\n if now_time := arg_to_datetime('now'):\n return now_time.strftime(DATE_FORMAT)\n else:\n return None", "def get_time(time):\n regtime = re.compile(r'^([0-1][0-9]|[2][0-3]):([0-5][0-9])$')\n if not regtime.match(time):\n return None\n time_group = regtime.match(time).groups()\n time_final = datetime.time(int(time_group[0]), int(time_group[1]))\n return time_final", "def get_time():\n\teastern = timezone('US/Eastern')\n\tnow = datetime.datetime.now(eastern).time()\n\treturn(now)", "def get_timeval():\n return convert_timeval(time.time())", "def get_current_time():\n return datetime.datetime.now()", "def __get_timeval():\n return convert_timeval(time.time())", "def time_now():\n return datetime.datetime.now().time()", "def get_current_time():\n return int(time.time())", "def current_time():\n return time.time()", "def time_of_the_day(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"time_of_the_day\")", "def time(value, arg=None):\n if value in (None, ''):\n return ''\n try:\n return formats.time_format(value, arg)\n except (AttributeError, TypeError):\n try:\n return time_format(value, arg)\n except (AttributeError, TypeError):\n return ''", "def time(self):\n try:\n if self.single_date:\n return self.stime\n else:\n return self.stime + (self.etime - self.stime) / 2\n except TypeError:\n return None", "def __get_current_time(self) -> datetime:\n #return datetime.strptime(\"11:30\", '%H:%M')\n return datetime.now()", "def get_current_time(self):\n return self.time", "def get_time():\r\n return datetime.datetime.now().strftime(\"%H\")+\":\"+datetime.datetime.now().strftime(\"%M\")+\":\"+datetime.datetime.now().strftime(\"%S\")", "def current_time():\n now = datetime.datetime.now()\n time = now.strftime(\"%Y-%m-%d %H:%M:%S:%f\")\n return time", "def get_current_time():\n return datetime.now()", "def current_time(cls) -> float:", "def get_time(self):\n return self.time_param", "def get_time(self):\n return self._current_time", "def get_time(self):\n return self._current_time", "def get_time() -> int:\n return store.time", "def time_now():\n return time.time()", "def friendly_time(time=None):\n if time is None:\n time = pass_good_until()\n return time.strftime(config.TIME_PRINT_FORMAT)", "def curr_time():\r\n try:\r\n curr_time = datetime.datetime.now().strftime(\"%H:%M:%S\")\r\n except Exception as e:\r\n print(e)\r\n curr_time = False\r\n return curr_time", "def datetime_from_time(time: datetime.time, date: datetime.date = datetime.date.today()):\n if type(time) == datetime.time:\n return datetime.datetime.combine(date, time)\n else:\n return time", "def currentTime():\n return strftime(\"%H:%M:%S\", time.localtime())", "def get_time(self):\n return self.time", "def formatted_time() -> datetime.datetime:\r\n return datetime.datetime.now()", "def get_time(self):\n return self._current_time_sec", "def get_time():\n return 
time.strftime(\"%Y%m%d-%H%M%S\")", "def nowStr(time=None):\n if time is None:\n time = datetime.now().time()\n if time.minute < 10:\n return time.strftime(\"%H ноль %m\")\n else:\n return time.strftime(\"%H %M\")", "def getDefaultTime(self):\n return max(tvp[0] for tvp in self.timeValuePairs)", "def get_time(self):\n return datetime.datetime.now(self.time_zone)", "def now(time):\n a = datetime.fromtimestamp(time).strftime('%Y-%m-%d %H:%M:%S')\n return a", "def now(time):\n a = datetime.fromtimestamp(time).strftime('%Y-%m-%d %H:%M:%S')\n return a", "def get_time():\n\n time_format = \"%Y-%m-%d %H:%M:%S\"\n now = str(datetime.datetime.now().strftime(time_format))\n\n return now", "def now():\n if os.sys.platform == 'win32':\n return time.clock() # best for windows? seems to give finer temporal resolution.\n else:\n return time.time() # best for Unix, others???", "def get_current_time():\n return time.strftime(\"%Y-%m-%d-%H-%M-%S\", time.localtime())", "def current_state_time(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"current_state_time\")", "def time_from_now(self, **options):\n return self.time_from(self.now())", "def current_time():\n now = datetime.now().strftime(\"%Y/%m/%d %H:%M:%S.%f\")\n return now", "def get_time():\r\n \r\n dt = datetime.datetime.now()\r\n dt_parsed = dt.strftime(\"%Y-%m-%d %H:%M:%S\")\r\n return dt_parsed", "def getCurrentTime():\n\tnow = datetime.datetime.now()\n\thr = now.hour\n\tgreeting = \"\"\n\tampm = \"\"\n\tif (hr < 12): #morning\n\t\thr = hr\n\t\tgreeting = \"morning\"\n\t\tampm = \"am\"\n\telif (hr >= 12 and hr < 1): #afternoon\n\t\thr = hr\n\t\tgreeting = \"afternoon\"\n\t\tampm = \"noon\"\n\telif (hr > 12 and hr < 19): #evening\n\t\thr = hr - 12\n\t\tgreeting = \"evening\"\n\t\tampm = \"pm\"\n\telse: #night\n\t\thr = hr - 12\n\t\tgreeting = \"night\"\n\t\tampm = \"pm\"\n\treturn str(hr) + ':' + str(now.minute),ampm, ' in the ', greeting", "def get_now_hour_utc(no_microseconds=True):\n if no_microseconds:\n return datetime.time.utcnow().replace(microsecond=0).time()\n else:\n return datetime.time.utcnow().time()", "def get_time(self):\n return self._time", "def get_time(self):\n return self._time", "def get_time(self):\n return self._time", "def _get_current_time() -> str:\n return datetime.now().strftime(\"%FT%H:%M:%S\")", "async def current_time_handler():\n\n return time_millis()", "def lasttime(self):\n if hasattr(self, \"_lasttime\"):\n return self._lasttime\n else:\n return None", "def get_current_time():\n dateTime = datetime.datetime.now()\n # \"%Y-%m-%d %H:%M:%S:%f\" is default formatting with everything\n dateTime = dateTime.strftime(\"%m-%d-%y %H:%M:%S\")\n\n logger.logger.debug(\"Getting current time: {}\".format(dateTime))\n\n return dateTime", "def now():\n return utcfromtimestamp(time.time())", "def time_now():\n cur_time = str(datetime.now().strftime(\"%d-%m-%Y %H:%M:%S\"))\n return cur_time", "def get_time_based_otp(self, otp_passcode: Optional[str] = None):\n if otp_passcode:\n self.set_time_based_otp(otp_passcode)\n if not self._totp:\n raise TOTPNotSetError(TOTPNotSetError.ERROR_MSG)\n return self._totp.now()", "def time():\n _check_init()\n return _pypm.Time()", "def get_time() -> str:\n return strftime(\"%H:%M:%S\")", "def get_now_hour_utc(no_microseconds=True):\n if no_microseconds:\n return datetime.datetime.utcnow().replace(microsecond=0).time()\n else:\n return datetime.datetime.utcnow().time()", "def time():\n return datetime.datetime.now().strftime(\"%Y%m%dT%H%M%SZ\")", "def get_time(self):\n\t\treturn 
time.time()", "def get_time(cls):\n now = rospy.Time.now()\n return now.secs + now.nsecs*(10**-9) # time in seconds", "def get_time_defaults(game_id: int, start_time: float = None, end_time: float = None):\n game_start, game_end = get_game_start_and_end(game_id)\n if start_time is None:\n start_time = game_start\n\n if end_time is None:\n current_time = time.time()\n end_time = current_time if current_time < game_end else game_end\n\n return start_time, end_time", "def get_time(self):\n return self.__time", "def get_current_time() -> int:\n float_time = time.time()\n return int(float_time * 1000) # Convert to ms and int", "async def time(self) -> dt.time:\n now = await self.AD.sched.get_now()\n return now.astimezone(self.AD.tz).time()", "def getOldestTime(self):\n if not self.cache_times:\n return None\n return self.cache_times[0]", "def time_now():\n ts = datetime.datetime.now().timetuple()\n return '{wday} {day} {month} {year} {hour}:{minute:0>2d}:{second:0>2d} UTC'.format(\n year=ts.tm_year, month=calendar.month_name[ts.tm_mon],\n day=ts.tm_mday, wday=calendar.day_name[ts.tm_wday],\n hour=ts.tm_hour, minute=ts.tm_min, second=ts.tm_sec)", "def _astropy_time(time):\n return time if isinstance(time, astropy.time.Time) else astropy.time.Time(parse_time(time))", "def get_time():\n # Use this one for production:\n now_time = pendulum.now(tz=pendulum.timezone(\"America/New_York\"))\n # Use this one for testing and modify as needed:\n # now_time = pendulum.datetime(2019, 7, 21, 20, 00, tz='America/New_York')\n\n return now_time", "def get_time_now(as_string: bool = False) -> Union[str, datetime]:\n now = arrow.utcnow()\n return now.iso_format() if as_string else now.datetime # type: ignore", "def get_current_time() -> int:\n hour_min = datetime.now(\n pytz.timezone('US/Eastern')\n ).strftime(\"%H,%M\").split(',')\n\n return int(''.join(hour_min))", "def create_time(given_time: Any | None) -> str | None:\n if not given_time:\n return None\n if datetime_time := arg_to_datetime(given_time):\n return datetime_time.strftime(DATE_FORMAT)\n else:\n raise DemistoException(\"Time parameter supplied in invalid, make sure to supply a valid argument\")", "def get_timer_set_time(self) -> Optional[int]:\n current_mode = self.get_mode()\n # Check that 'Timer' program is enabled.\n # Retreiving the set-point time without\n # this programm being selected first would trigger\n # a key error when unpacking the device reply.\n if current_mode != 'Timer':\n self.logger.warning(\"Can't retreive set time of the 'Timer' program \"\n \"since this program is not currently selected \"\n f\"(selected program is '{current_mode}'). 
Select \"\n \"'Timer' program first.\")\n return None\n else:\n return self.send(self.cmd.GET_TIMER_SET_TIME)", "def current_time():\n start = datetime.time(hour=alarm_start_hour, minute=alarm_start_minute)\n now = datetime.datetime.now()\n\n delta = datetime.timedelta(hours=now.hour - start.hour, minutes=now.minute - start.minute)\n\n return max(0, delta.seconds)", "def get_current_time(time_max=False):\n # Get local time from Adafruit IO\n magtag.get_local_time(secrets[\"timezone\"])\n # Format as RFC339 timestamp\n cur_time = r.datetime\n if time_max: # maximum time to fetch events is midnight (4:59:59UTC)\n cur_time_max = time.struct_time(\n (\n cur_time[0],\n cur_time[1],\n cur_time[2] + 1,\n 4,\n 59,\n 59,\n cur_time[6],\n cur_time[7],\n cur_time[8],\n )\n )\n cur_time = cur_time_max\n cur_time = \"{:04d}-{:02d}-{:02d}T{:02d}:{:02d}:{:02d}{:s}\".format(\n cur_time[0],\n cur_time[1],\n cur_time[2],\n cur_time[3],\n cur_time[4],\n cur_time[5],\n \"Z\",\n )\n return cur_time", "def stime(self):\n try:\n return self['datetime_1']\n except TypeError:\n return None", "def current_time():\n\n return int(1000 * time())", "def make_current():\n current = datetime.datetime.now()\n hour = '{:02d}'.format(current.hour)\n minute = '{:02d}'.format(current.minute)\n second = '{:02d}'.format(current.second)\n current_time = hour + minute + second\n return current_time", "def time_to_now(self, **options):\n return self.time_to(self.now())", "def getDefaultTime(self):\n return self.valueSchedule.getDefaultTime()", "def gettime(self):\n return self.t", "def time_in_millis(my_time=None):\n\n if my_time:\n t = my_time\n else:\n t = gmtime()\n\n return timegm(t)", "def friendly_time(time=None):\n if time is None:\n time = pass_good_until(config.HOURS_TO_GRANT_ACCESS, 7)\n return time.strftime(config.TIME_PRINT_FORMAT)", "def get_time_now(self, mboard=0):\n return _uhd_swig.usrp_source_get_time_now(self, mboard)", "def time_left(timestamp):\n if timestamp is None:\n return None\n return max(0.0, timestamp - time.time())", "def current_time(self):\n return self._current_time", "def timet_iso(t=time.time()):\n return datetime.datetime.now().isoformat()[0:19]", "def getTime(toConvert = None):\n if toConvert == None:\n return time.mktime(\n datetime.datetime.now().timetuple()\n )\n else:\n return time.mktime(\n toConvert.timetuple()\n )", "def now_s():\n return calendar.timegm(now_dt().utctimetuple())", "def now():\r\n return time.time()", "def get_current_time():\n\n now = dt.datetime.now()\n total_time = (now.hour * 3600) + (now.minute * 60) + (now.second)\n return total_time", "def _time_string():\n os.environ['TZ'] = config.time_zone\n time.tzset()\n return time.asctime()" ]
[ "0.80231345", "0.7069105", "0.6942688", "0.6942688", "0.6942688", "0.6942688", "0.6942688", "0.6942688", "0.6916243", "0.68154377", "0.66877276", "0.66112775", "0.6595094", "0.655087", "0.65502095", "0.6544651", "0.65381604", "0.6524668", "0.64543414", "0.6442125", "0.64383686", "0.64383686", "0.6432888", "0.6417531", "0.6402062", "0.6397155", "0.63804525", "0.6367786", "0.6367786", "0.6355576", "0.6352885", "0.62944967", "0.6280638", "0.6257087", "0.62548083", "0.6254632", "0.6252267", "0.624088", "0.62342066", "0.62332004", "0.6224695", "0.62217915", "0.6183878", "0.6183878", "0.6166927", "0.6149149", "0.6130191", "0.6117673", "0.61074287", "0.6104145", "0.609948", "0.60985506", "0.60803956", "0.60716915", "0.60716915", "0.60716915", "0.60714656", "0.6069004", "0.606862", "0.605665", "0.605299", "0.60484254", "0.6046377", "0.60456496", "0.6043828", "0.6031766", "0.6029141", "0.60288435", "0.6008229", "0.6005633", "0.59821516", "0.5982049", "0.59789634", "0.59580916", "0.5954145", "0.5953992", "0.5952647", "0.59517306", "0.5940293", "0.5933667", "0.59316254", "0.5926612", "0.5917196", "0.5905139", "0.58901995", "0.58881", "0.5887879", "0.5886399", "0.58791196", "0.58652157", "0.5855974", "0.58497757", "0.58407736", "0.58390903", "0.58352137", "0.58268034", "0.5820659", "0.5816158", "0.58121866", "0.5788094" ]
0.85907197
0
Return the given UUID or generate one if it's None.
def _generate_uuid_str_if_none(given_uuid): return given_uuid or uuid.uuid4().__str__()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_uuid():\n return str(uuid.uuid4())", "def generate_uuid():\n return uuid.uuid4()", "def _generate_uuid():\n return str(uuid.uuid4())", "def generate_uuid():\n return f'{uuid.uuid1()}'", "def get_uuid():\n\n x = uuid.uuid1()\n return str(x)", "def generate_uuid():\n return uuid.uuid4().hex", "def generate_uuid():\n return uuid.uuid4()", "def uuid_(identifier: Optional[uuid.UUID]) -> Optional[str]:\n if identifier is None:\n return None\n\n return str(identifier)", "def get_uuid():\n\n return str(uuid.uuid4())", "def uuid(_uuid=uuid4):\n return str(_uuid())", "def get_uuid():\n return str(uuid4())", "def gen_uuid():\n return str( uuid.uuid4() )", "def gen_uuid():\n return str( uuid.uuid4() )", "def gen_uuid():\n return str(uuid.uuid4())", "def generateUUID(): # pylint: disable=C0103\r\n return str(uuid.uuid4())", "def getUUID():\n return str(uuid.uuid4())", "def generate_uuid(basedata=None):\n if basedata is None:\n return str(uuid.uuid4())\n elif isinstance(basedata, str):\n checksum = hashlib.md5(basedata.encode()).hexdigest()\n return str(uuid.UUID(checksum))\n else:\n raise TypeError(\"The 'basedata' must be string or None\")", "def gen_uuid() -> str:\n return str(uuid4())", "def genUuid(seq=None):\n if seq is not None:\n return uuid.uuid1().hex + uuid.uuid3(uuid.NAMESPACE_DNS, seq).hex\n return uuid.uuid1().hex + uuid.uuid3(\n uuid.NAMESPACE_DNS, uuid.uuid1().hex).hex", "def _generate_uuid(self):\n\n return uuid.uuid4()", "def _NewUUIDString ():\n if __HaveUUID:\n return uuid.uuid1().urn\n return '%s:%08.8x' % (time.strftime('%Y%m%d%H%M%S'), random.randint(0, 0xFFFFFFFF))", "def generate_ulid_as_uuid(timestamp=None, monotonic=False):\n return uuid.UUID(bytes=generate_binary_ulid(timestamp, monotonic=monotonic))", "def get_uuid(device):\n uuids = uuid_table()\n return str(uuids[device])", "def uuid(self):\n if self._uuid is not None:\n return self._uuid\n if not self.exists:\n return None\n self.retr_uuid()\n return self._uuid", "def _uuid(self):\n u = self.__uuid\n if u is None:\n u = str(uuid.uuid1())\n self._set_uuid(u)\n return u", "def get_uuid(s):\n sha = sha256(s.encode('utf-8')).hexdigest()\n uuid = UUID(sha[:32])\n return str(uuid)", "def UUID(self, default=None):\n return self.data.get('uuid', default)", "def build_uuid(self):\n self._uuid = str(uuid.uuid1())\n return self._uuid", "def uuid():\n return ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(32))", "def get_or_create_unique_id(self):\n if not self.unique_id:\n self.unique_id = uuid.uuid4().hex\n return self.unique_id", "def uuid(seed):\n return uuid4().get_hex()", "def _make_uuid(val):\n h = hashlib.md5(val).hexdigest()\n return '{0}-{1}-{2}-{3}-{4}'.format(\n h[:8], h[8:12], h[12:16], h[16:20], h[20:])", "def uuid( *args ):\n t = long( time.time() * 1000 )\n r = long( random.random()*100000000000000000L )\n try:\n a = socket.gethostbyname( socket.gethostname() )\n except:\n # if we can't get a network address, just imagine one\n a = random.random()*100000000000000000L\n data = str(t)+' '+str(r)+' '+str(a)+' '+str(args)\n data = hashlib.md5(data).hexdigest()\n return data", "def uuid(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"uuid\")", "def uuid(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"uuid\")", "def uuid(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"uuid\")", "def generate_id():\n return str(uuid.uuid4())[:5].replace('e','a')", "def ordered_uuid(value=None):\n if not HAVE_ORDERED_UUID:\n raise 
RuntimeError(\"ordered_uuid package: not found\")\n if not value:\n value = str(uuid.uuid1())\n return OrderedUUID(value)", "def create_default_identifier():\n return random.randint(0, constants.UINT64_MAX)", "def _generate(self, hashed = True):\r\n\r\n identifier = str(uuid.uuid4())\r\n identifier = identifier.upper()\r\n if not hashed: return identifier\r\n identifier = legacy.bytes(identifier)\r\n hash = hashlib.sha256(identifier)\r\n identifier = hash.hexdigest()\r\n identifier = identifier.upper()\r\n return identifier", "def generate_id():\n return uuid4().get_hex()", "def uuid_for_next_dt(u):\n t60 = time60_from_uuid(u)\n return min_uuid_from_time60(t60 + 1)", "def _load_uuid(hass, filename=UPDATER_UUID_FILE):\n try:\n with open(hass.config.path(filename)) as fptr:\n jsonf = json.loads(fptr.read())\n return uuid.UUID(jsonf['uuid'], version=4).hex\n except (ValueError, AttributeError):\n return None\n except FileNotFoundError:\n return _create_uuid(hass, filename)", "def safe_uuid() -> str:\n\n taken = os.listdir(DATA_DIR)\n while True:\n new_uuid = gen_uuid()\n if new_uuid in taken:\n logger.warning('uuid collision %s', new_uuid)\n else:\n logger.info('uuid=%s', new_uuid)\n return new_uuid", "def getuuid(value, table, table_attrib, error_tail):\n if value is None:\n return value\n\n elif modelfor(value, table):\n value = getattr(value, table_attrib, None)\n if value is None:\n raise ValueError(\"null id provided for %s\" % error_tail)\n return value\n\n # if a string was provided then we should\n # try to convert it into a uuid first to\n # be sure it's valid\n elif isinstance(value, STRING_TYPES):\n UUID(value)\n return value\n\n elif isinstance(value, UUID):\n return str(value)\n\n else:\n raise ValueError(\"failed to determine %s\" % error_tail)", "def generate_client_token_by_uuid():\n return str(uuid.uuid4())", "def generate_client_token_by_uuid():\n return str(uuid.uuid4())", "def default_code():\n return uuid.uuid4().hex", "def generate_user_id() -> str:\n return 'u' + str((uuid.getnode()))", "def new_uid():\n return str(uuid.uuid1())[:30]", "def guid():\n return _guid64()", "def get_uuid(self, obj):\n return IUUID(obj, None)", "def get_uuid(self, obj):\n return IUUID(obj, None)", "def uuid4():\n b = ''.join('%x' % x for x in os.urandom(16))\n return \"%s-%s-%s-%s-%s\" % (b[0:8], b[8:12], b[12:16], b[16:20], b[20:])", "def uuid(self, obj: typing.Any = None) -> str:\n if obj is None:\n obj = self.randomString()\n self._counter += 1\n elif isinstance(obj, bytes):\n obj = obj.decode('utf8') # To binary\n else:\n obj = '{}'.format(obj)\n\n return str(uuid.uuid5(self._namespace, obj)).lower() # I believe uuid returns a lowercase uuid always, but in case... 
:)", "def test_uuid_default(self):\r\n default = uuid.uuid4()\r\n prop = UUID(default=default, required=True)\r\n self.assertEqual(prop.to_database(None), prop.to_database(default))", "def generate_trackerid():\n\n trackerid = None\n while trackerid is None or \\\n Profile.objects.filter(trackerid=trackerid).exists():\n trackerid = uuid.uuid4().hex\n return trackerid", "def generateId( self ):\n # try to use the uuid module\n try:\n import uuid\n return uuid.uuid1()\n \n # otherwise, use the random module\n except ImportError:\n import random\n return random.randint(-1000000000000, 1000000000000)", "def set_uuid(self, device):\n import uuid\n\n return str(uuid.uuid4())", "def _make_uuid():\n parts = [Record._hex_string(k) for k in Record.UUID_PARTS]\n return \"-\".join(parts)", "def generate_wallet_id(cls) -> str:\n return str(uuid.uuid4())", "def generate_uuid(value, org_id_prefix, org_name=None):\n # TODO: Refactor to avoid duplication\n if org_id_prefix:\n base_hash = hashlib.md5(org_id_prefix.encode())\n else:\n base_hash = hashlib.md5(org_name.encode())\n\n base_digest = base_hash.hexdigest()\n base_uuid = uuid.UUID(base_digest)\n\n combined_value = (str(base_uuid) + str(value)).encode()\n value_hash = hashlib.md5(combined_value)\n value_digest = value_hash.hexdigest()\n value_uuid = str(uuid.UUID(value_digest))\n return value_uuid", "def create_random_username() -> str:\n return str(uuid.uuid4())", "def uuid(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"uuid\")", "def generate_storage_identifier():\n return str(uuid.uuid1())", "def generate_token():\n return uuid4()", "def uuid4(short: bool = False) -> str:\n return str(uuid.uuid4())[:18 if not short else 8]", "async def get_uuid(url: str, token: str | None = None) -> str | None:\n conn = ZWaveMe(url=url, token=token)\n uuid = None\n if await conn.get_connection():\n uuid = await conn.get_uuid()\n await conn.close_ws()\n return uuid", "def function_uuid():\r\n yield uuid.uuid4()", "def generate_trackerid():\r\n\r\n trackerid = None\r\n while trackerid is None or \\\r\n Profile.objects.filter(trackerid=trackerid).exists():\r\n trackerid = uuid.uuid4().hex\r\n return trackerid", "def generate_uid(length=10):\n uid = uuid.uuid4().hex[0:length]\n return uid", "def create_uid():\n return random_string(5, string.hexdigits.lower())\n # return (\"%x\" % (int(time.time()) * 0x10 % 0x1000000000)\n # + random_string(7, string.hexdigits.lower()))", "def generate_random_UID(self):\n\t\tUID = random.randint(0, 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)\n\t\tprint(f\"Generated random UID {UID}\")\n\t\treturn UUID(int=UID)", "def __generate_random_string():\n return uuid4().hex[:6].upper()", "def uuid(self):\n try:\n return self.keystore['id']\n except KeyError:\n return None", "def _uniq_id():\n return random.getrandbits(64)", "def create_tag_id():\n return uuid.uuid1().int", "def generate_product_number():\n return str(uuid.uuid4())", "def getUserUuid(userId):\n return searchForUser(userId)['uuid']", "def uuid():\n from dallinger.experiment import Experiment\n\n click.echo(Experiment.make_uuid())", "def get_uuid(self):\n\t\treturn call_sdk_function('PrlUsrInfo_GetUuid', self.handle)", "def to_uuid(string):\n if sys.version_info[0] == 2:\n string = string.encode('utf-8')\n \n # This the seed Ansible has chosen for their UUID's\n return str(uuid.uuid5(uuid.UUID('361E6D51-FAEC-444A-9079-341386DA8E2E'), string))", "def UUIDGen():\n\trandGen = random.Random()\n\trandGen.seed()\n\thashGen = sha.new(randStr512(randGen))\n\twhile 
1:\n\t\thashGen.update(randStr512(randGen))\n\t\thashed = hashGen.digest()\n\t\tyield '%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x' % (\n\t\t\tord(hashed[0]), ord(hashed[1]), ord(hashed[2]), ord(hashed[3]),\n\t\t\tord(hashed[4]), ord(hashed[5]),\n\t\t\tord(hashed[6]) & 0x0F | 0x40, ord(hashed[7]),\n\t\t\tord(hashed[8]) & 0x3F | 0x80, ord(hashed[9]),\n\t\t\tord(hashed[10]), ord(hashed[11]),\n\t\t\tord(hashed[12]), ord(hashed[13]), ord(hashed[14]), ord(hashed[15]) )", "def generate_key():\n return str(uuid.uuid4())", "def get_uuid_from_url(self, request):\n url_parts = request.META.get(\"PATH_INFO\").split(\"/\")\n try:\n given_uuid = str(UUID(url_parts[url_parts.index(\"cost-models\") + 1]))\n except ValueError:\n given_uuid = None\n return given_uuid", "def _random_id():\n return binascii.hexlify(os.urandom(4)).decode()", "def uuid(self, type, val):\n picker = lambda x: x.get('uuid', x)\n return self._get((type, val), picker)", "def guid():\n base_uuid = uuid.uuid4()\n number = base_uuid.int & ((2 ** 20) - 1)\n return base62_encode(number)", "def get_uuid(disk):\n\n #TODO\n return \"Unknown\"", "def uuid(self, *args ):\n t = int( time.time() * 1000 )\n r = int( random.random() * 100000000000000000 )\n try:\n a = socket.gethostbyname( socket.gethostname() )\n except:\n # if we can't get a network address, just imagine one\n a = random.random() * 100000000000000000\n data = str(t) + ' ' + str(r) + ' ' + str(a) + ' ' + str(args)\n data = md5.md5(data).hexdigest()\n\n return data", "def generate_random_uid():\n choice = string.ascii_uppercase + string.digits\n return ''.join([random.choice(choice) for _ in range(36)])", "def generate_unique(seed=None):\n if not seed:\n seed = str(time() * random())\n return unicode(abs(hash(sha(seed).hexdigest())))", "def id_generator():\r\n new_id = uuid.uuid4()\r\n return new_id.hex", "def get_unique_id():\n\n return int.from_bytes(os.urandom(3), byteorder='big')", "def skip_or_run_uuid_test(func):\n\n return skip_or_run_test_tarantool(func, '2.4.1',\n 'does not support UUID type')", "def generate_id():\n return \"%s-%s\" % (str(uuid.uuid4())[:4],random.choice(funnames).lower())", "def uid():\r\n u = str(uuid.uuid4())[:22]\r\n u = u.replace(\"-\",\"_\")\r\n return u", "def _get_uuid(self, *args: Any, **kwargs: Any) -> str:\n return \"distance\"", "def makeid(cls):\n return str(uuid.uuid4().hex)", "def _get_rec_uuid(self, uuid, context=None):\n if context is not None:\n moduuid = context.get('moduuid')\n if moduuid:\n return get_uuid(moduuid, uuid)\n return uuid" ]
[ "0.7421662", "0.7368809", "0.73120177", "0.72946113", "0.7287514", "0.72534025", "0.7210841", "0.7191255", "0.7175415", "0.71420234", "0.70950955", "0.70928544", "0.70928544", "0.7074644", "0.6998014", "0.6964532", "0.69394946", "0.6919046", "0.6864828", "0.6823264", "0.67820066", "0.6770247", "0.6718863", "0.6665475", "0.66348696", "0.6585173", "0.65849054", "0.65606475", "0.6532737", "0.6531959", "0.6456424", "0.64063334", "0.6393034", "0.63889086", "0.63889086", "0.63889086", "0.6368835", "0.6357101", "0.63348204", "0.6292439", "0.6278638", "0.6276865", "0.62614167", "0.6257636", "0.6250394", "0.6242134", "0.6242134", "0.6238432", "0.61995864", "0.61978585", "0.61971396", "0.61929345", "0.61929345", "0.61860645", "0.61802983", "0.61736023", "0.61526585", "0.6149053", "0.6139802", "0.61397344", "0.61243635", "0.6118526", "0.6112779", "0.61054444", "0.6092116", "0.60914946", "0.6082324", "0.60777336", "0.60711527", "0.6069716", "0.60613483", "0.605403", "0.6048502", "0.6034887", "0.6031746", "0.6014061", "0.600959", "0.5998713", "0.59934944", "0.5986742", "0.59827936", "0.5968439", "0.59565514", "0.5954757", "0.5949434", "0.59469223", "0.5943456", "0.5942729", "0.59426045", "0.59246373", "0.5901134", "0.5892302", "0.5890831", "0.58824503", "0.5858404", "0.5855141", "0.5851", "0.58473635", "0.5845297", "0.58440304" ]
0.79678607
0
Convert the given time to int.
def _verify_time(given_time): return int(given_time)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def time_to_int(time):\n minutes = time.hour * 60 + time.minute\n seconds = minutes * 60 + time.second\n return seconds", "def time_to_int(self):\n minutes = self.hour * 60 + self.minute\n seconds = minutes * 60 + self.second\n return seconds", "def time_to_int(str_time):\n dt = time.mktime(\n datetime.datetime.strptime(str_time, \"%Y-%m-%dT%H:%M:%S\").timetuple()\n )\n return dt", "def _intTime(tStr):\n return int(float(tStr))", "def time_to_int(self):\n minutes = self.hour * 60 + self.minute\n secconds = self.minute * 60 + self.second\n return secconds", "def time_to_number(self, time):\n val = len(self.time[0:(\n np.abs(np.array(self.time.values, dtype=np.float32)-np.abs(time))).argmin()])\n if time < 0:\n val = -val\n return val", "def _to_int(self, num):\n assert isinstance(num, Number), 'Is not number in _to_int'\n return floor(self.__tick_to_unit_time * num)", "def _hx_time_to_epoch(self, timestr: str) -> int: # pragma: no cover\n\n time_obj = datetime.datetime.strptime(timestr, \"%Y-%m-%dT%H:%M:%S.%fZ\")\n\n return int(time_obj.strftime(\"%s\"))", "def raw_time_to_centi(raw_time: str) -> int:\n\n time = int(raw_time[2:7])\n minutes = time // (Time.SECOND * 100)\n\n time = (time % (Time.SECOND * 100)) + minutes * Time.MINUTE\n return time", "def getIntTime():\n return int(time.time())", "def convertGMTToInteger(self, timeStr):\n r = self.__utils.convertGMTToInteger(timeStr)\n #print \"%s - %s\" % (timeStr, r)\n return r", "def calculate_hours(time):\n return int(time / 3600)", "def human_to_timestamp(time: str) -> int:\n try:\n return int(time)\n except ValueError:\n pass\n time = dateparser.parse(time)\n if time is None:\n raise ValueError\n return int(time.timestamp() * 1e6)", "def to_seconds(time):\n return 3600 * time", "def convert_time_to_second(time_string):\n return int(time.mktime(time.strptime(time_string, TIME_PATTERN)))", "def get_sec(time):\n m, s = time[-4:-2], time[-2:]\n if time[-4:2] == '' : m = 0\n if time[-2:] == '' : s = 0\n\n return int(m) * 60 + int(s)", "def parse_date(input_time: str) -> int:\n return int(dateutil.parser.parse(input_time).timestamp() * 1000)", "def litres(time):\n return int(time / 2)", "def convert_time(t):\n minutes = int(t/60)\n seconds = int(t-60*minutes)\n return minutes, seconds", "def timecode(time_now, interval):\n i = time.mktime(time_now.timetuple())\n return int(i / interval)", "def convert_time(timestr):\n # 09:11:13 -> 9*60*60 + 11*60 + 13\n h, m, s = timestr.split(':')\n return int(h) * 3600 + int(m) * 60 + int(s)", "def get_sec(time_str):\n h, m, s = time_str.split(':')\n return int(h) * 3600 + int(m) * 60 + int(s)", "def get_sec(time_str):\n h, m, s = time_str.split(':')\n return int(h) * 3600 + int(m) * 60 + int(s)", "def time(self) -> int:\n return int(round(time.time() * 1000))", "def string_time_to_second(time):\n time_split = time.split(':')\n hour = int(time_split[0])\n minute = int(time_split[1])\n second = int(time_split[2])\n return 3600*hour + 60*minute + second", "def time2iters(time):\n if time < 0:\n raise ValueError(\"Negative time is not allowed.\")\n iters = int(round(time / calibration))\n return iters", "def get_sec(time_str):\n h, m, s = time_str.split(':')\n return int(h) * 3600 + int(m) * 60 + int(float(s))", "def calculate_minutes(time):\n return int(time / 60)", "def convert_time(time_passed):\n\n minutes = time_passed.seconds // 60\n\n return minutes", "def convertTime(time, fromCode=\"SEC\", toCode=\"HOUR\"):\n assert time is not None and (isinstance(time, int) or isinstance(time, float)) and time 
>= 0, \"Invalid time: \" % (time)\n \n indices = [\"SEC\", \"MIN\", \"HOUR\"]\n assert fromCode is not None and fromCode.strip().upper() in indices, \"Invalid from code: %s\" % (fromCode)\n assert toCode is not None and toCode.strip().upper() in indices, \"Invalid to code: %s\" % (toCode)\n \n fromIdx = indices.index(fromCode.strip().upper())\n toIdx = indices.index(toCode.strip().upper())\n return time * 60 ** (fromIdx - toIdx)", "def hours_in(sec):\r\n return int(sec//3600)", "def get_time(self) -> int:\n t = str(self.eval(\"pyb.RTC().datetime()\").encode(\"utf-8\"))[1:-1].split(\", \")\n return int(t[4]) * 3600 + int(t[5]) * 60 + int(t[6])", "def time(self, time):\n # type: (int) -> None\n\n if time is not None:\n if not isinstance(time, int):\n raise TypeError(\"Invalid type for `time`, type has to be `int`\")\n\n self._time = time", "def convert_time(t):\n return datetime.fromtimestamp(t / 1e7 - 11644473600)", "def parse_timestr(self, timestr):\n\n epoch = datetime.datetime(1970, 1, 1, 0, 0, 0, 0, tzutc())\n return int((parsedate(timestr) - epoch).total_seconds())", "def to_int(toks):\n return int(toks[0])", "def to_int(self) -> int:\n return self.as_int", "def get_date_as_int(date):\n\n return int(date.replace('-', ''))", "def _convert_time(self, duration):\n in_sec = int(int(duration) / 1000)\n in_time = int(in_sec / 60) + (0.01 * (in_sec % 60))\n return in_time", "def to_db(time):\n return int(time.timestamp())", "def _str2time(self, timestring):\n if not timestring:\n return 0\n\n #\n # NOTE: the time can be larger than is expressible using a 32-bit\n # Python; e.g., 380731122950Z. In this case, the number of seconds will\n # be correct (2164192190L in this case), but this value won't be\n # convertible to a system time_t value.\n #\n return calendar.timegm(\n time.strptime(\n timestring[0:4] + ' ' +\n timestring[4:6] + ' ' +\n timestring[6:8] + ' ' +\n timestring[8:10] + ' ' +\n timestring[10:12] + ' ' +\n timestring[12:14],\n '%Y %m %d %H %M %S'))", "def execute_cast_timestamp_to_integer(op, data, type, **kwargs):\n return data.value", "def SECOND(time):\n\n return _make_datetime(time).second", "def int_to_time(seconds):\n minutes, second = divmod(seconds, 60)\n hour, minute = divmod(minutes, 60)\n time = Time(hour, minute, second)\n return time", "def toint(s):\n try:\n n = int(s)\n except ValueError:\n n = 0\n return n if n >= 0 else 0", "def parse_time(timestr):\n\tif not timestr: return 0\n\tif \":\" not in timestr:\n\t\treturn int(timestr)\n\tneg = timestr.startswith(\"-\") # \"-5:30\" means -330 seconds\n\tmin, sec = timestr.strip(\"-\").split(\":\")\n\ttime = int(min) * 60 + int(sec)\n\tif neg: return -time\n\treturn time", "def dec2int(r: str) -> int:", "def calculate_days(time):\n return int(time / 86400)", "def from_min_to_day(time):\n return str(round(int(time) / (60 * 8), 1))", "def convertTravelTime(traveltime):\n hour = traveltime[0]\n minute = traveltime[2:4]\n minutes = int(hour)*60 + int(minute)\n return minutes", "def int_to_time(seconds):\n time1 = time()\n minutes, time1.second = divmod(seconds, 60)\n time1.hour, time1.minute = divmod(minutes, 60)\n return time1", "def int_to_time(seconds):\n time1 = time()\n minutes, time1.second = divmod(seconds, 60)\n time1.hour, time1.minute = divmod(minutes, 60)\n return time1", "def convert_to_epoch(event_time_date) -> int:\n pattern = '%Y-%m-%d %H:%M'\n return int(time.mktime(time.strptime(event_time_date, pattern)))#to epoch value", "def str2int(self, video_path):\n try:\n return int(video_path)\n except ValueError:\n 
return video_path", "def heure_to_sec(h: int, m: int, s: int) -> int:\n\n return (h * 3600) + (m * 60) + s", "def increment(time, seconds):\n assert valid_time(time)\n seconds += time.time_to_int()\n return int_to_time(seconds)", "def _TIME2STEPS(time):\n return int(time*1000)", "def increment(time, seconds):\n assert valid_time(time)\n seconds += time_to_int(time)\n return int_to_time(seconds)", "def lon_to_int(lon):\n lon = int((Decimal(lon) * 10000000).quantize(Decimal('1'), rounding=ROUND_HALF_UP))\n return (lon + 1800000000) % 3600000000 - 1800000000", "def convert_time(time):\n\n s = time.split()[0]\n s_h = int(s.split(':')[0])\n\n am_pm = s.split(':')[1][-2:]\n if s_h == 12:\n s_h = s_h - 12\n if am_pm == 'PM':\n s_h = s_h + 12\n s_h = s_h + 1\n\n e = time.split()[2]\n e_h = int(e.split(':')[0])\n\n am_pm = e.split(':')[1][-2:]\n if e_h == 12:\n e_h = e_h - 12\n if am_pm == 'PM':\n e_h = e_h + 12\n e_h = e_h + 1\n\n hour_list = range(s_h, e_h + 1)\n return hour_list", "def cast_int(v):\n try:\n return int(v)\n except ValueError:\n return v", "def date_to_int(obj: \"date\") -> int:\n return (obj - date(1970, 1, 1)).days", "def timeCode(self):\n timeStamp = time.strftime(\"%y%m%d%H%M\", time.localtime())\n timeInt = int(timeStamp)\n timeCode = str(timeInt-timeInt % 10)\n return timeCode", "def int_to_time(seconds):\n time = Time()\n minutes, time.second = divmod(seconds, 60)\n time.hour, time.minute = divmod(minutes, 60)\n return time", "def convert_sleep(set_time: str) -> int:\n if set_time.isdigit():\n return int(set_time)\n time_dict = {}\n tmp = 0\n for value in set_time.split(\" \"):\n if value.isdigit() is True:\n tmp = int(value)\n else:\n time_dict[value] = tmp\n time_in_sec = 0\n time_in_min = 0\n time_in_hr = 0\n time_in_dy = 0\n for key in time_dict:\n if \"sec\" in key:\n time_in_sec = time_dict[key]\n elif \"min\" in key:\n time_in_min = 60 * time_dict[key]\n elif (\"hour\" in key) or (\"hr\" in key):\n time_in_hr = 60 * 60 * time_dict[key]\n elif (\"day\" in key) or (\"dy\" in key):\n time_in_dy = 60 * 60 * 24 * time_dict[key]\n else:\n print(\"Error: Invalid Value(s) in config file\")\n sys.exit()\n return time_in_sec+time_in_min+time_in_hr+time_in_dy", "def parse_timestamp(input_time: str) -> int:\n ts = int(float(input_time)) # either integer or real UNIX epoch time\n # milliseconds\n if 1e12 < ts < 1e13:\n return round(ts / 1000) * 1000 # round to second\n # seconds\n if 1e9 < ts < 1e10:\n return ts * 1000\n print(f'ERROR: {input_time} is neither in epoch milliseconds or seconds.')\n raise ValueError(\"{input_time} is not a timestamp\")", "def as_int(self):\n return self.as_type(int)", "def parse_utcdatetime(time):\n try:\n val = int(time.strftime('%Y%j'))\n except:\n val = -1\n return val", "def time(self) -> int:\n return self.raw[\"time\"]", "def convert_top(t):\n try:\n m, s = [int(c) for c in t.split(':')]\n return m * 60 + s\n except (AttributeError, ValueError):\n return 0", "def _get_time(self, sec, nsec):\n return sec + nsec / (10**9)", "def participant_id_to_int(participant_id):\n\n return int(participant_id[1:])", "def hours_to_minutes(hours) -> int:\n return int(hours) * 60", "def seconds_in(sec):\r\n return int(sec - (hours_in(sec)*3600) - (minutes_in(sec)*60))", "def _get_timestamp(self, timestamp):\n return int(timestamp * 1e6)", "def second_to_minute(time):\n if time % 60 != 0:\n time = time + 60\n return time // 60", "def calculate_seconds_in_hours(hours):\n return int(hours * 3600)", "def castle_counter_to_int(castle_counter_val):\n return 
int(struct.unpack('q', castle_counter_val)[0])", "def _to_milliseconds(self, time):\n if isinstance(time, dt.datetime):\n return int(time.timestamp() * 1e3)\n elif isinstance(time, int):\n return time\n else:\n raise NotImplementedError(\"Time format not supported. Use epochs, Datetime or Pandas Datetime\")", "def t_sec(self):\n return self.t/self.parameters['time_conversion']", "def to_int(variable):\n try:\n return int(variable)\n except ValueError:\n return variable", "def epoch2time(time):\n\tvalue = datetime.datetime.fromtimestamp(time)\n\tNormal = value.strftime('%Y-%m-%d %H:%M:%S')\n\tprint(normal)\n\treturn normal", "def time_encoded(self, time_encoded):\n # type: (int) -> None\n\n if time_encoded is not None:\n if not isinstance(time_encoded, int):\n raise TypeError(\"Invalid type for `time_encoded`, type has to be `int`\")\n\n self._time_encoded = time_encoded", "def to_ios_time(self):\n try:\n dt_obj = duparser.parse(timestamp)\n self.out_iostime = str(int(((dt_obj - self.epoch_2001).total_seconds()) * self.nano_2001))\n except Exception as e:\n if not args.log:\n pass\n else:\n logging.error(str(type(e)) + \",\" + str(e))\n self.out_iostime = False\n return self.out_iostime", "def datetime_to_time(day, time):\n try:\n n_day = _day_map.index(day) * 60 * 24\n except KeyError as e:\n raise Exception(\"Invalid date string '{}'\".format(day))\n hour = int(time[:2])\n if hour > 23:\n raise Exception(\"Invalid hour {}\".format(hour))\n minutes = int(time[3:])\n n_time = hour*60 + minutes\n return n_day + n_time", "def recover_time(adjusted_time):\n time_in_s = adjusted_time + glob.base_time\n return time_in_s", "def delay_as_int(delay):\n return 0 if delay is None else int(delay)", "def __parseTimeArg(self, timeStr):\n \n # Number of seconds.\n retVal = 0\n \n ### NOT YET IMPLEMENTED. 
NEEDS TO BE DONE!\n \n \n return retVal", "def int_to_python(self, value):\r\n return int(value)", "def convert_time_to_seconds(self, time_value):\n time_epoch = []\n mylog.debug('Converting %s to epoch time' % time_value)\n for value in time_value:\n try:\n pattern = ' %I:%M:%S%p'\n time_epoch_mini = int(time.mktime(time.strptime(value, pattern))) \n time_epoch.append(time_epoch_mini)\n except:\n mylog.debug('%s Does not seem to be in format with leading space' % value)\n try:\n pattern = '%I:%M:%S%p'\n time_epoch_mini = int(time.mktime(time.strptime(value, pattern))) \n time_epoch.append(time_epoch_mini)\n except:\n mylog.debug('%s Does not appear to be in format without leading space' % value)\n return time_epoch", "def lat_to_int(lat):\n lat = int((Decimal(lat) * 10000000).quantize(Decimal('1'), rounding=ROUND_HALF_UP))\n return min(900000000, max(-900000000, lat))", "def toInteger(data):\n\tif isInteger(data):\n\t\treturn data\n\telse:\n\t\treturn ord(data)", "def filetime_to_time(filetime):\r\n total = filetime.dwHighDateTime << 32 | filetime.dwLowDateTime\r\n return total / 10000000 - SECONDS_BETWEEN_EPOCHS", "def round_time(self, time):\n hour, mins, _ = time.split(\":\")\n return '{:02d}:00:00'.format(int(hour)+1 ) if int(mins) >= 30 else '{:02d}:00:00'.format(int(hour))", "def hex2int(r: str) -> int:", "def sbetime2unixtime(value):\n if not isinstance(value, int):\n raise InstrumentParameterException(\"value not a int\")\n\n return SBE_EPOCH + value", "def timestamp2sec(timestamp):\n return (int(timestamp.seconds) + 60 * int(timestamp.minutes) + 3600 * int(timestamp.hours) + float(int(timestamp.hours) / 1000))", "def from_ios_time(self):\n try:\n dt_obj = (int(ios) / int(self.nano_2001)) + 978307200\n self.in_iostime = dt.utcfromtimestamp(dt_obj).strftime('%Y-%m-%d %H:%M:%S.%f')\n except Exception as e:\n if not args.log:\n pass\n else:\n logging.error(str(type(e)) + \",\" + str(e))\n self.in_iostime = False\n return self.in_iostime", "def int_convert_to_minute(value):\n min = int(int(value) / 60)\n sec = int(int(value) % 60)\n return \"%02d\" % min + \":\" + \"%02d\" % sec", "def convertir_a_int(data):\n data[\"leds\"] = int(data[\"leds\"], 2)\n data[\"pulso\"] = int(data[\"pulso\"], 2)\n imprimir_data(data)" ]
[ "0.8105064", "0.7688117", "0.74488795", "0.7387409", "0.7036349", "0.6889854", "0.67945206", "0.6776978", "0.6736359", "0.6525225", "0.64746207", "0.6470068", "0.64368486", "0.6410069", "0.61911714", "0.61693335", "0.6147289", "0.61194974", "0.6039064", "0.6005749", "0.594594", "0.5945932", "0.5945932", "0.59436154", "0.5920274", "0.5889817", "0.5881761", "0.58767045", "0.5875274", "0.5846238", "0.58416015", "0.5836116", "0.58190984", "0.58161455", "0.5813389", "0.58053476", "0.58019155", "0.5800649", "0.5792415", "0.57569045", "0.5754624", "0.57221884", "0.5651407", "0.56454927", "0.564491", "0.56357634", "0.5617328", "0.5611874", "0.5599968", "0.55660635", "0.55655813", "0.55655813", "0.5562096", "0.5559773", "0.5539716", "0.5536792", "0.55227125", "0.551454", "0.5511684", "0.55087626", "0.54906136", "0.54869527", "0.54861355", "0.5480303", "0.54245883", "0.5423394", "0.54026264", "0.5399306", "0.5389623", "0.53751177", "0.53615075", "0.5360825", "0.5357425", "0.5355647", "0.5347805", "0.53436583", "0.53294265", "0.5324396", "0.5321895", "0.53009343", "0.5294959", "0.5283848", "0.52749234", "0.5268675", "0.5261585", "0.5249442", "0.52491325", "0.52462554", "0.5228465", "0.52237564", "0.5216642", "0.52151906", "0.5212246", "0.52118444", "0.52094996", "0.520303", "0.5193729", "0.5157556", "0.5150752", "0.5148632" ]
0.6239351
14
Convert the given object to a UUID string if it's not yet one.
def _verify_uuid(given_uuid): if isinstance(given_uuid, str) or isinstance(given_uuid, unicode): # Verify the given string is well-formed uuid.UUID(given_uuid) return given_uuid if isinstance(given_uuid, uuid.UUID): return given_uuid.__str__() raise ValueError("Given object is neither a string nor a UUID object.")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def uuid(self, obj: typing.Any = None) -> str:\n if obj is None:\n obj = self.randomString()\n self._counter += 1\n elif isinstance(obj, bytes):\n obj = obj.decode('utf8') # To binary\n else:\n obj = '{}'.format(obj)\n\n return str(uuid.uuid5(self._namespace, obj)).lower() # I believe uuid returns a lowercase uuid always, but in case... :)", "def get_uuid(self, obj):\n return IUUID(obj, None)", "def get_uuid(self, obj):\n return IUUID(obj, None)", "def _generate_uuid_str_if_none(given_uuid):\n\t\treturn given_uuid or uuid.uuid4().__str__()", "def uuid(_uuid=uuid4):\n return str(_uuid())", "def uuid(o):\n return sqlite3.Binary(o.bytes)", "def _get_uuid():\n return str(uuid.uuid4())", "def _uuid(self):\n u = self.__uuid\n if u is None:\n u = str(uuid.uuid1())\n self._set_uuid(u)\n return u", "def is_uuid(my_object):\n try:\n my_uuid = uuid.UUID(my_object, version=4)\n except ValueError:\n return False\n return str(my_uuid) == my_object", "def get_uuid():\n\n x = uuid.uuid1()\n return str(x)", "def get_uuid():\n return str(uuid4())", "def getUUID():\n return str(uuid.uuid4())", "def get_uuid():\n\n return str(uuid.uuid4())", "def to_json(self, obj):\n _dict = obj._to_dict()\n if ID not in _dict or _dict[ID] is None:\n _dict[ID] = str(uuid.uuid4())\n json_str = json.dumps(_dict, indent=4)\n return json_str", "def uuid_(identifier: Optional[uuid.UUID]) -> Optional[str]:\n if identifier is None:\n return None\n\n return str(identifier)", "def to_uuid(string):\n if sys.version_info[0] == 2:\n string = string.encode('utf-8')\n \n # This the seed Ansible has chosen for their UUID's\n return str(uuid.uuid5(uuid.UUID('361E6D51-FAEC-444A-9079-341386DA8E2E'), string))", "def obj_to_uid(obj):\n object_ser = pickle.dumps(obj)\n serialised_b64 = base64.encodebytes(object_ser)\n unique_id = serialised_b64.decode()\n return unique_id", "def _make_uuid():\n parts = [Record._hex_string(k) for k in Record.UUID_PARTS]\n return \"-\".join(parts)", "def getid(obj):\n\n # Try to return the object's UUID first, if we have a UUID.\n try:\n if obj.uuid:\n return obj.uuid\n except AttributeError:\n pass\n try:\n return obj.id\n except AttributeError:\n return obj", "def build_uuid(self):\n self._uuid = str(uuid.uuid1())\n return self._uuid", "def set_uuid(self, device):\n import uuid\n\n return str(uuid.uuid4())", "def uuid(self) -> str:\n return self.obj.uuid", "def _generate_uuid():\n return str(uuid.uuid4())", "def _NewUUIDString ():\n if __HaveUUID:\n return uuid.uuid1().urn\n return '%s:%08.8x' % (time.strftime('%Y%m%d%H%M%S'), random.randint(0, 0xFFFFFFFF))", "def ordered_uuid(value=None):\n if not HAVE_ORDERED_UUID:\n raise RuntimeError(\"ordered_uuid package: not found\")\n if not value:\n value = str(uuid.uuid1())\n return OrderedUUID(value)", "def test_api_object_uuid(self, api_object):\n assert isinstance(api_object.uuid_, uuid.UUID)", "def get_object_uuid(remote, object_id):\n cmd = mmapi.StoredCommands()\n cmd_key = cmd.AppendSceneCommand_GetObjectUUID(object_id)\n remote.runCommand(cmd)\n byte_vec = mmapi.vectorub()\n cmd.GetSceneCommandResult_GetObjectUUID(cmd_key, byte_vec)\n return vectorub_to_string(byte_vec)", "def cb_uuid_to_str(_uuid: CBUUID) -> str:\n _uuid = _uuid.UUIDString()\n if len(_uuid) == 4:\n return \"0000{0}-0000-1000-8000-00805f9b34fb\".format(_uuid.lower())\n # TODO: Evaluate if this is a necessary method...\n # elif _is_uuid_16bit_compatible(_uuid):\n # return _uuid[4:8].lower()\n else:\n return _uuid.lower()", "def gen_uuid():\n return str( uuid.uuid4() )", "def gen_uuid():\n 
return str( uuid.uuid4() )", "def gen_uuid() -> str:\n return str(uuid4())", "def gen_uuid():\n return str(uuid.uuid4())", "def _tostr(obj): # pragma: no cover\n return obj if isinstance(obj, str) else obj.decode()", "def generate_uuid(basedata=None):\n if basedata is None:\n return str(uuid.uuid4())\n elif isinstance(basedata, str):\n checksum = hashlib.md5(basedata.encode()).hexdigest()\n return str(uuid.UUID(checksum))\n else:\n raise TypeError(\"The 'basedata' must be string or None\")", "def uuid(self):\n if self._uuid is not None:\n return self._uuid\n if not self.exists:\n return None\n self.retr_uuid()\n return self._uuid", "def getuuid(value, table, table_attrib, error_tail):\n if value is None:\n return value\n\n elif modelfor(value, table):\n value = getattr(value, table_attrib, None)\n if value is None:\n raise ValueError(\"null id provided for %s\" % error_tail)\n return value\n\n # if a string was provided then we should\n # try to convert it into a uuid first to\n # be sure it's valid\n elif isinstance(value, STRING_TYPES):\n UUID(value)\n return value\n\n elif isinstance(value, UUID):\n return str(value)\n\n else:\n raise ValueError(\"failed to determine %s\" % error_tail)", "def makeid(cls):\n return str(uuid.uuid4().hex)", "def get_uuid(s):\n sha = sha256(s.encode('utf-8')).hexdigest()\n uuid = UUID(sha[:32])\n return str(uuid)", "def _fix_uuids(self, instance):\n for atom in instance.atoms():\n if isinstance(atom.value, uuid.UUID):\n setattr(instance, atom.name, str(atom.value))\n\n return instance", "def u(obj):\n return obj if isinstance(obj, str) else str(obj)", "def get_uuid(device):\n uuids = uuid_table()\n return str(uuids[device])", "def param2id(object_id):\r\n if '-' in object_id:\r\n return ec2utils.ec2_vol_id_to_uuid(object_id)\r\n else:\r\n return object_id", "def uuid(self):\n return UUID(self.unique_id)", "def generateUUID(): # pylint: disable=C0103\r\n return str(uuid.uuid4())", "def get_uuid(self, obj):\n if \"opensim\" in obj.properties:\n if \"uuid\" in obj.properties[\"opensim\"]:\n return obj.properties['opensim']['uuid']", "def uuid(self, value):\n self.unique_id = UUID(str(value)).hex", "def test_to_string() -> None:\n\n uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684))\n obj = SpecificLocation(id=uid)\n assert str(obj) == \"<SpecificLocation: fb1bb0675bb74c49becee700ab0a1514>\"\n assert obj.__repr__() == \"<SpecificLocation: fb1bb0675bb74c49becee700ab0a1514>\"", "def _retrieve_object_id(obj: Optional[Union[\"Base\", str]]) -> Optional[str]:\n # Check whether the obj is an object of any subclass of Base, or uuid type\n from pykechain.models import Base\n\n if issubclass(type(obj), Base):\n return obj.id\n elif isinstance(obj, str) and is_uuid(obj):\n return obj\n elif isinstance(obj, type(None)):\n return None\n else:\n raise IllegalArgumentError(\n \"When adding the widget, obj must be an instance of `Base` or an object id. 
\"\n \"Type is: {}\".format(type(obj))\n )", "def _json_serialize(obj: Any) -> str:\n if isinstance(obj, bytes):\n if len(obj) < 256:\n try:\n return obj.hex()\n except Exception:\n pass\n else:\n try:\n return obj.decode()\n except Exception:\n pass\n return '<not serializable>'", "def _generate_uuid(self):\n\n return uuid.uuid4()", "def str_(object_):\n return str(object_)", "def uuid(seed):\n return uuid4().get_hex()", "def id_str(self):\n if hasattr(self, 'id'):\n return str(self.id)\n else:\n return 'obj%s' % id(self)", "def _fix_guid(config, guid):\n if config['dir_guid_source'] == 'objectGUID':\n return str(\n uuid.UUID(bytes_le=guid)\n )\n else:\n return guid", "def uuid(self):\n raise NotImplementedError", "def uuid(self):\n raise NotImplementedError", "def uuid(self):\n raise NotImplementedError", "def asString(obj):\n if type(obj) in _STR_TYPES:\n return obj\n return str(obj)", "def stringify(obj):\n tp = type(obj)\n if issubclass(tp, basestring):\n return obj\n elif hasattr(tp, '__unicode__'):\n s = tp.__unicode__(obj)\n if not isinstance(s, basestring):\n raise TypeError('__unicode__ did not return a string')\n return s\n elif hasattr(tp, '__str__'):\n s = tp.__str__(obj)\n if not isinstance(s, basestring):\n raise TypeError('__str__ did not return a string')\n return s\n else:\n return str(obj)", "def uuid(self, value):\n if value is not None:\n self.keystore['id'] = value\n elif 'id' in self.keystore:\n self.keystore.pop('id')", "def generate_uuid(value, org_id_prefix, org_name=None):\n # TODO: Refactor to avoid duplication\n if org_id_prefix:\n base_hash = hashlib.md5(org_id_prefix.encode())\n else:\n base_hash = hashlib.md5(org_name.encode())\n\n base_digest = base_hash.hexdigest()\n base_uuid = uuid.UUID(base_digest)\n\n combined_value = (str(base_uuid) + str(value)).encode()\n value_hash = hashlib.md5(combined_value)\n value_digest = value_hash.hexdigest()\n value_uuid = str(uuid.UUID(value_digest))\n return value_uuid", "def test_uuid_default(self):\r\n default = uuid.uuid4()\r\n prop = UUID(default=default, required=True)\r\n self.assertEqual(prop.to_database(None), prop.to_database(default))", "def sortable_time_uuid_str(uuid):\n return flip_uuid_parts(str(uuid))", "def uuid(self, *args ):\n t = int( time.time() * 1000 )\n r = int( random.random() * 100000000000000000 )\n try:\n a = socket.gethostbyname( socket.gethostname() )\n except:\n # if we can't get a network address, just imagine one\n a = random.random() * 100000000000000000\n data = str(t) + ' ' + str(r) + ' ' + str(a) + ' ' + str(args)\n data = md5.md5(data).hexdigest()\n\n return data", "def uuid( *args ):\n t = long( time.time() * 1000 )\n r = long( random.random()*100000000000000000L )\n try:\n a = socket.gethostbyname( socket.gethostname() )\n except:\n # if we can't get a network address, just imagine one\n a = random.random()*100000000000000000L\n data = str(t)+' '+str(r)+' '+str(a)+' '+str(args)\n data = hashlib.md5(data).hexdigest()\n return data", "def ustr(obj):\n if IS_PY2:\n # If we are getting a string, then do an explicit decode\n # else, just call the unicode method of the object\n if type(obj) in [str, basestring]: # pragma: no cover # noqa\n return unicode(obj, DEFAULT_ENCODING) # pragma: no cover # noqa\n else:\n return unicode(obj) # pragma: no cover # noqa\n else:\n if type(obj) in [bytes]:\n return obj.decode(DEFAULT_ENCODING)\n else:\n return str(obj)", "def generate_uuid():\n return f'{uuid.uuid1()}'", "def stringify(obj):\n tp = type(obj)\n if issubclass(tp, basestring):\n return 
obj\n elif hasattr(tp, '__unicode__'):\n s = tp.__unicode__(obj)\n if not isinstance(s, basestring):\n raise TypeError, '__unicode__ did not return a string'\n return s\n elif hasattr(tp, '__str__'):\n s = tp.__str__(obj)\n if not isinstance(s, basestring):\n raise TypeError, '__str__ did not return a string'\n return s\n else:\n return str(obj)", "def _stringify(obj):\r\n if isinstance(obj, unicode):\r\n return obj.encode('utf-8')\r\n elif isinstance(obj, str):\r\n return obj\r\n else:\r\n raise TypeError('Object is not a string.')", "def uuid(self) -> str:\n return self.__uuid", "async def uuid(self) -> str:\n if not hasattr(self, \"_uuid\"):\n result = await self.app.sparql.query(\"\"\"\n SELECT DISTINCT ?o\n WHERE {\n <{{uri}}> <http://mu.semte.ch/vocabularies/core/uuid> ?o .\n }\n \"\"\", uri=self.uri)\n self._uuid = result['results']['bindings'][0]['o']['value']\n return self._uuid", "def _format_uuid(self, uuid):\n uuid_format = self.uuid_format\n uuid_list=uuid_format.split(\"-\")\n pad=len(uuid_list[-1])\n last_element=uuid.zfill(pad)\n formatted_uuid=uuid_format.replace(uuid_list[-1], last_element)\n return formatted_uuid", "def generate_uid_from_pbobject(pb_object):\n json_string = json.dumps(\n MessageToDict(pb_object, including_default_value_fields=True, preserving_proto_field_name=True),\n indent=2,\n sort_keys=True\n )\n out = StringIO()\n out.write(json_string)\n uid = hashlib.sha1(out.getvalue().encode('utf-8')).hexdigest()\n out.close()\n return uid", "def uuid(self, type, val):\n picker = lambda x: x.get('uuid', x)\n return self._get((type, val), picker)", "def safe_str(_object: Any) -> str:\n try:\n return str(_object)\n except Exception as error: # noqa: BLE001\n return f\"<str-error {str(error)!r}>\"", "def uuid():\n from dallinger.experiment import Experiment\n\n click.echo(Experiment.make_uuid())", "def _make_uuid(val):\n h = hashlib.md5(val).hexdigest()\n return '{0}-{1}-{2}-{3}-{4}'.format(\n h[:8], h[8:12], h[12:16], h[16:20], h[20:])", "def UUID(self, default=None):\n return self.data.get('uuid', default)", "def render_item(type_, obj, autogen_context):\n\n if type_ == 'type' and isinstance(obj, sqlalchemy_utils.types.uuid.UUIDType):\n # add import for this type\n autogen_context.imports.add(\"import sqlalchemy_utils\")\n autogen_context.imports.add(\"import uuid\")\n return \"sqlalchemy_utils.types.uuid.UUIDType(binary=False, native=False), default=uuid.uuid4, unique=True\"\n\n\n # default rendering for other objects\n return False", "def serialize_str(self, obj):\n if len(obj) < 0x100:\n return 'U' + struct.pack('<B', len(obj)) + obj\n return 'T' + struct.pack('<I', len(obj)) + obj", "def from_uuid(self):\n reason = \"[!] 
UUID's are in the format 00000000-0000-0000-0000-000000000000\"\n ts_type = self.ts_types['uu']\n try:\n uuid_lower = self.uu.lower()\n UUID_REGEX = re.compile('[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}')\n if not bool(UUID_REGEX.match(uuid_lower)):\n self.in_uuid = indiv_output = combined_output = False\n pass\n else:\n u = uuid.UUID(uuid_lower)\n if u.version == 1:\n unix_ts = int((u.time / 10000) - 12219292800000)\n self.in_uuid = dt.utcfromtimestamp(float(unix_ts) /1000.0).strftime('%Y-%m-%d %H:%M:%S.%f')\n else:\n pass\n indiv_output = str(\"{} {}\".format(ts_type, self.in_uuid))\n combined_output = str(\"{}{}\\t\\t\\t{} UTC{}\".format(self.left_color, ts_type, self.in_uuid, self.right_color))\n except Exception:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.in_uuid = indiv_output = combined_output = False\n return self.in_uuid, indiv_output, combined_output, reason", "def json_serial(obj):\n if isinstance(obj, datetime):\n serial = obj.isoformat()\n return serial", "def uuid(self) -> str:\n return self._uuid", "def uuid(self) -> str:\n return self._uuid", "def uuid(self) -> str:\n return self._uuid", "def uuid(self):\n\n return self._get_field(\"uuid\")", "def genUuid(seq=None):\n if seq is not None:\n return uuid.uuid1().hex + uuid.uuid3(uuid.NAMESPACE_DNS, seq).hex\n return uuid.uuid1().hex + uuid.uuid3(\n uuid.NAMESPACE_DNS, uuid.uuid1().hex).hex", "def encode(uuid_):\n return base64.urlsafe_b64encode(uuid_.bytes)[:-2] # Drop '==' padding", "def uuid(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"uuid\")", "def serialize(obj):\n serial = repr(obj)\n try:\n if eval(serial) == obj:\n return serial\n except:\n pass\n try:\n serial = pickle.dumps(obj)\n return 'pickle.loads(%s)' % repr(serial)\n except:\n raise Exception #unable to serialize", "def serialize(obj):\n\n if isinstance(obj, datetime.datetime):\n serial = obj.replace(microsecond=0).replace(tzinfo=None).isoformat() + \"Z\"\n return serial\n\n if isinstance(obj, bytes):\n return obj.decode('utf-8')\n\n return obj.__dict__", "def serialize(obj):\n\n if isinstance(obj, datetime.datetime):\n serial = obj.replace(microsecond=0).replace(tzinfo=None).isoformat() + \"Z\"\n return serial\n\n if isinstance(obj, bytes):\n return obj.decode('utf-8')\n\n return obj.__dict__", "def _generate(self, hashed = True):\r\n\r\n identifier = str(uuid.uuid4())\r\n identifier = identifier.upper()\r\n if not hashed: return identifier\r\n identifier = legacy.bytes(identifier)\r\n hash = hashlib.sha256(identifier)\r\n identifier = hash.hexdigest()\r\n identifier = identifier.upper()\r\n return identifier", "def generate_uuid():\n return uuid.uuid4().hex", "def uuid(self):\n return self.__uuid", "def uuid(self):\n return self.__uuid", "def json_serial(obj):\n\n if isinstance(obj, (datetime, date)):\n return obj.isoformat()", "def id(obj):\n return obj", "def serialize(obj):\n\n # if isinstance(obj, date):\n # serial = obj.isoformat()\n # return serial\n #\n # if isinstance(obj, time):\n # serial = obj.isoformat()\n # return serial\n\n return obj.to_json()", "def json_serializer(obj):\n if isinstance(obj, (datetime.datetime, datetime.date)):\n serial = obj.isoformat()\n return serial" ]
[ "0.814474", "0.77772284", "0.77772284", "0.7146047", "0.69617623", "0.6821387", "0.68103266", "0.6805069", "0.6775859", "0.67503506", "0.66954255", "0.65914947", "0.65580595", "0.6542236", "0.6490117", "0.6463487", "0.64372617", "0.6394203", "0.63477486", "0.63292855", "0.6329223", "0.6306714", "0.630394", "0.6292475", "0.62581056", "0.6217312", "0.62160146", "0.61964715", "0.6183759", "0.6183759", "0.61685973", "0.61652005", "0.6155864", "0.61352974", "0.6092972", "0.6059519", "0.6052815", "0.6049931", "0.6048207", "0.6039697", "0.6033138", "0.6019421", "0.600067", "0.59810746", "0.5973424", "0.5940123", "0.5933571", "0.59263694", "0.592425", "0.592218", "0.5883684", "0.5866736", "0.58619374", "0.5859591", "0.5856852", "0.5856852", "0.5856852", "0.5834694", "0.5829683", "0.58081186", "0.5775321", "0.57684815", "0.57674813", "0.57579184", "0.5754231", "0.5749863", "0.57439935", "0.5739462", "0.5736756", "0.5734435", "0.57243615", "0.56961566", "0.56954545", "0.56932235", "0.56781673", "0.5677415", "0.567706", "0.5674414", "0.56725323", "0.5649302", "0.56439495", "0.563924", "0.5623607", "0.5623607", "0.5623607", "0.5606757", "0.56014985", "0.559903", "0.5594387", "0.55850357", "0.5581569", "0.5581569", "0.5570584", "0.5568562", "0.5559925", "0.5559925", "0.555607", "0.55542785", "0.5549461", "0.55486643" ]
0.6219394
25
read 4 chars to buf
def read4(buf): return 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read(self, buf, n):\n l = min(len(self.prev), n)\n buf[:l] = self.prev[:l]\n self.prev = self.prev[l:] # pitfall self.prev = []\n\n idx = l # the next reading\n while idx < n:\n buf4 = [\"\" for _ in xrange(4)]\n r = read4(buf4)\n if idx+r < n:\n buf[idx:idx+r] = buf4[:r]\n idx += r\n if r < 4: return idx\n else:\n buf[idx:n] = buf4[:n-idx]\n self.prev = buf4[n-idx:r] # pitfall buf4[n-idx:]\n idx = n\n\n return idx", "def readinto(self, buf: bytes, nack: bool = True, /) -> None:", "def readinto(self, buf: bytes, nack: bool = True, /) -> None:", "def readinto(self, buf: bytes, /) -> Optional[int]:", "def _read_fixed(buf, length):\n result = buf.read(length)\n actual = len(result)\n if actual != length:\n raise EndOfMessage(False if actual == 0 else True)\n return result", "def readbuf(self, n):\n if n == 0:\n return ''\n try:\n msg = self.sock.recv(n)\n except BaseException:\n msg = ''\n n2 = min(n - len(msg), n / 2)\n return msg + self.readbuf(n2)", "def serial_read(self, size):\n line=''\n # How many chars in the buffer\n actualsize = len(self.buffer)\n # maximal the avialable chars\n if size > actualsize:\n size = actualsize\n linebuf = self.buffer[:size]\n self.buffer = self.buffer[size:]\n for c in linebuf:\n line += chr(c)\n return line", "def _read_next_bytes(\n fid, num_bytes, format_char_sequence, endian_character=\"<\"\n ):\n data = fid.read(num_bytes)\n return struct.unpack(endian_character + format_char_sequence, data)", "def _read_chunked(self):\r\n buf = \"\"\r\n size = 1\r\n while size:\r\n size = int(self._read_line(), 16)\r\n buf += self._read_num_bytes(size)\r\n self._read_num_bytes(2) # CRLF\r\n return buf", "def read(self, n=1):\n s = self._RX_buf[0:n]\n self._RX_buf = self._RX_buf[n:]\n # print(\"read op occurred: RX_buf = {}\".format(self._RX_buf), end='\\n\\n')\n return s # bytes(s, encoding='ascii')", "def readinto(self, buf: bytes, nbytes: int, /) -> Optional[int]:", "def read_next_bytes(fid, num_bytes, format_char_sequence, endian_character=\"<\"):\n data = fid.read(num_bytes)\n return struct.unpack(endian_character + format_char_sequence, data)", "def readinto(self, buf: bytes, write: int = 0x00, /) -> Optional[int]:", "def readinto(self, buf: bytes, write: int = 0x00, /) -> Optional[int]:", "def parse(self, data):\n self._readahead.write(data)\n buf = self._readahead.getvalue()\n if len(buf) < 4:\n return\n while len(buf) >= 4:\n size = int(buf[:4], 16)\n if size == 0:\n self.handle_pkt(None)\n buf = buf[4:]\n elif size <= len(buf):\n self.handle_pkt(buf[4:size])\n buf = buf[size:]\n else:\n break\n self._readahead = BytesIO()\n self._readahead.write(buf)", "def read_bytes(self, number_of_bytes):\n\n self.index = -1\n data = self.buf[self.offset:self.offset + number_of_bytes]\n self.offset += number_of_bytes\n\n return data", "def readinto(self, buf: bytearray, nack: bool = True) -> None:\n ...", "def read(self, l):\n\t\twhile l > len(self.buf):\n\t\t\tself.buf += self.conn.recv(4096)\n\n\t\tobuf = self.buf\n\t\tself.buf = obuf[l:]\n\t\treturn obuf[:l]", "def _read_to_buffer(cls, buf, stream):\n # We could read it in one step, but instead we'll read it in chunks to avoid big temporaries.\n # (See below.)\n # buf[:] = stream.read( len(buf) )\n\n # Read data from the stream in chunks\n remaining_bytes = len(buf)\n while remaining_bytes > 0:\n next_chunk_bytes = min( remaining_bytes, VoxelsNddataCodec.STREAM_CHUNK_SIZE )\n chunk_start = len(buf)-remaining_bytes\n chunk_stop = len(buf)-(remaining_bytes-next_chunk_bytes)\n buf[chunk_start:chunk_stop] = 
stream.read( next_chunk_bytes )\n remaining_bytes -= next_chunk_bytes", "def _decode_str(self, buf):\n length = self._decode_vint(buf)\n result = buf.read(length)\n if len(result) != length:\n raise EndOfMessage(True)\n return result", "def read_char(data):\n s_type = \"=%s\" % get_type(\"char\")\n return struct.unpack(s_type, data.read(1))[0]", "def _read_bytes(self, start, count): # type: (int) -> bytes\n bytes_data = self._buffer[start:start + count]\n\n if len(bytes_data) != count:\n raise ASN1WantMore('Premature end of input.')\n\n return bytes_data", "def __read_block(self, size):\n buf = b\"\"\n if len(self.__read_buffer):\n limit = (\n size if size <= len(self.__read_buffer) else\n len(self.__read_buffer)\n )\n buf = self.__read_buffer[:limit]\n self.__read_buffer = self.__read_buffer[limit:]\n size -= limit\n if not size:\n return buf\n try:\n buf += self.sock.recv(size)\n except (socket.timeout, ssl.SSLError):\n raise Error(\"Failed to read %d bytes from the server\" % size)\n self.__dprint(buf)\n return buf", "def _get_data(self, read_size):\n return self._character_device.read(read_size)", "def read_pkt_line(self):\n if self._readahead is None:\n read = self.read\n else:\n read = self._readahead.read\n self._readahead = None\n\n try:\n sizestr = read(4)\n if not sizestr:\n raise HangupException()\n size = int(sizestr, 16)\n if size == 0:\n if self.report_activity:\n self.report_activity(4, \"read\")\n return None\n if self.report_activity:\n self.report_activity(size, \"read\")\n pkt_contents = read(size - 4)\n except socket.error as e:\n raise GitProtocolError(e)\n else:\n if len(pkt_contents) + 4 != size:\n raise GitProtocolError(\n \"Length of pkt read %04x does not match length prefix %04x\"\n % (len(pkt_contents) + 4, size)\n )\n return pkt_contents", "def read(self, size=-1):\n if not self._buf:\n self._buf.append(next(self._iter, b''))\n if len(self._buf[0]) < size or size < 0:\n return self._buf.pop(0)\n block = self._buf.pop(0)\n self._buf.insert(0, block[size:])\n return block[:size]", "def chars(count):\n\n global offset\n\n bytes=midifile[offset:offset+count]\n offset+=count\n return bytes", "def readline(self):\n returnIndex = self._RX_buf.index(\"\\n\") # \\r\\n technically\n if returnIndex != -1:\n s = self._RX_buf[0:returnIndex + 1]\n self._RX_buf = self._RX_buf[returnIndex + 1:]\n return s # bytes(s, encoding='ascii') # s\n else:\n return 0x04 # ''", "def read_nchars(string, n=1):\n return string[:n]", "def read_buffer(serial):\r\n resp = serial.read_all()\r\n return resp.decode()", "def read(self, n):\n msg = self.readexactly(n)\n n2 = n - len(msg)\n if n2 > 0:\n msg += self.readbuf(n2)\n return msg", "def readinto(self, buf: Any, nbytes: int=-1) -> int:\n ...", "def read( self, bytes=1024 ):\n count = len( self.readbuf )\n if count < bytes:\n data = os.read( self.stdout.fileno(), bytes - count )\n self.readbuf += data\n if bytes >= len( self.readbuf ):\n result = self.readbuf\n self.readbuf = ''\n else:\n result = self.readbuf[ :bytes ]\n self.readbuf = self.readbuf[ bytes: ]\n return result", "def _read_num_bytes(self, num):\r\n buf = \"\"\r\n while len(buf) < num:\r\n chunk = self.sock.recv(num - len(buf))\r\n if not chunk:\r\n raise SocketClosedException\r\n buf += chunk\r\n return buf", "def socks4_recv_request(buf):\n chunk = buf.recvn(8)\n version, cmd, port, ip = struct.unpack(\"!BBHI\", chunk)\n\n if ip > 0 and ip < 256:\n # version 4a\"\n ip = None\n else:\n ip = tools.i_to_ipv4(ip)\n # early validation\n if version != 4 or cmd not in 
(CMD_CONNECT,):\n uid = ''\n else:\n uid = buf.recvstr0()\n if ip is None:\n ip = buf.recvstr0()\n r = {\"version\" : version, \"cmd\" : cmd, \"address\" : (ip, port), \"uid\" : uid}\n logger.debug(\"SOCKS4 received request from %r: %r\", buf.getpeername(), r)\n return r", "def readinto(self, buf: AnyWritableBuf, nbytes: int, /) -> int | None:", "def readinto(self, buf: AnyWritableBuf, /) -> int | None:", "def readinto(self, buf: AnyWritableBuf, /) -> int | None:", "def _read_char(self):\n if self.read_pos >= len(self.data):\n self.char = \"\"\n else:\n self.char = self.data[self.read_pos]\n\n self.pos = self.read_pos\n self.read_pos += 1", "def readNetstring(self, size=2048):\n # Read until we have at least 4 bytes\n while not self.recvLength():\n self.__data += self.sock.recv(size)\n while not self.recvData():\n self.__data += self.sock.recv(size)\n while not self.recvComma():\n self.__data += self.sock.recv(size)\n string = self.__buffer\n self.__buffer = ''\n if self.verbose:\n print \"controller:\",string\n return string", "def readinto(self, buf: bytearray, nbytes: Optional[int] = None) \\\n -> Optional[int]:\n ...", "def readinto(self, buf: AnyWritableBuf, maxlen: int, /) -> int | None:", "def read_data(self, size, attempts = 1):\n data = Array('B')\n # do we have all of the data in the read buffer?\n if size <= len(self.rdbuf) - self.rdofs:\n data = self.rdbuf[self.rdofs : self.rdofs + size]\n self.rdofs += size\n return data\n # do we have some of the data in the read buffer?\n if len(self.rdbuf) - self.rdofs > 0:\n data = self.rdbuf[self.rdofs:]\n # do a usb read to get the rest...\n # read from the usb device\n try:\n bytes_to_rd = size - len(data)\n while bytes_to_rd > 0:\n # read from the usb device\n while True:\n self.rdbuf = self._read()\n self.rdofs = 0\n if len(self.rdbuf) > 0:\n break\n else:\n # no data received\n attempts -= 1\n if attempts > 0:\n # try again\n continue\n # return what we have\n return data\n # copy the read buffer into the returned data\n n = len(self.rdbuf)\n if n >= bytes_to_rd:\n # copy a partial read buffer\n data += self.rdbuf[:bytes_to_rd]\n self.rdofs = bytes_to_rd\n return data\n else:\n # copy all of the read buffer\n data += self.rdbuf\n bytes_to_rd -= n\n # read more data...\n except usb.core.USBError as e:\n raise usbdev_error(str(e))\n # never reached\n raise usbdev_error(\"internal error\")", "def readchar(self) -> int:", "def test_getreader(self):\n reader = codecs.getreader('imap4-utf-7')(BytesIO(b'Hello&AP8-world'))\n self.assertEqual(reader.read(), u'Hello\\xffworld')", "def decode(self, size=1):\n while len(self.__buf) < size:\n self.__buf += self.__lib.decompress(self.__ref.read(1))\n buf, self.__buf = self.__buf[:size], self.__buf[size:]\n return buf", "def read(self, nbytes, /) -> bytes | None:", "def read_R4W(self):\n self.write(':FETC?')\n sleep(0.1)\n msg = self.read()\n #print ('read_R4W msg:', msg)\n v = msg.split(',')[0].rstrip('NOHM4W').strip()\n if v[-1] == 'R':\n return float(v[:-1])\n else:\n return float(v)", "def read(self, num_bytes_to_read):\n pass", "def readfrom_into(self, addr: int, buf: bytes, stop: bool = True, /) -> None:", "def readfrom_into(self, addr: int, buf: bytes, stop: bool = True, /) -> None:", "def read(self, nbytes: int, /) -> bytes | None:", "def read_and_unpack(self, fmt):\n try:\n return unpack(\n self.byte_order + fmt,\n self.read(calcsize(self.byte_order + fmt)))\n except Exception as e:\n if e.args[0].startswith('unpack requires a buffer of'):\n raise EOFError(e)\n else:\n raise", "def 
read():\n # TODO", "def read_char(self):\n return self._packers[\"b\"].unpack(self.read(1))[0]", "def _get_data(self, read_size):\n if NIX:\n return super(Keyboard, self)._get_data(read_size)\n return self._pipe.recv_bytes()", "def read(self, num_of_byte: int=1) -> bytes:\n if num_of_byte < 0:\n return self.stream.read()\n if self._is_buffer_full():\n return self.stream.read(num_of_byte)\n\n buffer_len = len(self._buffer)\n if buffer_len == self._buffer_pointer + 1: # all real read\n data = self.stream.read(num_of_byte)\n self._buffer += data\n self._buffer_pointer = self._buffer_pointer + num_of_byte\n return data\n elif buffer_len - (self._buffer_pointer + 1) >= num_of_byte: # all from buffer\n self._buffer_pointer += num_of_byte\n return bytes(self._buffer[self._buffer_pointer - num_of_byte + 1: self._buffer_pointer + 1])\n elif buffer_len - (self._buffer_pointer + 1) < num_of_byte: # one part from buffer and the other real read\n data_buffer_part = self._buffer[self._buffer_pointer + 1:]\n remained_not_read_num = num_of_byte - (buffer_len - (self._buffer_pointer + 1))\n data_read_part = self.stream.read(remained_not_read_num)\n self._buffer += data_read_part\n self._buffer_pointer += num_of_byte\n return bytes(data_buffer_part + data_read_part)", "def _readString(self, rawData, offset=0):\n\n strLen, = unpack(\n self.byteFormat, rawData[\n offset:offset + self.byteFormatLen])\n\n return rawData[self.byteFormatLen:][:strLen]", "def bytes_to_ipv4_str(buff):\n if len(buff) != DataDescription.B_SEQ_IPv4_LEN:\n raise TypeError(\"Invalid input\")\n return \"%u.%u.%u.%u\" % buff", "def read(self, size=-1):\n\n if size < 0:\n raise NotImplementedError(\"Don't be greedy, that could be massive!\")\n elif size == 0:\n if self._text:\n return \"\"\n else:\n return b\"\"\n elif self._within_block_offset + size <= len(self._buffer):\n # This may leave us right at the end of a block\n # (lazy loading, don't load the next block unless we have too)\n data = self._buffer[self._within_block_offset:self._within_block_offset + size]\n self._within_block_offset += size\n assert data # Must be at least 1 byte\n return data\n else:\n # if read data overflows to next block\n # pull in rest of data in current block\n data = self._buffer[self._within_block_offset:]\n\n # decrement size so that we only pull the rest of the data\n # from next block\n size -= len(data)\n self._load_block() # will reset offsets\n\n if not self._buffer:\n return data # EOF\n\n # if there is still more to read\n elif size:\n # pull rest of data from next block\n return data + self.read(size)\n else:\n # Only needed the end of the last block\n return data", "def _readBytes(self, len):\n return self.socket.recv(len)", "def _read_four_byte_numbers_in_table(self, table, index):\n\n return self.stream.read_four_byte_numbers(self._position_in_table(table, index))", "def read_bytes(self) -> bytes:\n t = self.pc\n while self.data[self.pc] != 0:\n self.pc += 1\n result = self.data[t:self.pc]\n self.pc += 1 # jump '\\0'\n return result", "def read_fd_decode_safely(fd, size=4096):\n data = os.read(fd.fileno(), size)\n for _ in range(3):\n try:\n return data, data.decode(\"utf-8\")\n except UnicodeDecodeError as e:\n if e.reason != \"unexpected end of data\":\n raise\n data += os.read(fd.fileno(), 1)\n\n return data, data.decode(\"utf-8\")", "def decode_buffer(buf):\n return buf.getvalue().decode('utf-8')", "def _readBytes(self, len):\n return self.stream.read(len)", "def reads(self, n):\n return self.file.read(n).decode('iso-8859-1')", "def 
Read(buf: IO[bytes]) -> Optional[bytes]:\n count_bytes = buf.read(_UINT64.size)\n if not count_bytes:\n return None\n\n try:\n (count,) = _UINT64.unpack(count_bytes)\n except struct.error as error:\n raise ValueError(f\"Incorrect size tag {count_bytes}: {error}\")\n\n # It might happen that we are given file with incorrect format. If the size\n # tag is interpreted as a huge number, reading the buffer will lead to raising\n # an exception, because Python will try to allocate a buffer to read into. If\n # possible, we try to check guard against such situations and provide more\n # informative exception message.\n\n def Error(left: int) -> ValueError:\n message = f\"Malformed input (reading {count} bytes out of {left} available)\"\n return ValueError(message)\n\n if buf.seekable():\n position = buf.tell()\n\n buf.seek(0, os.SEEK_END)\n size = buf.tell()\n\n if count > size - position:\n raise Error(size - position)\n\n buf.seek(position, os.SEEK_SET)\n\n chunk = buf.read(count)\n if len(chunk) != count:\n raise Error(len(chunk))\n\n return chunk", "def read_message(self):\n text_length_bytes = self.input_fh.read(4)\n logging.debug(\"raw 4: %s\", text_length_bytes)\n if not text_length_bytes:\n # this means exit\n shutdown()\n\n text_length = struct.unpack(\"i\", text_length_bytes)[0]\n logging.debug(\"reading message of length: %s\", text_length)\n msg = self.input_fh.read(text_length).decode()\n logging.debug(\"message is %s\", msg)\n return msg", "def _serial_read(self, size):\n self.write([self.SERIAL_IO])\n resp = self.read(size)\n data = self.decode(resp)\n return data", "def _read_bytes(self, start, num_bytes):\n with self._fp_lock:\n self._fp.seek(start)\n return self._fp.read(num_bytes)", "def read(self, size=-1):\n _complain_ifclosed(self._closed)\n buf = self._buf\n while size < 0 or len(buf) < size:\n try:\n buf = buf + next(self._generator)\n except StopIteration:\n break\n\n returned = b\"\"\n if size >= 1:\n self._buf = buf[size:]\n returned = buf[:size]\n else:\n self._buf = b\"\"\n returned = buf\n\n self._position = self._position + len(returned)\n return returned", "def rcvString(self, num=1):\r\n\t\t# verifico quanti caratteri ci sono gia' nel buffer\r\n\t\tcou = self.ser.inWaiting()\r\n\t\tif cou > 0:\r\n\t\t\tif cou >= num:\r\n\t\t\t\t# provo la ricezione\r\n\t\t\t\tdat = self.ser.read(num)\r\n\t\t\telse:\r\n\t\t\t\tdat = self.ser.read(cou)\r\n\t\t\t\t# dati letti\r\n\t\t\t\tnum = cou\r\n\t\telse:\r\n\t\t\tdat = None\r\n\t\t\tnum = 0\r\n\t\treturn (num, dat)", "def read(self, nbytes: int, /) -> Optional[bytes]:", "def read(self, w):\n pass", "def readbuf_slow(self, n):\n msg = ''\n self.sock.setblocking(0)\n try:\n for i in range(n):\n msg += self.sock.recv(1)\n except BaseException:\n pass\n self.sock.setblocking(1) # belt and suspenders\n self.settimeout(self.__timeout)\n return msg", "def read(cls, buf):\n has_bytes = buf.remaining()\n if has_bytes < 1:\n raise NeedBytes, 1\n\n first = buf.read_uchar()\n\n size = (((first & 0xc0) >> 6) ^ 0x3) << 2\n if size == 0:\n size = 1\n\n if has_bytes < size:\n raise NeedBytes, size-has_bytes\n\n object_id = first & 0x3f\n timestamp = length = type = stream_id = None\n\n if size != 1:\n timestamp = buf.read_24bit_uint()\n\n if size >= 8:\n length = buf.read_24bit_uint()\n type = buf.read_uchar()\n\n if size == 12:\n stream_id = buf.read_ulong()\n\n return RTMPHeader(object_id, timestamp, length, type, stream_id)", "def read(self, n=None):\n l = []\n if n is None or n < 0:\n while True:\n m = self._read1()\n if not m:\n break\n 
l.append(m)\n else:\n while n > 0:\n m = self._read1(n)\n if not m:\n break\n n -= len(m)\n l.append(m)\n return b''.join(l)", "def read_string(self, block_size, double_byte=False):\n fmt = ('B' if not double_byte else 'H') * block_size\n string = ''\n if double_byte:\n db_string = self.read(fmt)\n for c in db_string[:-1]:\n string += uchr(c)\n else:\n b_string = self.read(fmt)\n for c in b_string:\n string += uchr(c)\n return string", "def _read_v2(self):\n return self.usb_dev.read(self.ep_in, self.rdbuf_chunksize, self.usb_rd_timeout)", "def read32(bytestream):\n dt = np.dtype(np.uint32).newbyteorder('>')\n return np.frombuffer(bytestream.read(4), dtype=dt)[0]", "def read32(bytestream):\n dt = np.dtype(np.uint32).newbyteorder('>')\n return np.frombuffer(bytestream.read(4), dtype=dt)[0]", "def read(self, size=1):\n \n data = self.fp.read(1)\n if data == '':\n self.fp.seek(0)\n data = self.fp.read(1)\n \n return data", "def reads(self, n):\n val = self.f.read(n)\n self.cs and self.cs.add(val)\n try:\n val = val.decode('utf-8')\n except:\n if self.debug_level > 5:\n print(\"ERROR DECODING: {}\".format(val))\n pass\n return val", "def read(self, num=1):\n contents = self.stream.read(num)\n self.bitsRead += len(contents)\n self.observerRead(contents)\n return contents", "def read( shell, maxbytes=1024 ):\n global readbuf\n count = len( readbuf )\n if count < maxbytes:\n data = os.read( shell.stdout.fileno(), maxbytes - count )\n readbuf += data\n if maxbytes >= len( readbuf ):\n result = readbuf\n readbuf = ''\n else:\n result = readbuf[ :maxbytes ]\n readbuf = readbuf[ maxbytes: ]\n return result", "def __read_until(self, buffer, char, break_space=False):\n\t\tret = []\n\t\ttoken = buffer.read(1)\n\t\twhile token != char:\n\t\t\tif break_space and token.isspace():\n\t\t\t\treturn\n\t\t\tret.append(token)\n\t\t\ttoken = buffer.read(1)\n\t\t\tif not token:\n\t\t\t\tbreak\n\t\treturn \"\".join(ret)", "def read_serial(self, num_expected):\n\n\t\ttotal_received = 0\n\t\tread_chars = \"\"\t\n\t\n\t\twhile total_received < num_expected:\n\t\t\tiw = self.ser.inWaiting()\n\n\t\t\tif iw > num_expected:\n\t\t\t\tiw = num_expected\n\t\t\tread_chars = read_chars + self.ser.read(iw)\n\t\t\ttotal_received += iw\n\t\t\ttime.sleep(0.001)\n\t\treturn read_chars", "def _read_len(self):\n\n read = self.socket.recv(4)\n if len(read) == 0:\n # if we read 0 bytes and self.message is empty, it means client\n # closed the connection\n if len(self.message) != 0:\n logging.error(\"can't read frame size from socket\")\n self.close()\n return\n self.message += read\n if len(self.message) == 4:\n self.len, = struct.unpack(b'!i', self.message)\n if self.len < 0:\n logging.error(\"negative frame size, it seems client\"\\\n \" doesn't use FramedTransport\")\n self.close()\n elif self.len == 0:\n logging.error(\"empty frame, it's really strange\")\n self.close()\n else:\n self.len += 4 # Include message length\n self._set_status(WAIT_MESSAGE)", "def _read_contact_4(self, data, ndata):\n return self._read_geom_4(self._edt_map, data, ndata)", "def read(self, s):\n pass", "def read1(self, bytecount: int = -1) -> bytes:\n try:\n out = self._buffer or next(self)\n if bytecount and bytecount > 0:\n out, self._buffer = out[:bytecount], out[bytecount:]\n elif self._buffer:\n self._buffer = B''\n return out\n except StopIteration:\n return B''", "def _read_i2c(fd, n):\n if n == 0:\n return b''\n buf = os.read(fd, n)\n if len(buf) != n:\n raise OSError(errno.EIO, os.strerror(errno.EIO))\n return buf", "def extract_chars(infile, 
n=10000):\n reader = partial(get_chars, n)\n return read_on(reader, infile)", "def read_bytes(self, size):\n return self.read('bytes:'+str(size))", "def read(self, *args, **kwargs):\r\n buf = io.BufferedReader.read(self, *args, **kwargs)\r\n self.increment(len(buf))\r\n return buf", "def read_ascii_line(self):\n str = ''\n empties = 0\n while(empties < 5 and str[-2:] != '\\r\\n'):\n time.sleep(.1)\n newdata = self.read()\n str += newdata\n if newdata:\n empties = 0\n else:\n empties += 1\n if empties: # last result must have gotten data, so empties should be zero\n raise LabProTimeout(\n 'timeout getting ascii data, current result: ' + repr(str))\n return str.strip()", "def read(self) -> bytes:\n line = self.device.readline()\n if len(line) > 0 and line[-1] == 10:\n line += self.device.readline()\n return line", "def _raw_record_reader(stream):\n while True:\n header = stream.read(4)\n if len(header) < 4:\n return\n size, rec_type = struct.unpack(\">HH\", header)\n rec_type = rec_type // 256\n yield (rec_type, header + stream.read(size - 4))", "def readline(self):\n while(True):\n rxcount = self.in_waiting \n if rxcount > 0: \n for pos, i in enumerate(self.buffer):\n # look for the \\n\n if i == 10: \n line=''\n linebuf = self.buffer[:pos]\n self.buffer = self.buffer[pos+1:]\n for c in linebuf:\n line += chr(c)\n return line" ]
[ "0.59958696", "0.59186286", "0.59186286", "0.5890578", "0.5830287", "0.58239377", "0.57918686", "0.5736559", "0.57338023", "0.5728563", "0.5623694", "0.5568525", "0.55537534", "0.55537534", "0.5420426", "0.54010665", "0.53901047", "0.5340353", "0.532755", "0.5276123", "0.5266699", "0.52222484", "0.5216403", "0.51992154", "0.5162364", "0.51580733", "0.5143916", "0.5132609", "0.5125459", "0.51172495", "0.5113502", "0.51117754", "0.5110686", "0.51031816", "0.50920945", "0.50603074", "0.5053797", "0.5053797", "0.50474554", "0.5035259", "0.5008224", "0.5005807", "0.5005544", "0.5000865", "0.49670854", "0.49426365", "0.49360418", "0.49228987", "0.49128947", "0.49028242", "0.49028242", "0.49003404", "0.4897887", "0.48946705", "0.4891988", "0.48904064", "0.48855606", "0.48848265", "0.48819733", "0.48778668", "0.48613316", "0.48580593", "0.4856686", "0.4848014", "0.4838984", "0.48245358", "0.4822909", "0.48143414", "0.4811287", "0.4804163", "0.47983855", "0.47962376", "0.4787264", "0.47868708", "0.47834826", "0.47827014", "0.47741386", "0.47685897", "0.47657582", "0.47632593", "0.47593012", "0.47593012", "0.4745319", "0.47450516", "0.47431502", "0.47388583", "0.47306746", "0.47300977", "0.4729283", "0.47265518", "0.4698261", "0.46953934", "0.46881703", "0.46863925", "0.4685974", "0.46857232", "0.46805105", "0.46777257", "0.46773627", "0.46750298" ]
0.81999147
0
Read n chars into buf; read() may be called multiple times.
def read(self, buf, n):
    # Serve whatever is left over from the previous call first.
    l = min(len(self.prev), n)
    buf[:l] = self.prev[:l]
    self.prev = self.prev[l:]  # pitfall: self.prev = [] here would drop leftovers beyond l
    idx = l
    # Then keep calling read4 until n chars are delivered or the input runs out.
    while idx < n:
        buf4 = ["" for _ in xrange(4)]
        r = read4(buf4)
        if idx + r < n:
            buf[idx:idx+r] = buf4[:r]
            idx += r
            if r < 4:
                return idx
        else:
            buf[idx:n] = buf4[:n-idx]
            self.prev = buf4[n-idx:r]  # pitfall: buf4[n-idx:] would also keep the unfilled slots past r
            idx = n
    return idx
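For context, the sketch below wires the read() above into a tiny self-contained harness so the "called multiple times" behavior can be seen end to end. Everything in it is invented for illustration and is not part of the dataset record: the in-memory FILE, the read4 stub, and the Reader class name are assumptions, and range replaces xrange only so the sketch also runs under Python 3.

# Hypothetical harness (assumption, not from the record): an in-memory "file"
# plus a read4 stub that hands out at most 4 chars per call.
FILE = list("abcdefghij")
_pos = 0


def read4(buf4):
    # Copy up to 4 chars into buf4 and return how many were actually copied.
    global _pos
    n = min(4, len(FILE) - _pos)
    buf4[:n] = FILE[_pos:_pos + n]
    _pos += n
    return n


class Reader(object):
    def __init__(self):
        self.prev = []  # chars fetched by read4 but not yet handed to the caller

    def read(self, buf, n):
        # Same algorithm as the record above; range is used instead of xrange
        # only so the sketch also runs on Python 3.
        l = min(len(self.prev), n)
        buf[:l] = self.prev[:l]
        self.prev = self.prev[l:]
        idx = l
        while idx < n:
            buf4 = ["" for _ in range(4)]
            r = read4(buf4)
            if idx + r < n:
                buf[idx:idx + r] = buf4[:r]
                idx += r
                if r < 4:
                    return idx
            else:
                buf[idx:n] = buf4[:n - idx]
                self.prev = buf4[n - idx:r]
                idx = n
        return idx


reader = Reader()
buf = [""] * 10
got = reader.read(buf, 3)
print("".join(buf[:got]))   # -> abc    (read4 fetched "abcd"; "d" stays in self.prev)
got = reader.read(buf, 6)
print("".join(buf[:got]))   # -> defghi (the leftover "d" is served first)

The second call must start from the leftover "d" that the first call's read4 pulled but did not deliver, which is exactly what self.prev carries between calls.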
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def readbuf(self, n):\n if n == 0:\n return ''\n try:\n msg = self.sock.recv(n)\n except BaseException:\n msg = ''\n n2 = min(n - len(msg), n / 2)\n return msg + self.readbuf(n2)", "def read(self, n=1):\n s = self._RX_buf[0:n]\n self._RX_buf = self._RX_buf[n:]\n # print(\"read op occurred: RX_buf = {}\".format(self._RX_buf), end='\\n\\n')\n return s # bytes(s, encoding='ascii')", "def readbuf_slow(self, n):\n msg = ''\n self.sock.setblocking(0)\n try:\n for i in range(n):\n msg += self.sock.recv(1)\n except BaseException:\n pass\n self.sock.setblocking(1) # belt and suspenders\n self.settimeout(self.__timeout)\n return msg", "def read(self, n):\n msg = self.readexactly(n)\n n2 = n - len(msg)\n if n2 > 0:\n msg += self.readbuf(n2)\n return msg", "def read(self, n):\n buffer = [] #Buffer for storing digits to output\n \n #While more digits needed (and limit not reached), add more digits\n while len(buffer) < n and self.digits_read < self.limit:\n #Get the next character\n char = self.file.read(1)\n #If out of characters, end search\n if char == '':\n self.file.close()\n self.file = None\n break\n #Only add numerical digits to the buffer\n if char.isdigit():\n buffer.append(int(char))\n self.digits_read += 1\n \n #Return digits\n return buffer", "def socket_read_n(self, n):\n # server_log.debug('entering socket_read_n loop')\n buf = '' \n while n > 0:\n\n server_log.debug('listening...')\n data = self.sock.recv(n)\n if data == '':\n raise RuntimeError('unexpected connection close')\n # server_log.debug('no response')\n # time.sleep(5)\n buf += data\n n -= len(data)\n\n # server_log.debug('return from socket_read_n')\n \n return buf", "def readinto(self, buf: bytes, nack: bool = True, /) -> None:", "def readinto(self, buf: bytes, nack: bool = True, /) -> None:", "def _read_nowait(self, n: int) -> bytes:\n ...", "def read_nchars(string, n=1):\n return string[:n]", "def read_count(f, n):\n buf = ''\n while len(buf) < n:\n nextchunk = f.read(n - len(buf))\n if not nextchunk:\n return ''\n buf += nextchunk\n return buf", "def extract_chars(infile, n=10000):\n reader = partial(get_chars, n)\n return read_on(reader, infile)", "def readinto(self, buf: bytes, nbytes: int, /) -> Optional[int]:", "def readexactly(self, n):\n if n < 0:\n raise ValueError('readexactly size can not be less than zero')\n\n if self._exception is not None:\n raise self._exception\n\n if n == 0:\n return b''\n\n while len(self._buffer) < n:\n if self._eof:\n incomplete = bytes(self._buffer)\n self._buffer.clear()\n raise IncompleteReadError(incomplete, n)\n\n yield from self._wait_for_data('readexactly')\n\n if len(self._buffer) == n:\n data = bytes(self._buffer)\n self._buffer.clear()\n else:\n data = bytes(self._buffer[:n])\n del self._buffer[:n]\n self._maybe_resume_transport()\n return data", "def read(self, n=None):\n l = []\n if n is None or n < 0:\n while True:\n m = self._read1()\n if not m:\n break\n l.append(m)\n else:\n while n > 0:\n m = self._read1(n)\n if not m:\n break\n n -= len(m)\n l.append(m)\n return b''.join(l)", "def read( self, bytes=1024 ):\n count = len( self.readbuf )\n if count < bytes:\n data = os.read( self.stdout.fileno(), bytes - count )\n self.readbuf += data\n if bytes >= len( self.readbuf ):\n result = self.readbuf\n self.readbuf = ''\n else:\n result = self.readbuf[ :bytes ]\n self.readbuf = self.readbuf[ bytes: ]\n return result", "def readinto(self, buf: Any, nbytes: int=-1) -> int:\n ...", "def recv(self, n=4096):\n if len(self._buf) < n:\n buf = self._s.recv(65536)\n if not buf and not 
self._buf:\n raise DisconnectException(\"Server disconnected.\")\n if self._verbose:\n self._prettyprint(buf, False)\n self._buf += buf\n\n # This code also works if n > len(self._buf)\n buf = self._buf[:n]\n self._buf = self._buf[n:]\n return buf", "def read(self, *args, **kwargs):\r\n buf = io.BufferedReader.read(self, *args, **kwargs)\r\n self.increment(len(buf))\r\n return buf", "def readinto(self, buf: bytearray, nack: bool = True) -> None:\n ...", "def _read_i2c(fd, n):\n if n == 0:\n return b''\n buf = os.read(fd, n)\n if len(buf) != n:\n raise OSError(errno.EIO, os.strerror(errno.EIO))\n return buf", "def read(self, num_bytes_to_read):\n pass", "def readinto(self, buf: AnyWritableBuf, nbytes: int, /) -> int | None:", "def read(self, n=-1):\n\n if self._exception is not None:\n raise self._exception\n\n if n == 0:\n return b''\n\n if n < 0:\n # This used to just loop creating a new waiter hoping to\n # collect everything in self._buffer, but that would\n # deadlock if the subprocess sends more than self.limit\n # bytes. So just call self.read(self._limit) until EOF.\n blocks = []\n while True:\n block = yield from self.read(self._limit)\n if not block:\n break\n blocks.append(block)\n return b''.join(blocks)\n\n if not self._buffer and not self._eof:\n yield from self._wait_for_data('read')\n\n # This will work right even if buffer is less than n bytes\n data = bytes(self._buffer[:n])\n del self._buffer[:n]\n\n self._maybe_resume_transport()\n return data", "def _read_chunked(self):\r\n buf = \"\"\r\n size = 1\r\n while size:\r\n size = int(self._read_line(), 16)\r\n buf += self._read_num_bytes(size)\r\n self._read_num_bytes(2) # CRLF\r\n return buf", "def readinto(self, buf: bytes, /) -> Optional[int]:", "async def readexactly(self,\n n: int\n ) -> bytes:\n if n < 1:\n return b''\n\n future = asyncio.Future()\n try:\n self._read_queue.put_nowait((future, True, n))\n return await future\n\n except aio.QueueClosedError:\n raise ConnectionError()", "def read(self, n):\n logger.debug(\"Reading {} bytes...\".format(n))\n bytes_ = self.impl.read(n)\n logger.debug(\"Received: {} bytes\".format(len(bytes_)))\n return bytes_", "def _read_num_bytes(self, num):\r\n buf = \"\"\r\n while len(buf) < num:\r\n chunk = self.sock.recv(num - len(buf))\r\n if not chunk:\r\n raise SocketClosedException\r\n buf += chunk\r\n return buf", "def read(self, n):\n return self.file.read(n)", "def reads(self, n):\n val = self.f.read(n)\n self.cs and self.cs.add(val)\n try:\n val = val.decode('utf-8')\n except:\n if self.debug_level > 5:\n print(\"ERROR DECODING: {}\".format(val))\n pass\n return val", "async def read(self,\n n: int = -1\n ) -> bytes:\n if n == 0:\n return b''\n\n future = asyncio.Future()\n try:\n self._read_queue.put_nowait((future, False, n))\n return await future\n\n except aio.QueueClosedError:\n raise ConnectionError()", "def read(self, n=None):\n l = []\n if n is None or n < 0:\n while True:\n m = self._read1()\n if not m:\n break\n l.append(m)\n else:\n while n > 0:\n m = self._read1(n)\n if not m:\n break\n n -= len(m)\n l.append(m)\n return u''.join(l)", "def consume (self, n) :\r\n if (n<0 or n>len(self)) :\r\n m = \"Trying to consume more data than in Circ. 
Buff\"\r\n raise Exception(m)\r\n \r\n self.empty_ = (n==len(self))\r\n self.nextGet_ = (self.nextGet_+n) % self.capacity()", "def create_buffer(self, cursor, n):\n buffer = []\n for c in cursor:\n buffer.append(c)\n if len(buffer) >= n:\n yield buffer\n buffer = []\n yield buffer", "def getcharsposix(n):\n\t\n\tfd = sys.stdin.fileno()\n\toldSettings = termios.tcgetattr(fd)\n\tstring = \"\"\n\ti = 0\n\t# Loop until we get N chars\n\twhile i <= n:\n\t\t# Do some magic\n\t\ttry:\n\t\t\ttty.setcbreak(fd)\n\t\t\tanswer = sys.stdin.read(1)\n\t\t\tif answer == b'\\x03':\n\t\t\t\traise KeyboardInterrupt()\n\t\t\ttry:\n\t\t\t\tstring += str(answer, ENCODING)\n\t\t\texcept UnicodeDecodeError:\n\t\t\t\tcontinue\n\t\tfinally:\n\t\t\ttermios.tcsetattr(fd, termios.TCSADRAIN, oldSettings)\n\t\t\ti += 1\n\t# Return string\n\treturn string", "def read4(buf):\n return 0", "def _read_to_buffer(cls, buf, stream):\n # We could read it in one step, but instead we'll read it in chunks to avoid big temporaries.\n # (See below.)\n # buf[:] = stream.read( len(buf) )\n\n # Read data from the stream in chunks\n remaining_bytes = len(buf)\n while remaining_bytes > 0:\n next_chunk_bytes = min( remaining_bytes, VoxelsNddataCodec.STREAM_CHUNK_SIZE )\n chunk_start = len(buf)-remaining_bytes\n chunk_stop = len(buf)-(remaining_bytes-next_chunk_bytes)\n buf[chunk_start:chunk_stop] = stream.read( next_chunk_bytes )\n remaining_bytes -= next_chunk_bytes", "def read(self, n=1):\n return self.string[self.pos:self.pos + n]", "async def read(self, n: int = -1) -> AnyStr:\n\n # load file\n if len(self._buffer) == 0 and \"r\" in self.mode:\n await self._download()\n\n # check size\n if n == -1:\n data = self._buffer\n self._pos = len(self._buffer) - 1\n else:\n # extract data to read\n data = self._buffer[self._pos : self._pos + n]\n self._pos += n\n\n # return data\n return data", "async def _read(self, n):\n return await self._reader.readexactly(n)", "def get_chars(n:int, f) -> Union[str, None]:\n chars = f.read(n)\n return read_outcomes(chars)", "def serial_read(self, size):\n line=''\n # How many chars in the buffer\n actualsize = len(self.buffer)\n # maximal the avialable chars\n if size > actualsize:\n size = actualsize\n linebuf = self.buffer[:size]\n self.buffer = self.buffer[size:]\n for c in linebuf:\n line += chr(c)\n return line", "def read(self, n=1):\n return 0", "def _read_fixed(buf, length):\n result = buf.read(length)\n actual = len(result)\n if actual != length:\n raise EndOfMessage(False if actual == 0 else True)\n return result", "def read(self, l):\n\t\twhile l > len(self.buf):\n\t\t\tself.buf += self.conn.recv(4096)\n\n\t\tobuf = self.buf\n\t\tself.buf = obuf[l:]\n\t\treturn obuf[:l]", "def cstringio_refill(self, partialread, reqlen):\r\n pass", "def readinto(self, buf: bytearray, nbytes: Optional[int] = None) \\\n -> Optional[int]:\n ...", "def readinto(self, buf: bytes, write: int = 0x00, /) -> Optional[int]:", "def readinto(self, buf: bytes, write: int = 0x00, /) -> Optional[int]:", "def _recv_nbytes_from_socket(self, socket, n):\n\n output = []\n bytes_read = 0\n while True:\n chunk = socket.recv(n)\n if chunk != '':\n output.append(chunk)\n bytes_read += len(chunk)\n if bytes_read >= n:\n break\n return ''.join(output)", "def reads(self, n):\n return self.file.read(n).decode('iso-8859-1')", "def read(self, nChar=None):\n raise NotImplementedError()", "def readline(self):\n while(True):\n rxcount = self.in_waiting \n if rxcount > 0: \n for pos, i in enumerate(self.buffer):\n # look for the \\n\n if i == 
10: \n line=''\n linebuf = self.buffer[:pos]\n self.buffer = self.buffer[pos+1:]\n for c in linebuf:\n line += chr(c)\n return line", "def read(self, nbytes: int, /) -> bytes | None:", "def read(self, nbytes, /) -> bytes | None:", "def chars(count):\n\n global offset\n\n bytes=midifile[offset:offset+count]\n offset+=count\n return bytes", "def read( shell, maxbytes=1024 ):\n global readbuf\n count = len( readbuf )\n if count < maxbytes:\n data = os.read( shell.stdout.fileno(), maxbytes - count )\n readbuf += data\n if maxbytes >= len( readbuf ):\n result = readbuf\n readbuf = ''\n else:\n result = readbuf[ :maxbytes ]\n readbuf = readbuf[ maxbytes: ]\n return result", "def read_bytes(self, number_of_bytes):\n\n self.index = -1\n data = self.buf[self.offset:self.offset + number_of_bytes]\n self.offset += number_of_bytes\n\n return data", "def eat(seq, n=None):\n if n is None:\n collections.deque(seq, maxlen=0)\n else:\n next(itertools.islice(seq, n, n), None)", "def recv(client, n):\n chunk = b''\n while n > 0:\n ch = client.recv(n)\n if ch == b'':\n raise EOFError()\n chunk += ch\n n -= len(ch)\n assert(n == 0)\n return chunk", "def _buffer(self, n=None):\n if self._out_of_scope:\n raise ResultConsumedError(self, _RESULT_OUT_OF_SCOPE_ERROR)\n if self._consumed:\n raise ResultConsumedError(self, _RESULT_CONSUMED_ERROR)\n if n is not None and len(self._record_buffer) >= n:\n return\n record_buffer = deque()\n for record in self:\n record_buffer.append(record)\n if n is not None and len(record_buffer) >= n:\n break\n if n is None:\n self._record_buffer = record_buffer\n else:\n self._record_buffer.extend(record_buffer)\n self._exhausted = not self._record_buffer", "def read_list(self, n):\n i = self.pos\n ret = self._buffer[i:i + n]\n if len(ret) < n:\n raise self._eof\n\n self.pos += n\n return ret", "def read(self, n):\n assert self._read_future is None, \"Concurrent reads detected\"\n\n read_future = Future(self._loop)\n\n if self._unread_bytes or self._eof_recvd:\n read_future.set_result(self._unread_bytes)\n self._unread_bytes = b''\n else:\n self._read_future = read_future\n def read_future_done(_):\n self._read_future = None\n read_future.add_done_callback(read_future_done)\n\n return read_future", "def _send_from_buffer(cls, buf, stream):\n remaining_bytes = len(buf)\n while remaining_bytes > 0:\n next_chunk_bytes = min( remaining_bytes, VoxelsNddataCodec.STREAM_CHUNK_SIZE )\n chunk_start = len(buf)-remaining_bytes\n chunk_stop = len(buf)-(remaining_bytes-next_chunk_bytes)\n stream.write( buf[chunk_start:chunk_stop] )\n remaining_bytes -= next_chunk_bytes", "def ReadAll(buf: IO[bytes]) -> Iterator[bytes]:\n while True:\n chunk = Read(buf)\n if chunk is None:\n return\n\n yield chunk", "def read(self, size=-1):\n if not self._buf:\n self._buf.append(next(self._iter, b''))\n if len(self._buf[0]) < size or size < 0:\n return self._buf.pop(0)\n block = self._buf.pop(0)\n self._buf.insert(0, block[size:])\n return block[:size]", "def read_until_size(self, size):\n if not size:\n do_return(b'')\n with self.reading:\n while len(self.read_buffer) < size:\n self.read_buffer.enqueue((yield self.base.read(self.bufsize)))\n do_return(self.read_buffer.dequeue(size))", "def read(self, size=-1):\n _complain_ifclosed(self._closed)\n buf = self._buf\n while size < 0 or len(buf) < size:\n try:\n buf = buf + next(self._generator)\n except StopIteration:\n break\n\n returned = b\"\"\n if size >= 1:\n self._buf = buf[size:]\n returned = buf[:size]\n else:\n self._buf = b\"\"\n returned = buf\n\n 
self._position = self._position + len(returned)\n return returned", "def input_buffer_peek_n(self, n):\n assert self.curr_input_buff_idx + n - 1 <= len(self.input_buffer)\n return self.input_buffer[self.curr_input_buff_idx:self.curr_input_buff_idx+n]", "def __read_block(self, size):\n buf = b\"\"\n if len(self.__read_buffer):\n limit = (\n size if size <= len(self.__read_buffer) else\n len(self.__read_buffer)\n )\n buf = self.__read_buffer[:limit]\n self.__read_buffer = self.__read_buffer[limit:]\n size -= limit\n if not size:\n return buf\n try:\n buf += self.sock.recv(size)\n except (socket.timeout, ssl.SSLError):\n raise Error(\"Failed to read %d bytes from the server\" % size)\n self.__dprint(buf)\n return buf", "def recvn(self, n):\n data = []\n while len(data) != n:\n data.append(self.recv(1))\n\n return b''.join(data)", "def read_ascii_line(self):\n str = ''\n empties = 0\n while(empties < 5 and str[-2:] != '\\r\\n'):\n time.sleep(.1)\n newdata = self.read()\n str += newdata\n if newdata:\n empties = 0\n else:\n empties += 1\n if empties: # last result must have gotten data, so empties should be zero\n raise LabProTimeout(\n 'timeout getting ascii data, current result: ' + repr(str))\n return str.strip()", "def read_count(buffer, offset, count):\n\n return buffer[offset:offset + count]", "def read(self, nbytes: int, write: int = 0x00, /) -> bytes:", "def read(self, nbytes: int, write: int = 0x00, /) -> bytes:", "def next_n_bytes(packet, n):\n ret = packet[:n]\n remaining_packet = packet[n:]\n return bytes(ret), bytes(remaining_packet)", "def _recvall(conn, n):\n data = b''\n while len(data) < n:\n packet = conn.recv(n - len(data))\n if not packet:\n return None\n data += packet\n return data", "def read(self, size=-1):\n ...", "def recvall(sock, n):\n\n data = ''\n while len(data) < n:\n packet = sock.recv(n - len(data))\n if not packet:\n return None\n data += packet\n return data", "def readexactly(self, n):\n t0 = time.time()\n msg = \"\"\n timeout = self.gettimeout()\n while len(msg) < n:\n newtimeout = timeout - (time.time() - t0)\n if newtimeout <= 0.0:\n break\n self.settimeout(newtimeout)\n try:\n msg = self.sock.recv(n, socket.MSG_PEEK)\n except BaseException:\n pass\n # Flush the message out if you got everything\n if len(msg) == n:\n msg = self.sock.recv(n).decode()\n # Otherwise tell nothing and leave the data in the buffer\n else:\n msg = ''\n self.settimeout(timeout)\n return msg", "def i2c_read_repeated(self, addrs, size):\n buf = [0x00, 0x93]\n return self._i2c_read(addrs, size, buf)", "def _popN(self, n):\n for _ in range(n):\n self._buffer.popleft()", "def n_char(self,char,n,w=1,h=1):\n for i in range(n):\n self.esprint(char,w,h)", "def read1(self, bytecount: int = -1) -> bytes:\n try:\n out = self._buffer or next(self)\n if bytecount and bytecount > 0:\n out, self._buffer = out[:bytecount], out[bytecount:]\n elif self._buffer:\n self._buffer = B''\n return out\n except StopIteration:\n return B''", "def _read_bytes(self, start, count): # type: (int) -> bytes\n bytes_data = self._buffer[start:start + count]\n\n if len(bytes_data) != count:\n raise ASN1WantMore('Premature end of input.')\n\n return bytes_data", "def _print_bytes(self, n):\n data = self._buffer[self.pos:self.pos + n]\n print()\n for pos, byte in enumerate(data, start=self.pos):\n char = chr(byte)\n if not char in string.printable or char in string.whitespace:\n char = ''\n print(' {:06x}: {:02x} {}'.format(pos, byte, char))\n\n if len(data) < n:\n raise EOFError('unexpected end of file')", "def 
read(self, nbytes: int, /) -> Optional[bytes]:", "def read_data(self, size, attempts = 1):\n data = Array('B')\n # do we have all of the data in the read buffer?\n if size <= len(self.rdbuf) - self.rdofs:\n data = self.rdbuf[self.rdofs : self.rdofs + size]\n self.rdofs += size\n return data\n # do we have some of the data in the read buffer?\n if len(self.rdbuf) - self.rdofs > 0:\n data = self.rdbuf[self.rdofs:]\n # do a usb read to get the rest...\n # read from the usb device\n try:\n bytes_to_rd = size - len(data)\n while bytes_to_rd > 0:\n # read from the usb device\n while True:\n self.rdbuf = self._read()\n self.rdofs = 0\n if len(self.rdbuf) > 0:\n break\n else:\n # no data received\n attempts -= 1\n if attempts > 0:\n # try again\n continue\n # return what we have\n return data\n # copy the read buffer into the returned data\n n = len(self.rdbuf)\n if n >= bytes_to_rd:\n # copy a partial read buffer\n data += self.rdbuf[:bytes_to_rd]\n self.rdofs = bytes_to_rd\n return data\n else:\n # copy all of the read buffer\n data += self.rdbuf\n bytes_to_rd -= n\n # read more data...\n except usb.core.USBError as e:\n raise usbdev_error(str(e))\n # never reached\n raise usbdev_error(\"internal error\")", "def _read_into_buffer(self):\n raise NotImplementedError()", "def fillBuffer():\n buff[bufferCounter].next = dataIn", "def readinto(self, buf: AnyWritableBuf, maxlen: int, /) -> int | None:", "def readinto(self, buf: AnyWritableBuf, /) -> int | None:", "def readinto(self, buf: AnyWritableBuf, /) -> int | None:", "def chunk_bytes(buf):\n assert len(buf) >= CHUNK_SIZE / 2\n n = len(buf)\n if n < CHUNK_SIZE:\n yield buf[: CHUNK_SIZE // 2] + buf[-CHUNK_SIZE // 2 :], n\n return\n\n for i in range(0, len(buf), CHUNK_SIZE):\n if i + CHUNK_SIZE <= n:\n yield buf[i : i + CHUNK_SIZE], CHUNK_SIZE\n else:\n yield buf[n - CHUNK_SIZE :], n - i", "def _read_amt(self, byte_count):\n full_msg = bytearray()\n while len(full_msg) < byte_count:\n block = self.request.recv(byte_count - len(full_msg))\n full_msg.extend(block)\n return full_msg", "def consume(self):\n rest = self._buf.read()\n self._buf.seek(0, 0)\n self._buf.truncate(0)\n self._len = 0\n self.append(rest)", "def scroll(self, count: int = 1) -> None:\n\n if count >= 0:\n offset = 0\n else:\n offset = 1\n for i in range(self._chars - 1):\n self._set_buffer(\n self._adjusted_index(i + offset),\n self._get_buffer(self._adjusted_index(i + count)),\n )", "def _read_raw_bytes_multiple(self, size, maxread=512, verbose=0):\n ret = []\n instr = self.visa_handle\n with self.visa_handle.ignore_warning(pyvisa.constants.VI_SUCCESS_MAX_CNT):\n nread = 0\n while nread < size:\n nn = min(maxread, size - nread)\n chunk, status = instr.visalib.read(instr.session, nn)\n ret += [chunk]\n nread += len(chunk)\n if verbose:\n print('_read_raw: %d/%d bytes' % (len(chunk), nread))\n ret = b''.join(ret)\n return ret", "def _filesync_read_buffered(self, size, adb_info, filesync_info):\n # Ensure recv buffer has enough data.\n while len(filesync_info.recv_buffer) < size:\n _, data = self._read_until([constants.WRTE], adb_info)\n filesync_info.recv_buffer += data\n\n result = filesync_info.recv_buffer[:size]\n filesync_info.recv_buffer = filesync_info.recv_buffer[size:]\n return result" ]
[ "0.7296429", "0.7184826", "0.71118164", "0.6764044", "0.65951157", "0.6557776", "0.6513806", "0.6513806", "0.6513788", "0.6481166", "0.6312328", "0.6295568", "0.62502605", "0.62111115", "0.61962515", "0.6186824", "0.6165042", "0.6112361", "0.60271007", "0.60082614", "0.60041124", "0.5955228", "0.5943969", "0.5916292", "0.59119785", "0.58812594", "0.5875636", "0.58718544", "0.58545315", "0.58536303", "0.5829938", "0.581212", "0.58105195", "0.58085376", "0.5793185", "0.57816106", "0.57714146", "0.5769328", "0.57692444", "0.5761162", "0.5742212", "0.5738205", "0.5733336", "0.5731586", "0.5711419", "0.5710832", "0.5708847", "0.57007706", "0.5695705", "0.5695705", "0.5688264", "0.5684701", "0.5649282", "0.5647874", "0.563835", "0.561418", "0.56066453", "0.56064224", "0.5598072", "0.5537459", "0.5510681", "0.55089515", "0.5496441", "0.5494765", "0.54923", "0.5474508", "0.5473763", "0.54637563", "0.5460606", "0.541932", "0.54174995", "0.54117715", "0.5386808", "0.53861123", "0.53845465", "0.53845465", "0.53710335", "0.5370453", "0.5345932", "0.5342447", "0.5337588", "0.53361505", "0.532909", "0.5327234", "0.53182364", "0.53139234", "0.53121066", "0.5306436", "0.5302131", "0.52888286", "0.5287601", "0.5276923", "0.5274152", "0.5274152", "0.5270412", "0.5266827", "0.5246059", "0.5226428", "0.52047944", "0.5196575" ]
0.7302957
0